From 56018b5d6efa2afb913ada5c377d37ad0f3cbe80 Mon Sep 17 00:00:00 2001 From: Paul Brauner <141240651+paulbrauner-da@users.noreply.github.com> Date: Wed, 6 Dec 2023 10:47:47 +0100 Subject: [PATCH] do a proper canton 3.x code drop in the canton-3x directory (#17980) * do a proper canton 3.x code drop in the canton-3x directory * copy the code from canton3 * address Gary's comments * fix canton-3x --- .bazelignore | 2 +- azure-cron.yml | 2 +- .../community/LICENSE-open-source-bundle.txt | 201 + .../admin/api/client/GrpcCtlRunner.scala | 69 + .../api/client/commands/AdminCommand.scala | 138 + .../client/commands/DomainAdminCommands.scala | 81 + .../client/commands/DomainTimeCommands.scala | 75 + ...rpriseMediatorAdministrationCommands.scala | 175 + .../EnterpriseSequencerAdminCommands.scala | 289 ++ .../client/commands/LedgerApiCommands.scala | 1588 ++++++++ .../commands/LedgerApiTypeWrappers.scala | 112 + .../client/commands/LedgerApiV2Commands.scala | 849 +++++ .../commands/ParticipantAdminCommands.scala | 1223 ++++++ .../commands/PruningSchedulerCommands.scala | 158 + .../commands/SequencerAdminCommands.scala | 68 + .../client/commands/StatusAdminCommands.scala | 89 + .../commands/TopologyAdminCommands.scala | 774 ++++ .../commands/TopologyAdminCommandsX.scala | 771 ++++ .../client/commands/VaultAdminCommands.scala | 371 ++ .../client/data/CommunityCantonStatus.scala | 104 + .../client/data/ConsoleApiDataObjects.scala | 52 + .../api/client/data/DomainParameters.scala | 180 + .../api/client/data/LedgerApiObjectMeta.scala | 14 + .../admin/api/client/data/Metering.scala | 66 + .../admin/api/client/data/PartyDetails.scala | 49 + .../api/client/data/PruningSchedule.scala | 54 + .../admin/api/client/data/TemplateId.scala | 53 + .../admin/api/client/data/Topology.scala | 247 ++ .../api/client/data/UserManagement.scala | 125 + .../api/client/data/crypto/CryptoKeys.scala | 27 + .../api/client/data/crypto/Encryption.scala | 41 + .../admin/api/client/data/crypto/Hash.scala | 
10 + .../api/client/data/crypto/Signing.scala | 29 + .../api/client/data/topologyx/TopologyX.scala | 325 ++ .../canton/config/CantonCommunityConfig.scala | 140 + .../canton/config/CantonConfig.scala | 1600 ++++++++ .../config/CommunityConfigValidations.scala | 302 ++ .../canton/config/ConfigErrors.scala | 137 + .../canton/console/AdminCommandRunner.scala | 118 + .../canton/console/AmmoniteCacheLock.scala | 135 + .../console/AmmoniteConsoleConfig.scala | 91 + .../console/BootstrapScriptException.scala | 8 + .../CommunityCantonHealthAdministration.scala | 195 + .../CommunityHealthDumpGenerator.scala | 31 + .../canton/console/ConsoleCommandResult.scala | 178 + .../canton/console/ConsoleEnvironment.scala | 646 ++++ .../console/ConsoleEnvironmentBinding.scala | 74 + .../canton/console/ConsoleErrorHandler.scala | 32 + .../ConsoleGrpcAdminCommandRunner.scala | 127 + .../canton/console/ConsoleMacros.scala | 847 +++++ .../canton/console/ConsoleOutput.scala | 26 + .../canton/console/FeatureFlag.scala | 34 + .../canton/console/HealthDumpGenerator.scala | 127 + .../digitalasset/canton/console/Help.scala | 349 ++ .../digitalasset/canton/console/Helpful.scala | 23 + .../canton/console/InstanceReference.scala | 810 ++++ .../canton/console/InstancesExtensions.scala | 75 + .../ParticipantReferencesExtensions.scala | 146 + .../commands/ConsoleCommandGroup.scala | 26 + .../commands/DomainAdministration.scala | 438 +++ .../GrpcByteChunksToFileObserver.scala | 47 + .../commands/HealthAdministration.scala | 180 + .../commands/LedgerApiAdministration.scala | 3282 +++++++++++++++++ .../MediatorAdministrationGroup.scala | 232 ++ .../commands/ParticipantAdministration.scala | 1721 +++++++++ .../ParticipantRepairAdministration.scala | 368 ++ .../commands/PartiesAdministration.scala | 398 ++ .../commands/PartiesAdministrationX.scala | 430 +++ .../PruningSchedulerAdministration.scala | 109 + .../SequencerAdministrationGroup.scala | 274 ++ .../commands/TopologyAdministration.scala | 1282 
+++++++ .../commands/TopologyAdministrationX.scala | 1875 ++++++++++ .../TrafficControlAdministrationGroup.scala | 70 + ...cControlSequencerAdministrationGroup.scala | 63 + .../commands/VaultAdministration.scala | 613 +++ .../canton/console/commands/package.scala | 45 + .../digitalasset/canton/console/package.scala | 44 + .../environment/CommunityEnvironment.scala | 147 + .../canton/environment/Environment.scala | 627 ++++ .../canton/environment/Errors.scala | 72 + .../canton/environment/Nodes.scala | 440 +++ .../canton/health/HealthCheck.scala | 232 ++ .../canton/health/HealthCheckResult.scala | 15 + .../canton/health/HealthServer.scala | 87 + .../canton/metrics/MetricsFactory.scala | 382 ++ .../canton/metrics/MetricsSnapshot.scala | 32 + .../canton/metrics/Reporters.scala | 16 + .../resources/LICENSE-open-source-bundle.txt | 1 + .../app/src/main/resources/application.conf | 15 + .../app/src/main/resources/logback.xml | 281 ++ .../app/src/main/resources/repl/banner.txt | 10 + .../digitalasset/canton/CantonAppDriver.scala | 219 ++ .../canton/CantonCommunityApp.scala | 25 + .../com/digitalasset/canton/Runner.scala | 186 + .../com/digitalasset/canton/cli/Cli.scala | 387 ++ .../com/digitalasset/canton/cli/Command.scala | 29 + .../digitalasset/canton/config/Generate.scala | 42 + .../canton/console/BindingsBridge.scala | 12 + .../canton/console/HeadlessConsole.scala | 259 ++ .../canton/console/InteractiveConsole.scala | 162 + canton-3x/community/app/src/pack/bin/canton | 149 + .../community/app/src/pack/bin/canton.bat | 117 + .../examples/01-simple-topology/README.md | 16 + .../01-simple-topology/simple-ping.canton | 33 + .../01-simple-topology/simple-topology.conf | 23 + .../pack/examples/02-global-domain/README.md | 33 + .../global-domain-participant.canton | 13 + .../global-domain-participant.conf | 16 + .../03-advanced-configuration/README.md | 137 + .../api/jwt/certificate.conf | 13 + .../api/jwt/jwks.conf | 9 + .../api/jwt/leeway-parameters.conf | 8 + 
.../api/jwt/unsafe-hmac256.conf | 8 + .../api/large-in-memory-fan-out.conf | 7 + .../api/large-ledger-api-cache.conf | 8 + .../api/public-admin.conf | 7 + .../03-advanced-configuration/api/public.conf | 11 + .../api/wildcard.conf | 7 + .../nodes/domain1.conf | 18 + .../nodes/participant1.conf | 19 + .../nodes/participant2.conf | 19 + .../nodes/participant3.conf | 19 + .../nodes/participant4.conf | 19 + .../parameters/nonuck.conf | 3 + .../participant-init.canton | 22 + .../remote/domain1.conf | 14 + .../remote/participant1.conf | 14 + .../storage/dbinit.py | 51 + .../03-advanced-configuration/storage/h2.conf | 20 + .../storage/memory.conf | 5 + .../storage/postgres.conf | 37 + .../examples/04-create-daml-app/canton.conf | 45 + .../examples/04-create-daml-app/init.canton | 46 + .../pack/examples/05-composability/README.md | 20 + .../composability-auto-transfer.canton | 91 + .../05-composability/composability.conf | 69 + .../05-composability/composability1.canton | 127 + .../05-composability/composability2.canton | 151 + .../src/pack/examples/06-messaging/README.md | 141 + .../pack/examples/06-messaging/canton.conf | 22 + .../examples/06-messaging/contact/.gitignore | 2 + .../examples/06-messaging/contact/daml.yaml | 12 + .../06-messaging/contact/daml/Contact.daml | 13 + .../contact/daml/Contact.solution | 35 + .../06-messaging/contact/frontend-config.js | 184 + .../pack/examples/06-messaging/init.canton | 58 + .../examples/06-messaging/message/.gitignore | 2 + .../examples/06-messaging/message/daml.yaml | 9 + .../06-messaging/message/daml/Message.daml | 58 + .../06-messaging/message/frontend-config.js | 137 + .../app/src/pack/examples/07-repair/README.md | 38 + .../07-repair/domain-export-ledger.conf | 15 + .../07-repair/domain-import-ledger.conf | 15 + .../07-repair/domain-repair-init.canton | 32 + .../07-repair/domain-repair-lost.conf | 15 + .../examples/07-repair/domain-repair-new.conf | 15 + .../07-repair/enable-preview-commands.conf | 2 + 
.../07-repair/import-ledger-init.canton | 80 + .../pack/examples/07-repair/participant1.conf | 18 + .../pack/examples/07-repair/participant2.conf | 18 + .../pack/examples/07-repair/participant3.conf | 18 + .../pack/examples/07-repair/participant4.conf | 18 + .../community/app/src/pack/lib/canton.ico | Bin 0 -> 55358 bytes .../src/test/resources/advancedConfDef.env | 4 + .../disable-ammonite-cache.conf | 1 + .../backwards-compatible.conf | 36 + .../new-config-fields-take-precedence.conf | 52 + .../auth-token-config.conf | 4 + .../caching-configs.conf | 23 + ...ommand-service-max-commands-in-flight.conf | 1 + .../console-timeouts.conf | 6 + .../dev-version-support.conf | 20 + .../ledger-api-target-audience.conf | 13 + .../ledger-api-target-scope.conf | 13 + .../logging-event-details.conf | 10 + .../migrate-and-start.conf | 1 + .../documentation-snippets/no-fail-fast.conf | 1 + .../non-standard-config.conf | 1 + .../documentation-snippets/non-uck-mode.conf | 14 + .../documentation-snippets/postgres-ssl.conf | 27 + .../startup-parallelism.conf | 1 + .../storage-queue-size.conf | 1 + .../app/src/test/resources/dummy.crt | 32 + .../community/app/src/test/resources/examples | 1 + .../test/resources/invalid-configs/bort.conf | 7 + .../invalid-configs/duplicate-storage.conf | 17 + .../invalid-configs/include-missing-file.conf | 1 + .../invalid-configs/invalid-node-names.conf | 9 + .../invalid-configs/missing-bracket.conf | 1 + .../invalid-configs/negative-port.conf | 9 + .../invalid-configs/no-manual-start.conf | 1 + .../invalid-configs/require-missing-file.conf | 3 + .../storage-url-with-password.conf | 13 + .../invalid-configs/undefined-env-var.conf | 1 + .../unknown-key-in-nested-config.conf | 6 + .../bootstrap-with-error-dynamic.canton | 20 + .../scripts/bootstrap-with-error.canton | 16 + .../test/resources/scripts/bootstrap.canton | 4 + .../resources/scripts/participant1.canton | 7 + .../app/src/test/resources/scripts/run.canton | 3 + 
.../src/test/resources/scripts/startup.canton | 5 + .../com/digitalasset/canton/ConfigStubs.scala | 45 + .../admin/api/client/GrpcCtlRunnerTest.scala | 55 + .../admin/api/client/data/MeteringTest.scala | 33 + .../com/digitalasset/canton/cli/CliTest.scala | 125 + .../config/CantonCommunityConfigTest.scala | 368 ++ .../console/AmmoniteCacheLockTest.scala | 53 + .../console/ConsoleCommandResultTest.scala | 51 + .../canton/console/ConsoleMacrosTest.scala | 58 + .../canton/console/ConsoleTest.scala | 309 ++ .../canton/console/HelpTest.scala | 177 + .../CommunityEnvironmentTest.scala | 248 ++ .../canton/environment/NodesTest.scala | 185 + .../canton/health/HealthCheckTest.scala | 258 ++ .../canton/health/HealthServerTest.scala | 25 + ...iseFeatureInCommunityIntegrationTest.scala | 88 + .../tests/ExampleIntegrationTest.scala | 128 + .../tests/release/CliIntegrationTest.scala | 369 ++ .../canton/metrics/MetricsFactoryTest.scala | 25 + .../canton/metrics/MetricsFactoryValues.scala | 91 + .../community/base/src/main/protobuf/buf.yaml | 6 + .../crypto/admin/v0/vault_service.proto | 246 ++ .../canton/crypto/v0/crypto.proto | 199 + .../v0/sequencer_authentication_service.proto | 70 + .../api/v0/sequencer_connect_service.proto | 60 + .../domain/api/v0/sequencer_connection.proto | 32 + .../domain/api/v0/sequencer_service.proto | 212 ++ .../domain/api/v0/service_agreement.proto | 14 + .../domain/api/v1/sequencer_connection.proto | 21 + .../health/admin/v0/status_service.proto | 106 + .../canton/protocol/v0/acs_commitments.proto | 25 + .../canton/protocol/v0/causality.proto | 49 + .../canton/protocol/v0/common.proto | 26 + .../canton/protocol/v0/mediator.proto | 19 + .../protocol/v0/mediator_response.proto | 30 + .../protocol/v0/participant_transaction.proto | 43 + .../protocol/v0/participant_transfer.proto | 15 + .../canton/protocol/v0/sequencing.proto | 78 + .../canton/protocol/v0/storage.proto | 12 + .../canton/protocol/v0/synchronization.proto | 22 + 
.../canton/protocol/v0/topology.proto | 155 + .../v0/traffic_control_parameters.proto | 20 + .../v0/versioned-google-rpc-status.proto | 17 + .../canton/protocol/v1/common.proto | 26 + .../canton/protocol/v1/mediator.proto | 18 + .../protocol/v1/mediator_response.proto | 32 + .../canton/protocol/v1/merkle.proto | 45 + .../protocol/v1/participant_transaction.proto | 117 + .../protocol/v1/participant_transfer.proto | 54 + .../canton/protocol/v1/sequencing.proto | 76 + .../canton/protocol/v1/signed_content.proto | 20 + .../canton/protocol/v1/synchronization.proto | 21 + .../canton/protocol/v1/topology.proto | 97 + .../canton/protocol/v2/domain_params.proto | 59 + .../canton/protocol/v2/mediator.proto | 34 + .../protocol/v2/mediator_response.proto | 35 + .../protocol/v2/participant_transaction.proto | 84 + .../protocol/v2/participant_transfer.proto | 49 + .../canton/protocol/v2/topology.proto | 331 ++ .../canton/protocol/v3/mediator.proto | 42 + .../protocol/v3/participant_transaction.proto | 43 + .../canton/protocol/v4/synchronization.proto | 37 + .../canton/pruning/admin/v0/pruning.proto | 86 + .../digitalasset/canton/scalapb/package.proto | 14 + .../time/admin/v0/domain_time_service.proto | 35 + .../time/admin/v0/time_tracker_config.proto | 26 + .../canton/time/v0/time_proof.proto | 13 + .../admin/v0/initialization_service.proto | 53 + .../v0/topology_aggregation_service.proto | 100 + .../topology/admin/v0/topology_ext.proto | 29 + .../v0/topology_manager_read_service.proto | 210 ++ .../v0/topology_manager_write_service.proto | 207 ++ .../admin/v1/initialization_service.proto | 65 + .../v1/topology_manager_read_service.proto | 294 ++ .../v1/topology_manager_write_service.proto | 127 + .../traffic/v0/member_traffic_status.proto | 33 + .../canton/v0/trace_context.proto | 14 + .../version/untyped_versioned_message.proto | 25 + .../main/protobuf/google/rpc/package.proto | 13 + .../src/main/resources/rewrite-appender.xml | 373 ++ 
.../main/resources/rewrite-async-appender.xml | 18 + .../com/daml/nonempty/NonEmptyUtil.scala | 51 + .../canton/ProtoDeserializationError.scala | 97 + .../scala/com/digitalasset/canton/Tags.scala | 139 + .../common/domain/ServiceAgreement.scala | 49 + .../concurrent/DirectExecutionContext.scala | 35 + .../concurrent/ExecutionContextMonitor.scala | 118 + .../ExecutorServiceExtensions.scala | 47 + .../canton/concurrent/FutureSupervisor.scala | 166 + .../concurrent/HasFutureSupervision.scala | 29 + .../concurrent/IdlenessExecutorService.scala | 104 + .../canton/concurrent/SupervisedPromise.scala | 32 + .../canton/concurrent/Threading.scala | 313 ++ .../canton/config/ApiLoggingConfig.scala | 46 + .../canton/config/CacheConfig.scala | 93 + .../canton/config/CantonRequireTypes.scala | 593 +++ .../config/DomainTimeTrackerConfig.scala | 104 + .../canton/config/LoggingConfig.scala | 20 + .../canton/config/ProcessingTimeouts.scala | 86 + .../config/QueryCostMonitoringConfig.scala | 16 + .../config/RefinedNonNegativeDuration.scala | 478 +++ .../config/SequencerConnectionConfig.scala | 64 + .../canton/config/ServerConfig.scala | 384 ++ .../canton/config/StorageConfig.scala | 423 +++ .../canton/config/TestingConfigInternal.scala | 30 + .../config/TimeProofRequestConfig.scala | 78 + .../canton/crypto/CryptoApi.scala | 187 + .../canton/crypto/CryptoKeys.scala | 372 ++ .../canton/crypto/Encryption.scala | 590 +++ .../com/digitalasset/canton/crypto/Hash.scala | 224 ++ .../canton/crypto/HashBuilder.scala | 110 + .../canton/crypto/HashPurpose.scala | 89 + .../com/digitalasset/canton/crypto/Hkdf.scala | 126 + .../com/digitalasset/canton/crypto/Hmac.scala | 210 ++ .../canton/crypto/JavaKeyConverter.scala | 152 + .../digitalasset/canton/crypto/Nonce.scala | 57 + .../digitalasset/canton/crypto/Random.scala | 68 + .../com/digitalasset/canton/crypto/Salt.scala | 167 + .../digitalasset/canton/crypto/Signing.scala | 541 +++ .../canton/crypto/SyncCryptoApiProvider.scala | 642 ++++ 
.../canton/crypto/X509CertificatePem.scala | 56 + .../crypto/store/CryptoPrivateStore.scala | 168 + .../store/CryptoPrivateStoreExtended.scala | 247 ++ .../crypto/store/CryptoPublicStore.scala | 223 ++ .../store/db/DbCryptoPrivateStore.scala | 272 ++ .../crypto/store/db/DbCryptoPublicStore.scala | 159 + .../memory/InMemoryCryptoPrivateStore.scala | 197 + .../memory/InMemoryCryptoPublicStore.scala | 77 + .../canton/data/ActionDescription.scala | 538 +++ .../canton/data/CantonTimestamp.scala | 129 + .../canton/data/CantonTimestampSecond.scala | 136 + .../canton/data/CommonMetadata.scala | 131 + .../digitalasset/canton/data/Counter.scala | 66 + .../canton/data/FullInformeeTree.scala | 151 + .../canton/data/FullTransactionViewTree.scala | 59 + .../canton/data/GenTransactionTree.scala | 342 ++ .../canton/data/GenTransferViewTree.scala | 91 + .../digitalasset/canton/data/Informee.scala | 108 + .../canton/data/InformeeTree.scala | 177 + .../canton/data/KeyResolution.scala | 94 + .../data/LightTransactionViewTree.scala | 224 ++ .../digitalasset/canton/data/MerkleSeq.scala | 654 ++++ .../digitalasset/canton/data/MerkleTree.scala | 256 ++ .../canton/data/ParticipantMetadata.scala | 109 + .../data/ParticipantTransactionView.scala | 41 + .../digitalasset/canton/data/PeanoQueue.scala | 109 + .../canton/data/PeanoTreeQueue.scala | 158 + .../data/ProcessedDisclosedContract.scala | 54 + .../canton/data/RepairContract.scala | 20 + .../canton/data/SubmitterMetadata.scala | 205 + .../digitalasset/canton/data/Timestamp.scala | 43 + .../canton/data/TransactionSubviews.scala | 129 + .../canton/data/TransactionView.scala | 502 +++ .../canton/data/TransactionViewTree.scala | 110 + .../canton/data/TransferInViewTree.scala | 536 +++ .../canton/data/TransferOutViewTree.scala | 470 +++ .../data/TransferSubmitterMetadata.scala | 29 + .../canton/data/ViewCommonData.scala | 143 + .../canton/data/ViewParticipantData.scala | 483 +++ .../canton/data/ViewPosition.scala | 191 + 
.../digitalasset/canton/data/ViewTree.scala | 47 + .../digitalasset/canton/data/ViewType.scala | 67 + .../digitalasset/canton/data/Witnesses.scala | 89 + .../environment/CantonNodeParameters.scala | 101 + .../canton/error/CantonError.scala | 272 ++ .../canton/error/CantonErrorGroups.scala | 67 + .../canton/error/DecodedRpcStatus.scala | 147 + .../canton/error/MediatorError.scala | 114 + .../canton/error/TransactionError.scala | 51 + .../canton/external/BackgroundRunner.scala | 393 ++ .../canton/health/AtomicHealthElement.scala | 55 + .../health/CloseableHealthElement.scala | 22 + .../canton/health/ComponentHealthState.scala | 151 + .../canton/health/ComponentStatus.scala | 59 + .../health/CompositeHealthElement.scala | 110 + .../canton/health/HealthComponent.scala | 38 + .../canton/health/HealthElement.scala | 226 ++ .../canton/health/HealthListener.scala | 34 + .../canton/health/HealthService.scala | 64 + .../health/MutableHealthComponent.scala | 109 + .../health/ServiceHealthStatusManager.scala | 15 + .../health/ToComponentHealthState.scala | 13 + .../canton/lifecycle/ClosingException.scala | 21 + .../canton/lifecycle/FlagCloseable.scala | 146 + .../canton/lifecycle/FlagCloseableAsync.scala | 60 + .../lifecycle/FutureUnlessShutdown.scala | 340 ++ .../canton/lifecycle/Lifecycle.scala | 190 + .../canton/lifecycle/OnShutdownRunner.scala | 124 + .../lifecycle/PerformUnlessClosing.scala | 271 ++ .../lifecycle/PromiseUnlessShutdown.scala | 53 + .../lifecycle/ShutdownFailedException.scala | 10 + .../canton/lifecycle/UnlessShutdown.scala | 126 + .../canton/lifecycle/package.scala | 13 + .../canton/logging/LastErrorsAppender.scala | 187 + .../canton/logging/RewritingAppender.scala | 189 + .../logging/pretty/CantonPrettyPrinter.scala | 60 + .../canton/logging/pretty/Pretty.scala | 72 + .../logging/pretty/PrettyInstances.scala | 352 ++ .../logging/pretty/PrettyPrinting.scala | 25 + .../canton/logging/pretty/PrettyUtil.scala | 186 + .../canton/metrics/DbStorageMetrics.scala 
| 129 + .../digitalasset/canton/metrics/Gauges.scala | 138 + .../canton/metrics/MetricHandle.scala | 49 + .../canton/metrics/MetricsFactoryType.scala | 19 + .../metrics/SequencerClientMetrics.scala | 147 + .../canton/networking/Endpoint.scala | 73 + .../networking/grpc/ApiRequestLogger.scala | 274 ++ .../CantonCommunityServerInterceptors.scala | 60 + .../networking/grpc/CantonGrpcUtil.scala | 264 ++ .../networking/grpc/CantonServerBuilder.scala | 210 ++ .../grpc/ClientChannelBuilder.scala | 164 + .../canton/networking/grpc/GrpcError.scala | 235 ++ .../com/digitalasset/canton/package.scala | 145 + .../canton/protocol/AgreementText.scala | 10 + .../protocol/CantonContractIdVersion.scala | 106 + .../canton/protocol/ConfirmationPolicy.scala | 222 ++ .../canton/protocol/ContractMetadata.scala | 150 + .../canton/protocol/CreatedContract.scala | 107 + .../canton/protocol/DomainParameters.scala | 743 ++++ .../protocol/DomainParametersLookup.scala | 108 + .../protocol/DriverContractMetadata.scala | 57 + .../protocol/GlobalKeySerialization.scala | 64 + .../protocol/HasSerializableContract.scala | 14 + .../canton/protocol/InputContract.scala | 55 + .../canton/protocol/LfHashSyntax.scala | 27 + .../canton/protocol/Phase37Processor.scala | 71 + .../canton/protocol/RefIdentifierSyntax.scala | 21 + .../canton/protocol/ResolvedKey.scala | 34 + .../canton/protocol/RollbackContext.scala | 121 + .../protocol/SerializableContract.scala | 201 + .../SerializableContractWithWitnesses.scala | 16 + .../SerializableDeduplicationPeriod.scala | 39 + .../SerializableRawContractInstance.scala | 118 + .../digitalasset/canton/protocol/Tags.scala | 219 ++ .../canton/protocol/TransferDomainId.scala | 85 + .../digitalasset/canton/protocol/Unicum.scala | 16 + .../protocol/messages/AcsCommitment.scala | 253 ++ .../protocol/messages/CausalityMessage.scala | 143 + .../DomainTopologyTransactionMessage.scala | 290 ++ .../messages/EncryptedViewMessage.scala | 575 +++ 
.../protocol/messages/EnvelopeContent.scala | 104 + .../protocol/messages/HasDomainId.scala | 10 + .../protocol/messages/HasRequestId.scala | 10 + .../protocol/messages/InformeeMessage.scala | 138 + .../protocol/messages/LocalVerdict.scala | 746 ++++ .../MalformedMediatorRequestResult.scala | 146 + .../protocol/messages/MediatorRequest.scala | 55 + .../protocol/messages/MediatorResponse.scala | 306 ++ .../protocol/messages/MediatorResult.scala | 34 + .../protocol/messages/ProtocolMessage.scala | 81 + .../RegisterTopologyTransactionRequest.scala | 121 + .../RegisterTopologyTransactionResponse.scala | 187 + .../protocol/messages/RootHashMessage.scala | 204 + .../messages/RootHashMessageRecipients.scala | 184 + .../messages/SignedProtocolMessage.scala | 229 ++ .../SignedProtocolMessageContent.scala | 51 + .../messages/TransactionResultMessage.scala | 139 + .../messages/TransferInMediatorMessage.scala | 130 + .../messages/TransferOutMediatorMessage.scala | 132 + .../protocol/messages/TransferResult.scala | 239 ++ .../TypedSignedProtocolMessageContent.scala | 115 + .../canton/protocol/messages/Verdict.scala | 219 ++ .../canton/protocol/messages/package.scala | 35 + .../canton/protocol/package.scala | 121 + .../canton/pruning/PruningPhase.scala | 73 + .../resource/DatabaseStorageError.scala | 64 + .../canton/resource/DbMigrations.scala | 345 ++ .../canton/resource/DbStorageSingle.scala | 181 + .../canton/resource/DbVersionCheck.scala | 183 + .../canton/resource/Storage.scala | 941 +++++ .../resource/TransactionalStoreUpdate.scala | 129 + .../sequencing/ApplicationHandler.scala | 161 + .../canton/sequencing/AsyncResult.scala | 50 + .../canton/sequencing/EnvelopeBox.scala | 118 + .../canton/sequencing/HandlerResult.scala | 55 + .../SequencedEventMonotonicityChecker.scala | 126 + .../sequencing/SequencerAggregator.scala | 295 ++ .../sequencing/SequencerAggregatorPekko.scala | 311 ++ .../sequencing/SequencerClientRecorder.scala | 68 + .../sequencing/SequencerConnection.scala 
| 207 ++ .../sequencing/SequencerConnections.scala | 193 + .../sequencing/TrafficControlParameters.scala | 74 + .../authentication/AuthenticationToken.scala | 73 + .../AuthenticationTokenProvider.scala | 177 + .../authentication/MemberAuthentication.scala | 157 + .../grpc/AuthenticationTokenManager.scala | 150 + .../authentication/grpc/Constant.scala | 38 + .../grpc/SequencerClientAuthentication.scala | 215 ++ .../client/DelayedSequencerClient.scala | 77 + .../client/PeriodicAcknowledgements.scala | 123 + .../sequencing/client/ReplayConfig.scala | 64 + .../sequencing/client/RequestSigner.scala | 60 + .../ResilientSequencerSubscriberPekko.scala | 311 ++ .../ResilientSequencerSubscription.scala | 407 ++ .../client/SendAsyncClientError.scala | 38 + .../sequencing/client/SendCallback.scala | 44 + .../canton/sequencing/client/SendResult.scala | 79 + .../sequencing/client/SendTracker.scala | 286 ++ .../canton/sequencing/client/SendType.scala | 24 + .../client/SequencedEventValidator.scala | 747 ++++ .../sequencing/client/SequencerClient.scala | 1352 +++++++ .../client/SequencerClientConfig.scala | 61 + .../client/SequencerClientFactory.scala | 343 ++ .../client/SequencerClientSend.scala | 62 + .../SequencerClientSubscriptionError.scala | 52 + ...SequencerClientSubscriptionException.scala | 11 + .../SequencerClientTransportFactory.scala | 181 + .../client/SequencerSubscription.scala | 119 + .../client/SequencerSubscriptionPekko.scala | 24 + .../client/SequencerTransportState.scala | 365 ++ .../client/SubscriptionErrorRetryPolicy.scala | 54 + .../SubscriptionErrorRetryPolicyPekko.scala | 32 + .../client/SubscriptionRetryDelayRule.scala | 43 + .../grpc/GrpcSequencerChannelBuilder.scala | 36 + .../canton/sequencing/client/package.scala | 17 + .../transports/GrpcSequencerClientAuth.scala | 99 + .../GrpcSequencerClientTransport.scala | 386 ++ .../GrpcSequencerClientTransportPekko.scala | 209 ++ .../GrpcSequencerSubscription.scala | 299 ++ 
.../GrpcSubscriptionErrorRetryPolicy.scala | 84 + ...rpcSubscriptionErrorRetryPolicyPekko.scala | 31 + .../transports/SequencerClientTransport.scala | 78 + .../SequencerClientTransportPekko.scala | 36 + ...layingEventsSequencerClientTransport.scala | 174 + ...playingSendsSequencerClientTransport.scala | 537 +++ .../CleanSequencerCounterTracker.scala | 114 + .../sequencing/handlers/CounterCapture.scala | 48 + .../handlers/HasReceivedEvent.scala | 38 + .../handlers/StoreSequencedEvent.scala | 59 + .../ThrottlingApplicationEventHandler.scala | 61 + .../handshake/HandshakeRequestError.scala | 10 + .../handshake/SequencerHandshake.scala | 113 + .../handshake/SupportsHandshake.scala | 21 + .../canton/sequencing/package.scala | 118 + .../protocol/AcknowledgeRequest.scala | 73 + .../sequencing/protocol/AggregationId.scala | 37 + .../sequencing/protocol/AggregationRule.scala | 99 + .../canton/sequencing/protocol/Batch.scala | 185 + .../sequencing/protocol/ClosedEnvelope.scala | 225 ++ .../canton/sequencing/protocol/Envelope.scala | 25 + .../protocol/HandshakeRequest.scala | 42 + .../protocol/HandshakeResponse.scala | 48 + .../sequencing/protocol/MessageId.scala | 30 + .../sequencing/protocol/OpenEnvelope.scala | 51 + .../sequencing/protocol/Recipient.scala | 191 + .../sequencing/protocol/Recipients.scala | 107 + .../sequencing/protocol/RecipientsTree.scala | 118 + .../sequencing/protocol/SendAsyncError.scala | 188 + .../sequencing/protocol/SequencedEvent.scala | 379 ++ .../protocol/SequencedEventTrafficState.scala | 51 + .../protocol/SequencerDeliverError.scala | 204 + .../sequencing/protocol/SignedContent.scala | 268 ++ .../protocol/SubmissionRequest.scala | 284 ++ .../protocol/SubscriptionRequest.scala | 63 + .../protocol/SubscriptionResponse.scala | 33 + .../TopologyStateForInitRequest.scala | 58 + .../TopologyStateForInitResponse.scala | 42 + .../sequencing/protocol/TrafficState.scala | 68 + .../serialization/DeterministicEncoding.scala | 277 ++ 
.../HasCryptographicEvidence.scala | 131 + .../canton/serialization/ProtoConverter.scala | 195 + .../canton/store/CursorPreheadStore.scala | 67 + .../canton/store/IndexedStringStore.scala | 210 ++ .../canton/store/PrunableByTime.scala | 57 + .../canton/store/SendTrackerStore.scala | 57 + .../canton/store/SequencedEventStore.scala | 398 ++ .../store/SequencerCounterTrackerStore.scala | 65 + .../canton/store/SessionKeyStore.scala | 166 + .../store/db/DbCursorPreheadStore.scala | 162 + .../canton/store/db/DbExceptions.scala | 19 + .../store/db/DbIndexedStringStore.scala | 75 + .../canton/store/db/DbPrunableByTime.scala | 148 + .../store/db/DbSequencedEventStore.scala | 418 +++ .../db/DbSequencerCounterTrackerStore.scala | 38 + .../canton/store/db/RequiredTypesCodec.scala | 53 + .../db/SequencerClientDiscriminator.scala | 52 + .../memory/InMemoryCursorPreheadStore.scala | 64 + .../memory/InMemoryIndexedStringStore.scala | 63 + .../store/memory/InMemoryPrunableByTime.scala | 54 + .../memory/InMemorySendTrackerStore.scala | 45 + .../memory/InMemorySequencedEventStore.scala | 240 ++ ...InMemorySequencerCounterTrackerStore.scala | 20 + .../com/digitalasset/canton/time/Clock.scala | 518 +++ .../canton/time/DomainTimeTracker.scala | 490 +++ .../canton/time/PeriodicAction.scala | 35 + .../canton/time/RefinedDurations.scala | 285 ++ .../canton/time/TimeAwaiter.scala | 118 + .../digitalasset/canton/time/TimeProof.scala | 162 + .../time/TimeProofRequestSubmitter.scala | 161 + .../canton/topology/DomainOutboxQueue.scala | 69 + .../canton/topology/Identifier.scala | 185 + .../canton/topology/KeyCollection.scala | 36 + .../canton/topology/MediatorRef.scala | 73 + .../digitalasset/canton/topology/Member.scala | 448 +++ .../canton/topology/TopologyManager.scala | 634 ++++ .../topology/TopologyManagerError.scala | 493 +++ .../topology/TopologyManagerStatus.scala | 17 + .../canton/topology/TopologyManagerX.scala | 426 +++ .../topology/TopologyStateProcessorX.scala | 420 +++ 
.../client/CachingDomainTopologyClient.scala | 615 +++ .../client/DomainTrafficStateClient.scala | 24 + .../IdentityProvidingServiceClient.scala | 835 +++++ .../StoreBasedDomainTopologyClient.scala | 919 +++++ .../StoreBasedDomainTopologyClientX.scala | 720 ++++ .../processing/AuthorizationChainX.scala | 54 + .../processing/AuthorizationGraph.scala | 340 ++ .../processing/AuthorizationGraphX.scala | 458 +++ ...nTopologyTransactionMessageValidator.scala | 280 ++ ...ogyTransactionAuthorizationValidator.scala | 359 ++ ...gyTransactionAuthorizationValidatorX.scala | 380 ++ .../SnapshotAuthorizationValidator.scala | 192 + .../processing/TerminateProcessing.scala | 39 + .../topology/processing/TopologyTimes.scala | 46 + .../TopologyTimestampPlusEpsilonTracker.scala | 427 +++ .../TopologyTransactionProcessor.scala | 539 +++ .../TopologyTransactionProcessorCommon.scala | 359 ++ .../TopologyTransactionProcessorX.scala | 238 ++ .../TopologyTransactionSubscriber.scala | 55 + .../TransactionAuthorizationValidator.scala | 240 ++ .../TransactionAuthorizationValidatorX.scala | 325 ++ ...pologyStateForInititalizationService.scala | 80 + .../canton/topology/store/TopologyStore.scala | 947 +++++ .../topology/store/TopologyStoreX.scala | 380 ++ .../store/TopologyTransactionCollection.scala | 298 ++ .../TopologyTransactionCollectionX.scala | 195 + .../topology/store/db/DbTopologyStore.scala | 1022 +++++ .../topology/store/db/DbTopologyStoreX.scala | 714 ++++ .../store/memory/InMemoryTopologyStore.scala | 621 ++++ .../store/memory/InMemoryTopologyStoreX.scala | 516 +++ .../transaction/ParticipantAttributes.scala | 144 + .../SignedTopologyTransaction.scala | 203 + .../SignedTopologyTransactionX.scala | 237 ++ .../transaction/TopologyMapping.scala | 644 ++++ .../transaction/TopologyMappingX.scala | 1552 ++++++++ .../transaction/TopologyMappingXChecks.scala | 226 ++ .../transaction/TopologyTransaction.scala | 514 +++ .../transaction/TopologyTransactionX.scala | 197 + 
.../topology/transaction/UniquePath.scala | 146 + .../tracing/SerializableTraceContext.scala | 105 + .../canton/tracing/Spanning.scala | 126 + .../canton/traffic/EventCostCalculator.scala | 48 + .../canton/util/BinaryFileUtil.scala | 55 + .../canton/util/ByteStringUtil.scala | 103 + .../digitalasset/canton/util/ChainUtil.scala | 21 + .../digitalasset/canton/util/Checked.scala | 292 ++ .../digitalasset/canton/util/CheckedT.scala | 346 ++ .../digitalasset/canton/util/DelayUtil.scala | 101 + .../canton/util/EitherTUtil.scala | 182 + .../digitalasset/canton/util/EitherUtil.scala | 53 + .../digitalasset/canton/util/ErrorUtil.scala | 131 + .../canton/util/FutureInstances.scala | 39 + .../digitalasset/canton/util/FutureUtil.scala | 132 + .../canton/util/HasFlushFuture.scala | 120 + .../digitalasset/canton/util/HexString.scala | 54 + .../canton/util/LazyValWithContext.scala | 56 + .../canton/util/LengthLimitedByteString.scala | 173 + .../canton/util/LfTransactionUtil.scala | 224 ++ .../digitalasset/canton/util/LoggerUtil.scala | 114 + .../digitalasset/canton/util/MapsUtil.scala | 243 ++ .../canton/util/MessageRecorder.scala | 126 + .../digitalasset/canton/util/MonadUtil.scala | 125 + .../com/digitalasset/canton/util/NoCopy.scala | 11 + .../digitalasset/canton/util/OptionUtil.scala | 79 + .../canton/util/OrderedBucketMergeHub.scala | 1174 ++++++ .../digitalasset/canton/util/PekkoUtil.scala | 776 ++++ .../util/PriorityBlockingQueueUtil.scala | 10 + .../canton/util/ResourceUtil.scala | 92 + .../digitalasset/canton/util/ShowUtil.scala | 153 + .../canton/util/SimpleExecutionQueue.scala | 347 ++ .../canton/util/SingleUseCell.scala | 36 + .../canton/util/StackTraceUtil.scala | 32 + .../digitalasset/canton/util/Thereafter.scala | 166 + .../canton/util/TrieMapUtil.scala | 40 + .../digitalasset/canton/util/TryUtil.scala | 27 + .../digitalasset/canton/util/package.scala | 21 + .../canton/util/retry/Jitter.scala | 135 + .../digitalasset/canton/util/retry/LICENSE | 20 + 
.../canton/util/retry/Policy.scala | 525 +++ .../canton/util/retry/RetryEither.scala | 95 + .../canton/util/retry/RetryUtil.scala | 271 ++ .../canton/util/retry/Success.scala | 42 + .../canton/util/retry/package.scala | 8 + .../version/HasProtocolVersionedWrapper.scala | 916 +++++ .../version/HasVersionedToByteString.scala | 25 + .../canton/version/HasVersionedWrapper.scala | 274 ++ .../canton/version/ProtocolVersion.scala | 336 ++ .../canton/version/VersionedMessage.scala | 29 + .../digitalasset/canton/version/version.scala | 22 + .../concurrent/BatchingExecutorCanton.scala | 8 + .../slick/util/AsyncExecutorWithMetrics.scala | 454 +++ .../util/AsyncExecutorWithShutdown.scala | 8 + .../src/main/scala/slick/util/LICENSE.txt | 25 + .../ledger/javaapi/data/ActiveContracts.java | 55 + .../ledger/javaapi/data/ArchivedEvent.java | 105 + .../com/daml/ledger/javaapi/data/Bool.java | 58 + .../daml/ledger/javaapi/data/Checkpoint.java | 68 + .../com/daml/ledger/javaapi/data/Command.java | 73 + .../javaapi/data/CommandsSubmission.java | 278 ++ .../javaapi/data/CompletionEndResponse.java | 44 + .../javaapi/data/CompletionStreamRequest.java | 114 + .../data/CompletionStreamResponse.java | 78 + .../daml/ledger/javaapi/data/Contract.java | 6 + .../ledger/javaapi/data/ContractFilter.java | 63 + .../daml/ledger/javaapi/data/ContractId.java | 44 + .../ledger/javaapi/data/ContractMetadata.java | 84 + .../data/CreateAndExerciseCommand.java | 95 + .../ledger/javaapi/data/CreateCommand.java | 69 + .../javaapi/data/CreateUserRequest.java | 62 + .../javaapi/data/CreateUserResponse.java | 49 + .../ledger/javaapi/data/CreatedEvent.java | 361 ++ .../ledger/javaapi/data/DamlCollectors.java | 64 + .../daml/ledger/javaapi/data/DamlEnum.java | 76 + .../daml/ledger/javaapi/data/DamlGenMap.java | 89 + .../daml/ledger/javaapi/data/DamlList.java | 87 + .../com/daml/ledger/javaapi/data/DamlMap.java | 17 + .../ledger/javaapi/data/DamlOptional.java | 80 + .../daml/ledger/javaapi/data/DamlRecord.java | 
181 + .../daml/ledger/javaapi/data/DamlTextMap.java | 90 + .../com/daml/ledger/javaapi/data/Date.java | 47 + .../com/daml/ledger/javaapi/data/Decimal.java | 16 + .../javaapi/data/DeleteUserRequest.java | 45 + .../javaapi/data/DeleteUserResponse.java | 24 + .../javaapi/data/DisclosedContract.java | 27 + .../com/daml/ledger/javaapi/data/Event.java | 56 + .../daml/ledger/javaapi/data/EventUtils.java | 33 + .../javaapi/data/ExerciseByKeyCommand.java | 100 + .../ledger/javaapi/data/ExerciseCommand.java | 101 + .../ledger/javaapi/data/ExercisedEvent.java | 216 ++ .../com/daml/ledger/javaapi/data/Filter.java | 97 + .../ledger/javaapi/data/FiltersByParty.java | 71 + .../data/GetActiveContractsRequest.java | 85 + .../data/GetActiveContractsResponse.java | 90 + .../data/GetEventsByContractIdResponse.java | 37 + .../data/GetEventsByContractKeyResponse.java | 47 + .../data/GetFlatTransactionResponse.java | 51 + .../javaapi/data/GetLedgerEndResponse.java | 52 + .../javaapi/data/GetPackageResponse.java | 64 + .../data/GetPackageStatusResponse.java | 49 + .../javaapi/data/GetTransactionResponse.java | 51 + .../data/GetTransactionTreesResponse.java | 64 + .../javaapi/data/GetTransactionsRequest.java | 71 + .../javaapi/data/GetTransactionsResponse.java | 64 + .../ledger/javaapi/data/GetUserRequest.java | 45 + .../ledger/javaapi/data/GetUserResponse.java | 48 + .../javaapi/data/GrantUserRightsRequest.java | 57 + .../javaapi/data/GrantUserRightsResponse.java | 50 + .../daml/ledger/javaapi/data/Identifier.java | 115 + .../ledger/javaapi/data/InclusiveFilter.java | 156 + .../com/daml/ledger/javaapi/data/Int64.java | 44 + .../ledger/javaapi/data/LedgerOffset.java | 126 + .../javaapi/data/ListUserRightsRequest.java | 45 + .../javaapi/data/ListUserRightsResponse.java | 48 + .../ledger/javaapi/data/ListUsersRequest.java | 55 + .../javaapi/data/ListUsersResponse.java | 48 + .../daml/ledger/javaapi/data/NoFilter.java | 18 + .../com/daml/ledger/javaapi/data/Numeric.java | 51 + 
.../com/daml/ledger/javaapi/data/Party.java | 46 + .../com/daml/ledger/javaapi/data/Record.java | 87 + .../javaapi/data/RevokeUserRightsRequest.java | 57 + .../data/RevokeUserRightsResponse.java | 50 + .../javaapi/data/SubmitAndWaitRequest.java | 146 + .../javaapi/data/SubmitCommandsRequest.java | 571 +++ .../ledger/javaapi/data/SubmitRequest.java | 146 + .../daml/ledger/javaapi/data/Template.java | 47 + .../com/daml/ledger/javaapi/data/Text.java | 46 + .../daml/ledger/javaapi/data/Timestamp.java | 92 + .../daml/ledger/javaapi/data/Transaction.java | 141 + .../javaapi/data/TransactionFilter.java | 44 + .../ledger/javaapi/data/TransactionTree.java | 167 + .../daml/ledger/javaapi/data/TreeEvent.java | 56 + .../com/daml/ledger/javaapi/data/Unit.java | 32 + .../data/UnsupportedEventTypeException.java | 10 + .../ledger/javaapi/data/UpdateSubmission.java | 247 ++ .../com/daml/ledger/javaapi/data/User.java | 165 + .../com/daml/ledger/javaapi/data/Value.java | 146 + .../com/daml/ledger/javaapi/data/Variant.java | 98 + .../ledger/javaapi/data/WorkflowEvent.java | 10 + .../ledger/javaapi/data/codegen/ByKey.java | 57 + .../ledger/javaapi/data/codegen/Choice.java | 53 + .../ledger/javaapi/data/codegen/Contract.java | 106 + .../data/codegen/ContractCompanion.java | 214 ++ .../javaapi/data/codegen/ContractDecoder.java | 52 + .../javaapi/data/codegen/ContractId.java | 97 + .../data/codegen/ContractTypeCompanion.java | 89 + .../codegen/ContractWithInterfaceView.java | 33 + .../javaapi/data/codegen/ContractWithKey.java | 65 + .../javaapi/data/codegen/CreateAnd.java | 57 + .../ledger/javaapi/data/codegen/Created.java | 33 + .../ledger/javaapi/data/codegen/DamlEnum.java | 16 + .../javaapi/data/codegen/DamlRecord.java | 24 + .../javaapi/data/codegen/DefinedDataType.java | 52 + .../javaapi/data/codegen/Exercised.java | 32 + .../javaapi/data/codegen/Exercises.java | 37 + .../javaapi/data/codegen/HasCommands.java | 21 + .../data/codegen/InterfaceCompanion.java | 101 + 
.../data/codegen/PrimitiveValueDecoders.java | 218 ++ .../ledger/javaapi/data/codegen/Update.java | 112 + .../javaapi/data/codegen/ValueDecoder.java | 74 + .../ledger/javaapi/data/codegen/Variant.java | 16 + .../data/codegen/json/JsonLfDecoder.java | 30 + .../data/codegen/json/JsonLfDecoders.java | 338 ++ .../data/codegen/json/JsonLfEncoder.java | 11 + .../data/codegen/json/JsonLfEncoders.java | 220 ++ .../data/codegen/json/JsonLfReader.java | 238 ++ .../data/codegen/json/JsonLfWriter.java | 92 + .../data/codegen/json/JsonLfDecodersTest.java | 722 ++++ .../data/codegen/json/JsonLfEncodersTest.java | 293 ++ .../data/codegen/json/TestHelpers.java | 212 ++ .../src/test/resources/logback-test.xml | 21 + .../ledger/javaapi/data/CommandSpec.scala | 22 + .../ledger/javaapi/data/DamlRecordSpec.scala | 70 + .../daml/ledger/javaapi/data/EventSpec.scala | 87 + .../daml/ledger/javaapi/data/Generators.scala | 408 ++ .../data/GetActiveContractsRequestSpec.scala | 23 + .../data/SubmitCommandsRequestSpec.scala | 189 + .../ledger/javaapi/data/TimestampSpec.scala | 35 + .../javaapi/data/TransactionFilterSpec.scala | 22 + .../daml/ledger/javaapi/data/ValueSpec.scala | 107 + canton-3x/community/common/.gitignore | 1 + .../daml/CantonExamples/CantonExamples.daml | 11 + .../daml/CantonExamples/ContractKeys.daml | 193 + .../main/daml/CantonExamples/Divulgence.daml | 41 + .../src/main/daml/CantonExamples/Iou.daml | 113 + .../src/main/daml/CantonExamples/LockIou.daml | 42 + .../src/main/daml/CantonExamples/Paint.daml | 112 + .../main/daml/CantonExamples/SafePaint.daml | 76 + .../src/main/daml/CantonExamples/Swap.daml | 46 + .../src/main/daml/CantonExamples/daml.yaml | 10 + .../migration/canton/h2/dev/V999__dev.sha256 | 1 + .../db/migration/canton/h2/dev/V999__dev.sql | 146 + .../canton/h2/dev/reference/V998__blocks.sql | 9 + .../canton/h2/stable/V1_1__initial.sha256 | 1 + .../canton/h2/stable/V1_1__initial.sql | 874 +++++ .../h2/stable/V2__changes_for_2.3.sha256 | 1 + 
.../canton/h2/stable/V2__changes_for_2.3.sql | 20 + .../h2/stable/V3__changes_for_2.4.sha256 | 1 + .../canton/h2/stable/V3__changes_for_2.4.sql | 66 + .../h2/stable/V4__changes_for_2.5.sha256 | 1 + .../canton/h2/stable/V4__changes_for_2.5.sql | 12 + .../h2/stable/V5__changes_for_2.6.sha256 | 1 + .../canton/h2/stable/V5__changes_for_2.6.sql | 13 + .../h2/stable/V6__changes_for_2.7.sha256 | 1 + .../canton/h2/stable/V6__changes_for_2.7.sql | 43 + .../canton/h2/stable/V7__changes_for_2.8.sql | 53 + .../canton/h2/stable/V8__changes_for_3.0.sql | 4 + .../canton/postgres/dev/V999__dev.sha256 | 1 + .../canton/postgres/dev/V999__dev.sql | 148 + .../postgres/dev/reference/V998__blocks.sql | 9 + .../postgres/stable/V1_1__initial.sha256 | 1 + .../canton/postgres/stable/V1_1__initial.sql | 886 +++++ .../stable/V2__changes_for_2.3.sha256 | 1 + .../postgres/stable/V2__changes_for_2.3.sql | 20 + .../stable/V3__changes_for_2.4.sha256 | 1 + .../postgres/stable/V3__changes_for_2.4.sql | 35 + .../stable/V4__changes_for_2.5.sha256 | 1 + .../postgres/stable/V4__changes_for_2.5.sql | 15 + .../stable/V5__changes_for_2.6.sha256 | 1 + .../postgres/stable/V5__changes_for_2.6.sql | 13 + .../stable/V6__changes_for_2.7.sha256 | 1 + .../postgres/stable/V6__changes_for_2.7.sql | 43 + .../postgres/stable/V7__changes_for_2.8.sql | 52 + .../postgres/stable/V8__changes_for_3.0.sql | 4 + .../migration/canton/recompute-sha256sums.sh | 12 + .../main/scala/com/daml/lf/CantonOnly.scala | 72 + .../admin/grpc/GrpcPruningScheduler.scala | 150 + ...sedRegisterTopologyTransactionHandle.scala | 336 ++ .../domain/SequencerConnectClient.scala | 102 + .../grpc/GrpcSequencerConnectClient.scala | 210 ++ .../SequencerConnectClientInterceptor.scala | 30 + .../domain/grpc/SequencerInfoLoader.scala | 336 ++ .../canton/config/BatchAggregatorConfig.scala | 40 + .../canton/config/CantonConfigUtil.scala | 39 + .../canton/config/ConfigDefaults.scala | 68 + .../canton/config/CryptoConfig.scala | 54 + 
.../canton/config/CryptoProvider.scala | 129 + .../canton/config/DbCacheConfig.scala | 22 + .../canton/config/DeprecatedConfigUtils.scala | 148 + .../canton/config/InitConfig.scala | 59 + .../canton/config/LocalNodeConfig.scala | 73 + .../canton/config/MonitoringConfig.scala | 54 + .../canton/config/ProtocolConfig.scala | 12 + .../canton/config/TopologyXConfig.scala | 24 + .../digitalasset/canton/crypto/Blake2xb.scala | 33 + .../canton/crypto/CryptoFactory.scala | 323 ++ .../crypto/CryptoHandshakeValidator.scala | 69 + .../digitalasset/canton/crypto/LtHash16.scala | 90 + .../crypto/admin/grpc/GrpcVaultService.scala | 368 ++ .../SP800HashDRBGSecureRandom.scala | 165 + .../crypto/provider/CryptoKeyConverter.scala | 95 + .../provider/jce/JceJavaConverter.scala | 228 ++ .../provider/jce/JcePrivateCrypto.scala | 155 + .../crypto/provider/jce/JcePureCrypto.scala | 885 +++++ .../provider/jce/JceSecurityProvider.scala | 16 + .../provider/tink/TinkJavaConverter.scala | 396 ++ .../crypto/provider/tink/TinkKeyFormat.scala | 72 + .../provider/tink/TinkPrivateCrypto.scala | 161 + .../crypto/provider/tink/TinkPureCrypto.scala | 355 ++ .../canton/data/ConcurrentHMap.scala | 66 + .../canton/data/TaskScheduler.scala | 358 ++ .../data/TransactionViewDecomposition.scala | 112 + .../TransactionViewDecompositionFactory.scala | 185 + .../canton/environment/BootstrapStage.scala | 350 ++ .../canton/environment/CantonNode.scala | 14 + .../environment/CantonNodeBootstrap.scala | 526 +++ .../CantonNodeBootstrapCommon.scala | 345 ++ .../environment/CantonNodeBootstrapX.scala | 456 +++ ...DomainTopologyInitializationCallback.scala | 96 + .../environment/NodeFactoryArguments.scala | 66 + .../canton/health/GrpcHealthReporter.scala | 92 + .../canton/health/GrpcHealthServer.scala | 63 + .../canton/health/admin/data/NodeStatus.scala | 478 +++ .../health/admin/grpc/GrpcStatusService.scala | 80 + .../canton/lifecycle/StartAndCloseable.scala | 69 + .../logging/CantonFilterEvaluator.scala | 43 + 
.../canton/logging/CantonJsonEncoder.scala | 20 + .../logging/ThrottleFilterEvaluator.scala | 82 + .../canton/metrics/MetricDoc.scala | 376 ++ .../canton/metrics/MetricsHelper.scala | 26 + .../digitalasset/canton/metrics/package.scala | 20 + .../grpc/ErrorLoggingStreamObserver.scala | 30 + .../grpc/ForwardingStreamObserver.scala | 28 + .../networking/grpc/GrpcDynamicService.scala | 90 + .../grpc/RecordingStreamObserver.scala | 45 + .../networking/grpc/StaticGrpcServices.scala | 75 + .../canton/protocol/ContractSalt.scala | 53 + .../canton/protocol/PackageInfoService.scala | 36 + .../canton/protocol/StoredParties.scala | 52 + .../canton/protocol/TransactionMetadata.scala | 46 + .../canton/protocol/UnicumGenerator.scala | 261 ++ .../protocol/WellFormedTransaction.scala | 647 ++++ .../canton/protocol/WithContractHash.scala | 27 + .../canton/protocol/WithTransactionId.scala | 24 + .../messages/ConfirmationRequest.scala | 71 + .../canton/resource/IdempotentInsert.scala | 112 + .../canton/resource/StorageDebug.scala | 149 + .../canton/scheduler/Schedule.scala | 127 + .../canton/scheduler/Scheduler.scala | 62 + .../canton/scheduler/Schedulers.scala | 14 + .../canton/sequencing/DelayLogger.scala | 54 + .../GrpcSequencerAuthenticationSupport.scala | 14 + .../handlers/DiscardIgnoredEvents.scala | 71 + .../sequencing/handlers/EnvelopeOpener.scala | 53 + .../sequencing/handlers/StripSignature.scala | 48 + .../sequencing/protocol/VerifyActive.scala | 37 + .../sequencing/protocol/WithRecipients.scala | 23 + .../canton/store/PruningSchedulerStore.scala | 68 + .../store/db/DbBulkUpdateProcessor.scala | 172 + .../store/db/DbPruningSchedulerStore.scala | 144 + .../store/db/DbSendTrackerStore_Unused.scala | 103 + .../InMemoryPruningSchedulerStore.scala | 67 + .../canton/time/GrpcDomainTimeService.scala | 175 + .../digitalasset/canton/time/HasUptime.scala | 16 + .../canton/time/TestingTimeService.scala | 35 + .../topology/DomainOutboxDispatchHelper.scala | 327 ++ 
.../canton/topology/DomainOutboxStatus.scala | 16 + .../digitalasset/canton/topology/NodeId.scala | 7 + .../PartyToParticipantComputations.scala | 47 + .../topology/QueueBasedDomainOutboxX.scala | 346 ++ .../topology/StoreBasedDomainOutbox.scala | 718 ++++ .../GrpcIdentityInitializationServiceX.scala | 61 + .../grpc/GrpcInitializationService.scala | 81 + .../grpc/GrpcTopologyAggregationService.scala | 227 ++ .../grpc/GrpcTopologyManagerReadService.scala | 512 +++ .../GrpcTopologyManagerReadServiceX.scala | 662 ++++ .../GrpcTopologyManagerWriteService.scala | 246 ++ .../GrpcTopologyManagerWriteServiceX.scala | 219 ++ .../topology/store/DomainTopologyStore.scala | 83 + .../topology/store/InitializationStore.scala | 119 + .../canton/tracing/BatchTracing.scala | 15 + .../canton/traffic/MemberTrafficStatus.scala | 65 + .../canton/traffic/TopUpEvent.scala | 69 + .../canton/traffic/TopUpQueue.scala | 119 + .../canton/util/BatchAggregator.scala | 281 ++ .../canton/util/DamlPackageLoader.scala | 62 + .../canton/util/HasReadWriteLock.scala | 30 + .../com/digitalasset/canton/util/IdUtil.scala | 28 + .../canton/util/IterableUtil.scala | 157 + .../canton/util/OptionUtils.scala | 15 + .../digitalasset/canton/util/PathUtils.scala | 19 + .../digitalasset/canton/util/RangeUtil.scala | 29 + .../canton/util/RateLimiter.scala | 65 + .../digitalasset/canton/util/SeqUtil.scala | 43 + .../digitalasset/canton/util/SetsUtil.scala | 12 + .../canton/util/SnapshottableList.scala | 38 + .../canton/util/TextFileUtil.scala | 23 + .../com/digitalasset/canton/util/UByte.scala | 48 + .../canton/version/CantonVersion.scala | 115 + .../DamlLfVersionToProtocolVersions.scala | 33 + .../ProtocolVersionCompatibility.scala | 291 ++ .../ReleaseVersionToProtocolVersions.scala | 53 + .../canton/version/ReleaseVersions.scala | 18 + .../canton/annotations/UnstableTest.java | 27 + .../sequencing/protocol/RecipientTest.scala | 102 + .../sequencing/protocol/RecipientsTest.scala | 154 + 
.../protocol/RecipientsTreeTest.scala | 65 + .../digitalasset/canton/scalapb/package.proto | 14 + .../com/digitalasset/canton/test/hello.proto | 21 + .../canton/test/parsing-attack.proto | 79 + .../digitalasset/canton/test/sequencing.proto | 16 + .../canton/test/versioned-messages.proto | 26 + .../test/resources/blake2xb-golden-tests.txt | 256 ++ .../src/test/resources/logback-test.xml | 63 + .../src/test/resources/tls/participant.pem | 52 + .../com/digitalasset/canton/CheckedTest.scala | 35 + .../canton/ComparesLfTransactions.scala | 103 + .../canton/DefaultDamlValues.scala | 66 + .../com/digitalasset/canton/Generators.scala | 54 + .../digitalasset/canton/GeneratorsLf.scala | 67 + .../digitalasset/canton/HasActorSystem.scala | 25 + .../canton/NeedsNewLfContractIds.scala | 18 + .../canton/SequentialTestByKey.scala | 46 + .../digitalasset/canton/UnstableTest.scala | 12 + .../DirectExecutionContextTest.scala | 77 + .../ExecutionContextMonitorTest.scala | 89 + .../IdlenessExecutorServiceTest.scala | 146 + .../canton/concurrent/ThreadingTest.scala | 354 ++ .../canton/config/DbConfigTest.scala | 144 + .../config/DeprecatedConfigUtilsTest.scala | 121 + .../canton/config/GeneratorsConfig.scala | 54 + .../config/LengthLimitedStringTest.scala | 44 + .../LengthLimitedStringWrapperTest.scala | 91 + .../RefinedNonNegativeDurationTest.scala | 312 ++ .../canton/crypto/Blake2xbTest.scala | 47 + ...CryptoPureApiCantonCompatibilityTest.scala | 46 + .../canton/crypto/EncodableString.scala | 12 + .../canton/crypto/GeneratorsCrypto.scala | 110 + .../canton/crypto/HashBuilderTest.scala | 64 + .../canton/crypto/HashPurposeTest.scala | 31 + .../digitalasset/canton/crypto/HashTest.scala | 32 + .../digitalasset/canton/crypto/HkdfTest.scala | 123 + .../digitalasset/canton/crypto/HmacTest.scala | 34 + .../crypto/JavaPublicKeyConverterTest.scala | 139 + .../canton/crypto/LtHash16Test.scala | 63 + .../digitalasset/canton/crypto/SaltTest.scala | 44 + 
.../digitalasset/canton/crypto/TestSalt.scala | 21 + .../SP800HashDRBGSecureRandomTest.scala | 37 + .../crypto/provider/jce/JceCryptoTest.scala | 109 + .../symbolic/SymbolicCryptoTest.scala | 51 + .../crypto/provider/tink/TinkCryptoTest.scala | 72 + .../CryptoPrivateStoreExtendedTest.scala | 168 + .../crypto/store/CryptoPrivateStoreTest.scala | 78 + .../crypto/store/CryptoPublicStoreTest.scala | 122 + .../store/db/DbCryptoPrivateStoreTest.scala | 39 + .../store/db/DbCryptoPublicStoreTest.scala | 38 + .../CryptoPrivateStoreTestInMemory.scala | 16 + .../CryptoPublicStoreTestInMemory.scala | 13 + .../canton/data/ActionDescriptionTest.scala | 107 + .../data/CantonTimestampSecondTest.scala | 88 + .../canton/data/CantonTimestampTest.scala | 22 + .../canton/data/ConcurrentHMapTest.scala | 103 + .../canton/data/GenTransactionTreeTest.scala | 632 ++++ .../canton/data/GeneratorsData.scala | 341 ++ .../canton/data/GeneratorsTransferData.scala | 259 ++ .../canton/data/MerkleSeqTest.scala | 204 + .../canton/data/MerkleTreeTest.scala | 300 ++ .../canton/data/PeanoQueueTest.scala | 237 ++ .../canton/data/TaskSchedulerTest.scala | 410 ++ .../TransactionViewDecompositionTest.scala | 297 ++ .../canton/data/TransactionViewTest.scala | 258 ++ .../canton/data/ViewPositionTest.scala | 80 + .../digitalasset/canton/data/package.scala | 17 + .../environment/BootstrapStageTest.scala | 61 + .../canton/error/CantonErrorTest.scala | 121 + .../error/ErrorLoggingContextSpec.scala | 118 + .../canton/error/GeneratorsError.scala | 11 + .../canton/examples/IouSyntax.scala | 19 + .../canton/health/ComponentStatusTest.scala | 64 + .../tests/benchmarks/LtHash16Benchmark.scala | 40 + .../canton/ledger/api/GeneratorsApi.scala | 19 + .../ledger/offset/GeneratorsOffset.scala | 18 + .../lifecycle/FutureUnlessShutdownTest.scala | 53 + .../canton/lifecycle/LifecycleTest.scala | 68 + .../lifecycle/PromiseUnlessShutdownTest.scala | 66 + .../lifecycle/StartAndCloseableTest.scala | 201 + 
.../logging/NamedEventCapturingLogger.scala | 145 + .../canton/logging/NamedLoggingTest.scala | 96 + ...PrettyPrintingImplicitResolutionTest.scala | 11 + .../logging/pretty/PrettyPrintingTest.scala | 206 ++ .../logging/pretty/PrettyTestInstances.scala | 100 + .../canton/metrics/LoadGaugeTest.scala | 74 + .../canton/metrics/MetricDocTest.scala | 62 + .../grpc/ApiRequestLoggerTest.scala | 866 +++++ .../networking/grpc/CantonGrpcUtilTest.scala | 403 ++ .../CommunityClientChannelBuilderTest.scala | 35 + .../protobuf/ProtobufParsingAttackTest.scala | 120 + .../UntypedVersionedMessageTest.scala | 93 + .../CantonContractIdVersionTest.scala | 55 + .../protocol/ConfirmationPolicyTest.scala | 245 ++ .../protocol/ContractMetadataTest.scala | 55 + .../canton/protocol/ExampleTransaction.scala | 99 + .../protocol/ExampleTransactionFactory.scala | 2816 ++++++++++++++ .../ExampleTransactionFactoryTest.scala | 117 + .../canton/protocol/GeneratorsProtocol.scala | 231 ++ .../protocol/SerializableContractTest.scala | 124 + .../SerializableRawContractInstanceTest.scala | 83 + .../canton/protocol/VerdictTest.scala | 84 + .../WellFormedTransactionMergeTest.scala | 338 ++ .../protocol/WellFormedTransactionTest.scala | 337 ++ .../protocol/messages/AcsCommitmentTest.scala | 81 + .../messages/GeneratorsMessages.scala | 382 ++ .../messages/MediatorResponseTest.scala | 58 + .../SignedProtocolMessageContentTest.scala | 39 + .../messages/TopologyTransactionTest.scala | 135 + .../canton/resource/DbStorageSingleTest.scala | 125 + .../canton/scheduler/CronTest.scala | 97 + .../canton/sequencing/AsyncResultTest.scala | 21 + .../canton/sequencing/DelayLoggerTest.scala | 51 + .../sequencing/GeneratorsSequencing.scala | 45 + ...equencedEventMonotonicityCheckerTest.scala | 203 + .../SequencerAggregatorPekkoTest.scala | 552 +++ .../sequencing/SequencerConnectionTest.scala | 52 + .../sequencing/SequencerTestUtils.scala | 79 + .../grpc/AuthenticationTokenManagerTest.scala | 222 ++ 
.../SequencerClientAuthenticationTest.scala | 156 + .../client/PeriodicAcknowledgementsTest.scala | 99 + ...esilientSequencerSubscriberPekkoTest.scala | 393 ++ .../ResilientSequencerSubscriptionTest.scala | 509 +++ .../sequencing/client/SendTrackerTest.scala | 292 ++ .../client/SequencedEventTestFixture.scala | 238 ++ .../client/SequencedEventValidatorTest.scala | 503 +++ .../client/SequencerAggregatorTest.scala | 538 +++ .../client/SequencerClientTest.scala | 1112 ++++++ .../client/TestSequencerClientSend.scala | 53 + .../GrpcSequencerSubscriptionTest.scala | 230 ++ ...GrpcSubscriptionErrorRetryPolicyTest.scala | 48 + .../handlers/CounterCaptureTest.scala | 64 + .../protocol/GeneratorsProtocol.scala | 159 + .../protocol/SequencedEventTest.scala | 82 + .../protocol/SubmissionRequestTest.scala | 171 + .../DeterministicEncodingTest.scala | 227 ++ .../HasCryptographicEvidenceTest.scala | 188 + .../serialization/ProtoConverterTest.scala | 40 + .../canton/store/CursorPreheadStoreTest.scala | 125 + .../canton/store/PrunableByTimeTest.scala | 111 + .../store/PruningSchedulerStoreTest.scala | 137 + .../canton/store/SendTrackerStoreTest.scala | 60 + .../store/SequencedEventStoreTest.scala | 929 +++++ .../SequencerCounterTrackerStoreTest.scala | 18 + .../store/db/DatabaseDeadlockTest.scala | 237 ++ .../store/db/DatabaseLimitNbParamTest.scala | 99 + .../store/db/DbIndexedStringsStoreTest.scala | 115 + .../db/DbPruningSchedulerStoreTest.scala | 41 + .../db/DbSendTrackerTrackerStoreTest.scala | 40 + .../store/db/DbSequencedEventStoreTest.scala | 47 + .../DbSequencerCounterTrackerStoreTest.scala | 47 + .../store/db/DbStorageIdempotency.scala | 48 + .../digitalasset/canton/store/db/DbTest.scala | 112 + .../PruningSchedulerStoreTestInMemory.scala | 18 + .../SendTrackerTrackerStoreTestInMemory.scala | 17 + .../SequencedEventStoreTestInMemory.scala | 20 + ...encerCounterTrackerStoreTestInMemory.scala | 23 + .../digitalasset/canton/time/ClockTest.scala | 242 ++ 
.../canton/time/DomainTimeTrackerTest.scala | 383 ++ .../canton/time/GeneratorsTime.scala | 44 + .../canton/time/PeriodicActionTest.scala | 113 + .../canton/time/RefinedDurationsTest.scala | 45 + .../time/TimeProofRequestSubmitterTest.scala | 177 + .../canton/time/TimeProofTestUtil.scala | 37 + .../canton/topology/GeneratorsTopology.scala | 27 + .../canton/topology/IdentifierTest.scala | 156 + .../PartyToParticipantComputationsTest.scala | 76 + .../topology/TestingIdentityFactoryTest.scala | 262 ++ .../topology/TopologyTransactionTest.scala | 127 + .../CachingDomainTopologyClientTest.scala | 141 + .../IdentityProvidingServiceClientTest.scala | 144 + .../StoreBasedDomainTopologyClientTest.scala | 522 +++ ...toreBasedDomainTopologySnapshotXTest.scala | 16 + .../processing/AuthorizationGraphTest.scala | 232 ++ .../processing/AuthorizationGraphXTest.scala | 253 ++ ...ologyTransactionMessageValidatorTest.scala | 239 ++ ...ransactionAuthorizationValidatorTest.scala | 386 ++ ...ansactionAuthorizationValidatorTestX.scala | 701 ++++ .../SnapshotAuthorizationValidatorTest.scala | 248 ++ ...ologyTimestampPlusEpsilonTrackerTest.scala | 284 ++ .../TopologyTransactionProcessorTest.scala | 229 ++ .../TopologyTransactionTestFactory.scala | 140 + .../TopologyTransactionTestFactoryX.scala | 136 + .../UnionspaceAuthorizationGraphXTest.scala | 268 ++ ...ogyStateForInitializationServiceTest.scala | 133 + .../store/InitializationStoreTest.scala | 92 + .../topology/store/TopologyStoreTest.scala | 1214 ++++++ .../topology/store/TopologyStoreXTest.scala | 324 ++ .../store/TopologyStoreXTestBase.scala | 107 + .../store/TopologyStoreXTestData.scala | 217 ++ .../TopologyTransactionCollectionTest.scala | 161 + ...ogyStateForInitializationServiceTest.scala | 17 + .../store/db/DbTopologyStoreTest.scala | 176 + .../store/db/DbTopologyStoreXHelper.scala | 48 + .../store/db/DbTopologyStoreXTest.scala | 45 + ...ogyStateForInitializationServiceTest.scala | 20 + 
.../memory/InMemoryTopologyStoreXTest.scala | 19 + .../memory/TopologyStoreTestInMemory.scala | 21 + .../transaction/GeneratorsTransaction.scala | 133 + .../transaction/TopologyChangeOpTest.scala | 85 + ...ValidatingTopologyMappingXChecksTest.scala | 277 ++ .../GrpcTelemetryContextPropagationTest.scala | 101 + .../SerializableTraceContextTest.scala | 54 + .../canton/tracing/SpanningTest.scala | 147 + .../canton/traffic/TopUpQueueTest.scala | 166 + .../canton/util/BatchAggregatorTest.scala | 245 ++ .../canton/util/ByteStringUtilTest.scala | 205 + .../canton/util/CheckedTTest.scala | 730 ++++ .../canton/util/CheckedTest.scala | 537 +++ .../canton/util/DamlPackageLoaderTest.scala | 22 + .../canton/util/DelayUtilTest.scala | 49 + .../canton/util/EitherUtilTest.scala | 57 + .../canton/util/HexStringTest.scala | 44 + .../canton/util/IterableUtilTest.scala | 94 + .../canton/util/LazyValWithContextTest.scala | 84 + .../util/LengthLimitedByteStringTest.scala | 57 + .../canton/util/LfGenerator.scala | 100 + .../canton/util/LfTransactionBuilder.scala | 92 + .../canton/util/LoggerUtilTest.scala | 32 + .../canton/util/MapsUtilTest.scala | 26 + .../canton/util/MessageRecorderTest.scala | 41 + .../canton/util/MonadUtilTest.scala | 31 + .../util/OrderedBucketMergeHubTest.scala | 811 ++++ .../canton/util/PathUtilsTest.scala | 49 + .../canton/util/PekkoUtilTest.scala | 842 +++++ .../canton/util/RangeUtilTest.scala | 24 + .../canton/util/RateLimiterTest.scala | 89 + .../canton/util/ResourceUtilTest.scala | 228 ++ .../canton/util/SeqUtilTest.scala | 51 + .../util/SimpleExecutionQueueTest.scala | 336 ++ .../canton/util/SingleUseCellTest.scala | 34 + .../canton/util/ThereafterTest.scala | 386 ++ .../canton/util/TraverseTest.scala | 302 ++ .../canton/util/TrieMapUtilTest.scala | 32 + .../canton/util/retry/JitterSpec.scala | 96 + .../canton/util/retry/PolicyTest.scala | 811 ++++ .../canton/util/retry/SuccessSpec.scala | 56 + .../canton/version/CantonVersionTest.scala | 106 + 
.../DamlLfVersionToProtocolVersionsTest.scala | 30 + .../HasProtocolVersionedWrapperTest.scala | 222 ++ .../canton/version/HasTestCloseContext.scala | 23 + .../ProtocolVersionCompatibilityTest.scala | 33 + .../canton/version/ProtocolVersionTest.scala | 117 + .../SerializationDeserializationTest.scala | 105 + .../canton/version/TestProtocolVersions.scala | 13 + .../daml/lf/testing/SampleParties.scala | 14 + .../src/main/daml/ai-analysis/AIAnalysis.daml | 114 + .../demo/src/main/daml/ai-analysis/daml.yaml | 10 + .../demo/src/main/daml/bank/Bank.daml | 172 + .../demo/src/main/daml/bank/daml.yaml | 9 + .../demo/src/main/daml/doctor/Doctor.daml | 116 + .../demo/src/main/daml/doctor/daml.yaml | 12 + .../health-insurance/HealthInsurance.daml | 162 + .../src/main/daml/health-insurance/daml.yaml | 10 + .../daml/medical-records/MedicalRecord.daml | 107 + .../src/main/daml/medical-records/daml.yaml | 9 + .../com/digitalasset/canton/demo/DemoUI.scala | 887 +++++ .../canton/demo/ReferenceDemoScript.scala | 656 ++++ .../com/digitalasset/canton/demo/Runner.scala | 91 + .../community/demo/src/pack/demo/README.md | 65 + .../demo/src/pack/demo/config/banking.conf | 15 + .../demo/src/pack/demo/config/medical.conf | 15 + .../src/pack/demo/config/participant1.conf | 18 + .../src/pack/demo/config/participant2.conf | 18 + .../src/pack/demo/config/participant3.conf | 18 + .../src/pack/demo/config/participant4.conf | 18 + .../src/pack/demo/config/participant5.conf | 20 + .../src/pack/demo/config/participant6.conf | 18 + .../demo/src/pack/demo/demo-native.sc | 23 + .../community/demo/src/pack/demo/demo.conf | 19 + .../community/demo/src/pack/demo/demo.sc | 34 + .../pack/demo/images/canton-logo-small.png | Bin 0 -> 7250 bytes .../demo/src/pack/demo/images/canton-logo.png | Bin 0 -> 10188 bytes .../demo/src/pack/demo/images/canton.ico | Bin 0 -> 55358 bytes .../src/pack/demo/images/create-slides.sh | 5 + .../demo/src/pack/demo/images/demo0.png | Bin 0 -> 1005475 bytes 
.../demo/src/pack/demo/images/demo1.png | Bin 0 -> 191081 bytes .../demo/src/pack/demo/images/demo10.png | Bin 0 -> 202540 bytes .../demo/src/pack/demo/images/demo11.png | Bin 0 -> 126355 bytes .../demo/src/pack/demo/images/demo12.png | Bin 0 -> 424383 bytes .../demo/src/pack/demo/images/demo13.png | Bin 0 -> 148179 bytes .../demo/src/pack/demo/images/demo14.png | Bin 0 -> 24867 bytes .../demo/src/pack/demo/images/demo15.png | Bin 0 -> 24905 bytes .../demo/src/pack/demo/images/demo16.png | Bin 0 -> 1027294 bytes .../demo/src/pack/demo/images/demo17.png | Bin 0 -> 102370 bytes .../demo/src/pack/demo/images/demo18.png | Bin 0 -> 384524 bytes .../demo/src/pack/demo/images/demo19.png | Bin 0 -> 145762 bytes .../demo/src/pack/demo/images/demo2.png | Bin 0 -> 162794 bytes .../demo/src/pack/demo/images/demo20.png | Bin 0 -> 421462 bytes .../demo/src/pack/demo/images/demo21.png | Bin 0 -> 139534 bytes .../demo/src/pack/demo/images/demo22.png | Bin 0 -> 23871 bytes .../demo/src/pack/demo/images/demo23.png | Bin 0 -> 23712 bytes .../demo/src/pack/demo/images/demo24.png | Bin 0 -> 157329 bytes .../demo/src/pack/demo/images/demo25.png | Bin 0 -> 189584 bytes .../demo/src/pack/demo/images/demo26.png | Bin 0 -> 231931 bytes .../demo/src/pack/demo/images/demo27.png | Bin 0 -> 89120 bytes .../demo/src/pack/demo/images/demo28.png | Bin 0 -> 24399 bytes .../demo/src/pack/demo/images/demo29.png | Bin 0 -> 24445 bytes .../demo/src/pack/demo/images/demo3.png | Bin 0 -> 180567 bytes .../demo/src/pack/demo/images/demo30.png | Bin 0 -> 198937 bytes .../demo/src/pack/demo/images/demo31.png | Bin 0 -> 64616 bytes .../demo/src/pack/demo/images/demo32.png | Bin 0 -> 242551 bytes .../demo/src/pack/demo/images/demo33.png | Bin 0 -> 81845 bytes .../demo/src/pack/demo/images/demo34.png | Bin 0 -> 260149 bytes .../demo/src/pack/demo/images/demo35.png | Bin 0 -> 194129 bytes .../demo/src/pack/demo/images/demo36.png | Bin 0 -> 245417 bytes .../demo/src/pack/demo/images/demo37.png | Bin 0 -> 122609 
bytes .../demo/src/pack/demo/images/demo38.png | Bin 0 -> 24491 bytes .../demo/src/pack/demo/images/demo39.png | Bin 0 -> 24437 bytes .../demo/src/pack/demo/images/demo4.png | Bin 0 -> 2962951 bytes .../demo/src/pack/demo/images/demo40.png | Bin 0 -> 129939 bytes .../demo/src/pack/demo/images/demo41.png | Bin 0 -> 155100 bytes .../demo/src/pack/demo/images/demo42.png | Bin 0 -> 224190 bytes .../demo/src/pack/demo/images/demo43.png | Bin 0 -> 187628 bytes .../demo/src/pack/demo/images/demo44.png | Bin 0 -> 25522 bytes .../demo/src/pack/demo/images/demo45.png | Bin 0 -> 25562 bytes .../demo/src/pack/demo/images/demo46.png | Bin 0 -> 867391 bytes .../demo/src/pack/demo/images/demo47.png | Bin 0 -> 867795 bytes .../demo/src/pack/demo/images/demo48.png | Bin 0 -> 385155 bytes .../demo/src/pack/demo/images/demo49.png | Bin 0 -> 385289 bytes .../demo/src/pack/demo/images/demo5.png | Bin 0 -> 2962951 bytes .../demo/src/pack/demo/images/demo50.png | Bin 0 -> 173335 bytes .../demo/src/pack/demo/images/demo51.png | Bin 0 -> 173629 bytes .../demo/src/pack/demo/images/demo52.png | Bin 0 -> 23824 bytes .../demo/src/pack/demo/images/demo53.png | Bin 0 -> 23682 bytes .../demo/src/pack/demo/images/demo54.png | Bin 0 -> 131038 bytes .../demo/src/pack/demo/images/demo55.png | Bin 0 -> 91180 bytes .../demo/src/pack/demo/images/demo56.png | Bin 0 -> 149588 bytes .../demo/src/pack/demo/images/demo57.png | Bin 0 -> 106524 bytes .../demo/src/pack/demo/images/demo6.png | Bin 0 -> 25038 bytes .../demo/src/pack/demo/images/demo7.png | Bin 0 -> 25184 bytes .../demo/src/pack/demo/images/demo8.png | Bin 0 -> 205730 bytes .../demo/src/pack/demo/images/demo9.png | Bin 0 -> 108456 bytes .../demo/src/pack/demo/images/left.png | Bin 0 -> 33560 bytes .../demo/src/pack/demo/images/right.png | Bin 0 -> 34092 bytes .../demo/src/pack/start-demo-win.cmd | 3 + .../demo/src/pack/start-demo.command | 46 + .../tests/DemoExampleIntegrationTest.scala | 28 + .../domain/src/main/protobuf/buf.yaml | 4 + 
.../v0/domain_initialization_service.proto | 26 + .../domain/admin/v0/domain_service.proto | 41 + ...rise_mediator_administration_service.proto | 44 + ...ise_sequencer_administration_service.proto | 112 + ...erprise_sequencer_connection_service.proto | 35 + .../v0/mediator_initialization_service.proto | 54 + .../v0/sequencer_administration_service.proto | 41 + .../v0/sequencer_initialization_service.proto | 40 + .../sequencer_initialization_snapshot.proto | 12 + .../admin/v0/sequencer_version_service.proto | 15 + .../sequencer_initialization_snapshot.proto | 57 + .../v2/mediator_initialization_service.proto | 31 + .../v2/sequencer_initialization_service.proto | 53 + .../canton/domain/scalapb/package.proto | 14 + .../canton/domain/DomainNodeBootstrap.scala | 832 +++++ .../canton/domain/DomainNodeParameters.scala | 28 + .../domain/admin/grpc/GrpcDomainService.scala | 52 + .../canton/domain/config/DomainConfig.scala | 217 ++ .../domain/config/DomainInitConfig.scala | 11 + .../config/DomainParametersConfig.scala | 122 + .../config/store/BaseNodeSettingsStore.scala | 80 + .../store/DomainNodeSettingsStore.scala | 112 + .../store/StoredDomainNodeSettings.scala | 8 + .../DomainNodeSequencerClientFactory.scala | 162 + ...opologyManagerIdentityInitialization.scala | 111 + .../EmbeddedMediatorInitialization.scala | 147 + .../TopologyManagementInitialization.scala | 312 ++ .../domain/manager/DomainManagerRuntime.scala | 30 + .../manager/DomainManagerRuntimeFactory.scala | 24 + .../ConfirmationResponseProcessor.scala | 726 ++++ .../domain/mediator/FinalizedResponse.scala | 65 + .../canton/domain/mediator/Mediator.scala | 415 +++ .../domain/mediator/MediatorEvent.scala | 52 + .../mediator/MediatorEventDeduplicator.scala | 199 + .../mediator/MediatorEventsProcessor.scala | 337 ++ .../mediator/MediatorRuntimeFactory.scala | 194 + .../mediator/MediatorStateInspection.scala | 29 + .../domain/mediator/MediatorVerdict.scala | 60 + .../domain/mediator/ResponseAggregation.scala | 379 
++ .../domain/mediator/ResponseAggregator.scala | 246 ++ .../domain/mediator/VerdictMessageId.scala | 38 + .../domain/mediator/VerdictSender.scala | 428 +++ .../gprc/InitializeMediatorRequest.scala | 115 + .../gprc/InitializeMediatorResponse.scala | 84 + .../store/FinalizedResponseStore.scala | 285 ++ .../store/MediatorDeduplicationStore.scala | 348 ++ .../domain/mediator/store/MediatorState.scala | 205 + .../canton/domain/metrics/DomainMetrics.scala | 241 ++ .../domain/sequencing/SequencerRuntime.scala | 321 ++ .../sequencing/SequencerRuntimeFactory.scala | 190 + .../admin/client/SequencerAdminClient.scala | 26 + .../grpc/InitializeSequencerRequest.scala | 141 + .../grpc/InitializeSequencerResponse.scala | 46 + .../AuthenticationTokenCache.scala | 88 + .../MemberAuthenticationService.scala | 456 +++ .../MemberAuthenticationStore.scala | 250 ++ .../grpc/AsyncForwardingListener.scala | 44 + .../grpc/IdentityContextHelper.scala | 22 + ...encerAuthenticationServerInterceptor.scala | 172 + .../SequencerConnectServerInterceptor.scala | 60 + .../integrations/state/EphemeralState.scala | 95 + .../sequencing/sequencer/BaseSequencer.scala | 193 + .../sequencer/DatabaseSequencer.scala | 403 ++ .../DirectSequencerClientTransport.scala | 191 + .../sequencing/sequencer/EventSignaller.scala | 60 + .../sequencer/FetchLatestEventsFlow.scala | 54 + .../sequencer/InFlightAggregation.scala | 270 ++ .../sequencer/InFlightAggregationUpdate.scala | 61 + .../sequencer/InFlightAggregations.scala | 26 + .../sequencing/sequencer/LedgerIdentity.scala | 68 + .../LocalSequencerStateEventSignaller.scala | 111 + .../PartitionedTimestampGenerator.scala | 33 + .../sequencer/PollingEventSignaller.scala | 39 + .../sequencing/sequencer/Sequencer.scala | 259 ++ .../sequencer/SequencerConfig.scala | 57 + .../sequencer/SequencerFactory.scala | 102 + .../sequencer/SequencerPruningStatus.scala | 182 + .../sequencer/SequencerReader.scala | 628 ++++ .../sequencer/SequencerSnapshot.scala | 226 ++ 
.../sequencer/SequencerValidations.scala | 76 + .../sequencer/SequencerWriter.scala | 435 +++ .../sequencer/SequencerWriterConfig.scala | 139 + .../sequencer/SequencerWriterSource.scala | 594 +++ .../sequencer/SignatureVerifier.scala | 52 + .../sequencer/WriterStartupError.scala | 62 + .../errors/CreateSubscriptionError.scala | 35 + .../errors/RegisterMemberError.scala | 17 + .../sequencer/errors/SequencerError.scala | 239 ++ .../errors/SequencerWriteError.scala | 32 + .../domain/sequencing/sequencer/package.scala | 12 + .../sequencer/store/DbSequencerStore.scala | 1300 +++++++ .../store/InMemorySequencerStore.scala | 527 +++ .../store/SequencerMemberCache.scala | 46 + .../sequencer/store/SequencerStore.scala | 799 ++++ .../store/SequencerWriterStore.scala | 107 + .../sequencer/store/VersionedStatus.scala | 49 + .../traffic/MemberTrafficSnapshot.scala | 54 + .../SequencerMemberRateLimiterResult.scala | 14 + .../traffic/SequencerRateLimitManager.scala | 102 + .../traffic/SequencerTrafficStatus.scala | 8 + .../service/CloseNotification.scala | 24 + .../service/DirectSequencerSubscription.scala | 128 + .../DirectSequencerSubscriptionFactory.scala | 60 + .../service/GrpcHandshakeService.scala | 43 + .../service/GrpcManagedSubscription.scala | 172 + .../GrpcSequencerAdministrationService.scala | 56 + .../GrpcSequencerAuthenticationService.scala | 260 ++ .../service/GrpcSequencerConnectService.scala | 111 + .../service/GrpcSequencerService.scala | 859 +++++ ...rpcSequencerTopologyBootstrapService.scala | 84 + .../service/GrpcSequencerVersionService.scala | 15 + .../sequencing/service/SubscriptionPool.scala | 178 + .../server/DynamicDomainGrpcServer.scala | 86 + .../domain/service/HandshakeValidator.scala | 35 + .../service/ServiceAgreementManager.scala | 152 + .../ServiceAgreementAcceptanceStore.scala | 60 + .../DbServiceAgreementAcceptanceStore.scala | 77 + ...emoryServiceAgreementAcceptanceStore.scala | 50 + .../topology/DomainTopologyDispatcher.scala | 1206 ++++++ 
.../topology/DomainTopologyManager.scala | 534 +++ .../DomainTopologyManagerEventHandler.scala | 141 + .../DomainTopologyManagerRequestService.scala | 507 +++ .../client/DomainInitializationObserver.scala | 95 + ...sterTopologyTransactionResponseStore.scala | 244 ++ .../store/DomainNodeSettingsStoreTest.scala | 97 + .../ConfirmationResponseProcessorTestV5.scala | 1376 +++++++ .../mediator/DefaultVerdictSenderTest.scala | 327 ++ .../MediatorEventDeduplicatorTest.scala | 402 ++ .../MediatorEventStageProcessorTest.scala | 333 ++ .../domain/mediator/MediatorStateTest.scala | 169 + .../canton/domain/mediator/MediatorTest.scala | 182 + .../mediator/ResponseAggregationTestV5.scala | 1417 +++++++ .../domain/mediator/TestVerdictSender.scala | 70 + .../store/FinalizedResponseStoreTest.scala | 181 + .../MediatorDeduplicationStoreTest.scala | 271 ++ .../domain/metrics/DomainTestMetrics.scala | 17 + .../AuthenticationTokenCacheTest.scala | 129 + .../MemberAuthenticationServiceTest.scala | 143 + .../MemberAuthenticationStoreTest.scala | 172 + ...veSequencerMemberAuthenticationStore.scala | 42 + ...rAuthenticationServerInterceptorTest.scala | 268 ++ .../sequencer/BaseSequencerTest.scala | 240 ++ .../sequencer/BftDomainSequencerApiTest.scala | 44 + .../sequencer/DatabaseSequencerApiTest.scala | 51 + .../sequencer/DomainSequencingTestUtils.scala | 36 + .../sequencer/FetchLatestEventsFlowTest.scala | 178 + ...ocalSequencerStateEventSignallerTest.scala | 104 + .../PartitionedTimestampGeneratorTest.scala | 67 + .../sequencer/SequencerApiTest.scala | 913 +++++ .../SequencerPruningStatusTest.scala | 99 + .../sequencer/SequencerReaderTest.scala | 845 +++++ .../sequencing/sequencer/SequencerTest.scala | 215 ++ .../sequencer/SequencerWriterSourceTest.scala | 480 +++ .../sequencer/SequencerWriterTest.scala | 188 + .../TestDatabaseSequencerConfig.scala | 14 + .../store/DbSequencerStoreTest.scala | 71 + .../MultiTenantedSequencerStoreTest.scala | 303 ++ 
.../store/NonBftDomainSequencerApiTest.scala | 31 + .../store/SequencerMemberCacheTest.scala | 44 + .../sequencer/store/SequencerStoreTest.scala | 1086 ++++++ .../store/SequencerStoreTestInMemory.scala | 13 + .../service/GrpcManagedSubscriptionTest.scala | 121 + .../GrpcSequencerIntegrationTest.scala | 408 ++ .../service/GrpcSequencerServiceTest.scala | 1066 ++++++ .../service/SubscriptionPoolTest.scala | 160 + .../service/ServiceAgreementManagerTest.scala | 85 + .../ServiceAgreementAcceptanceStoreTest.scala | 52 + ...bServiceAgreementAcceptanceStoreTest.scala | 45 + ...AgreementAcceptanceStoreTestInMemory.scala | 18 + .../DomainTopologyDispatcherTest.scala | 824 +++++ ...TopologyTransactionResponseStoreTest.scala | 130 + .../console/BufferedProcessLogger.scala | 23 + .../ConsoleEnvironmentTestHelpers.scala | 52 + .../canton/console/TestConsoleOutput.scala | 103 + .../BaseEnvironmentDefinition.scala | 34 + .../integration/BaseIntegrationTest.scala | 142 + .../integration/CommonTestAliases.scala | 34 + .../CommunityConfigTransforms.scala | 112 + .../CommunityEnvironmentDefinition.scala | 91 + .../canton/integration/CommunityTests.scala | 18 + .../ConcurrentEnvironmentLimiter.scala | 100 + .../canton/integration/EnvironmentSetup.scala | 201 + .../integration/EnvironmentSetupPlugin.scala | 39 + .../canton/integration/HasCycleUtils.scala | 57 + .../HasEnvironmentDefinition.scala | 11 + .../IntegrationTestUtilities.scala | 152 + .../canton/integration/NetworkBootstrap.scala | 24 + .../canton/integration/TestEnvironment.scala | 30 + .../canton/integration/package.scala | 14 + .../ScopedInMemoryMetricsFactory.scala | 29 + .../canton/version/TestProtocolVersions.scala | 17 + canton-3x/community/ledger-api/README.md | 23 + canton-3x/community/ledger-api/VERSION | 2 + .../docs/metering-report-schema.json | 59 + .../community/ledger-api/docs/post-process.sh | 11 + .../community/ledger-api/docs/rst_mmd.tmpl | 143 + .../com/daml/ledger/api/scalapb/package.proto | 14 + 
.../api/v1/active_contracts_service.proto | 65 + .../v1/admin/config_management_service.proto | 88 + .../identity_provider_config_service.proto | 150 + .../v1/admin/metering_report_service.proto | 58 + .../ledger/api/v1/admin/object_meta.proto | 52 + .../v1/admin/package_management_service.proto | 86 + .../admin/participant_pruning_service.proto | 59 + .../v1/admin/party_management_service.proto | 225 ++ .../v1/admin/user_management_service.proto | 331 ++ .../api/v1/command_completion_service.proto | 104 + .../daml/ledger/api/v1/command_service.proto | 72 + .../api/v1/command_submission_service.proto | 43 + .../com/daml/ledger/api/v1/commands.proto | 230 ++ .../com/daml/ledger/api/v1/completion.proto | 84 + .../ledger/api/v1/contract_metadata.proto | 29 + .../com/daml/ledger/api/v1/event.proto | 241 ++ .../ledger/api/v1/event_query_service.proto | 102 + .../ledger/api/v1/experimental_features.proto | 132 + .../api/v1/ledger_configuration_service.proto | 49 + .../api/v1/ledger_identity_service.proto | 37 + .../daml/ledger/api/v1/ledger_offset.proto | 43 + .../daml/ledger/api/v1/package_service.proto | 100 + .../ledger/api/v1/testing/time_service.proto | 56 + .../daml/ledger/api/v1/trace_context.proto | 14 + .../com/daml/ledger/api/v1/transaction.proto | 126 + .../ledger/api/v1/transaction_filter.proto | 108 + .../ledger/api/v1/transaction_service.proto | 160 + .../com/daml/ledger/api/v1/value.proto | 207 ++ .../daml/ledger/api/v1/version_service.proto | 73 + .../protobuf/com/daml/ledger/api/v2/README.md | 172 + .../api/v2/command_completion_service.proto | 73 + .../daml/ledger/api/v2/command_service.proto | 78 + .../api/v2/command_submission_service.proto | 57 + .../com/daml/ledger/api/v2/commands.proto | 117 + .../com/daml/ledger/api/v2/completion.proto | 81 + .../ledger/api/v2/event_query_service.proto | 58 + .../daml/ledger/api/v2/package_service.proto | 45 + .../ledger/api/v2/participant_offset.proto | 56 + .../com/daml/ledger/api/v2/reassignment.proto | 141 + 
.../ledger/api/v2/reassignment_command.proto | 89 + .../daml/ledger/api/v2/state_service.proto | 189 + .../ledger/api/v2/testing/time_service.proto | 44 + .../com/daml/ledger/api/v2/transaction.proto | 120 + .../ledger/api/v2/transaction_filter.proto | 29 + .../daml/ledger/api/v2/update_service.proto | 136 + .../daml/ledger/api/v2/version_service.proto | 22 + .../ledger-service/http-json-perf/README.md | 110 + .../http-json-perf/daml/LargeAcs.daml | 131 + .../release/json-api-perf-logback.xml | 23 + .../src/main/resources/gatling.conf | 128 + .../scala/com/daml/http/perf/Config.scala | 109 + .../main/scala/com/daml/http/perf/Main.scala | 220 ++ .../com/daml/http/perf/OracleRunner.scala | 42 + .../com/daml/http/perf/PostgresRunner.scala | 28 + .../daml/http/perf/QueryStoreBracket.scala | 80 + .../perf/scenario/AsyncQueryConstantAcs.scala | 61 + .../com/daml/http/perf/scenario/Checks.scala | 28 + .../scenario/CreateAndExerciseCommand.scala | 38 + .../http/perf/scenario/CreateCommand.scala | 33 + .../http/perf/scenario/ExerciseCommand.scala | 61 + .../perf/scenario/HasArchiveRequest.scala | 21 + .../http/perf/scenario/HasCreateRequest.scala | 51 + .../http/perf/scenario/HasQueryRequest.scala | 19 + .../http/perf/scenario/HasRandomAmount.scala | 14 + .../scenario/MultiUserQueryScenario.scala | 175 + .../http/perf/scenario/SimulationConfig.scala | 48 + .../perf/scenario/SyncQueryConstantAcs.scala | 48 + .../http/perf/scenario/SyncQueryMegaAcs.scala | 131 + .../http/perf/scenario/SyncQueryNewAcs.scala | 52 + .../perf/scenario/SyncQueryVariableAcs.scala | 50 + .../scala/http/FailureTestsCustomToken.scala | 6 + .../scala/http/FailureTestsUserToken.scala | 6 + .../failurelib/scala/http/FailureTests.scala | 484 +++ .../scala/http/HttpTestFixture.scala | 74 + .../scala/http/ToxicSandboxFixture.scala | 95 + .../itlib/resources/it/iouCreateCommand.json | 10 + canton-3x/community/ledger/ledger-README.md | 125 + .../ledger/ledger-api-auth-README.md | 17 + 
.../community/ledger/ledger-api-core/JCS.md | 75 + .../ledger/ledger-api-core/rootdoc.txt | 17 + .../ledger/ledger-api-core/src/.gitattributes | 3 + .../main/protobuf/daml/platform/index.proto | 22 + .../protobuf/daml/platform/page_tokens.proto | 15 + .../resources/db/migration/NO_AUTO_COPYRIGHT | 3 + .../h2database/V1__Append_only_schema.sha256 | 1 + .../h2database/V1__Append_only_schema.sql | 618 ++++ .../V10__non_optional_ledger_end.sha256 | 1 + .../oracle/V10__non_optional_ledger_end.sql | 12 + ...ticipant_side_command_deduplication.sha256 | 1 + ...participant_side_command_deduplication.sql | 5 + .../oracle/V12__participant_metering.sha256 | 1 + .../oracle/V12__participant_metering.sql | 24 + .../oracle/V13__remove_events_view.sha256 | 1 + .../oracle/V13__remove_events_view.sql | 4 + .../V14__modifiable_users_and_parties.sha256 | 1 + .../V14__modifiable_users_and_parties.sql | 31 + .../V15__creates_driver_metadata.sha256 | 1 + .../oracle/V15__creates_driver_metadata.sql | 1 + .../V16__identity_provider_config.sha256 | 1 + .../oracle/V16__identity_provider_config.sql | 7 + .../oracle/V17__etq_completions.sha256 | 1 + .../migration/oracle/V17__etq_completions.sql | 1 + .../oracle/V18__etq_tables_and_indexes.sha256 | 1 + .../oracle/V18__etq_tables_and_indexes.sql | 58 + ..._identity_provider_id_users_parties.sha256 | 1 + ...19__identity_provider_id_users_parties.sql | 5 + .../oracle/V1__Append_only_schema.sha256 | 1 + .../oracle/V1__Append_only_schema.sql | 548 +++ .../oracle/V20__etq_data_migration.sha256 | 1 + .../oracle/V20__etq_data_migration.sql | 123 + .../oracle/V21__etq_drop_tx_id_indexes.sha256 | 1 + .../oracle/V21__etq_drop_tx_id_indexes.sql | 3 + .../oracle/V22__audience_idp_config.sha256 | 1 + .../oracle/V22__audience_idp_config.sql | 1 + .../oracle/V23__add_domain_id.sha256 | 1 + .../migration/oracle/V23__add_domain_id.sql | 5 + .../V24__add_reassignment_events.sha256 | 1 + .../oracle/V24__add_reassignment_events.sql | 104 + 
...for_incomplete_reassignment_queries.sha256 | 1 + ...es_for_incomplete_reassignment_queries.sql | 8 + .../oracle/V26__add_key_maintainers.sha256 | 1 + .../oracle/V26__add_key_maintainers.sql | 2 + .../oracle/V27__add_trace_context.sha256 | 1 + .../oracle/V27__add_trace_context.sql | 6 + .../oracle/V28__drop_ledger_id.sha256 | 1 + .../migration/oracle/V28__drop_ledger_id.sql | 1 + .../V29__nullable_transfer_submitter.sha256 | 1 + .../V29__nullable_transfer_submitter.sql | 5 + .../oracle/V2__Drop_json_indexes.sha256 | 1 + .../oracle/V2__Drop_json_indexes.sql | 7 + .../oracle/V3__Add_string_interning.sha256 | 1 + .../oracle/V3__Add_string_interning.sql | 7 + ...ing_interning_columnt_to_parameters.sha256 | 1 + ...string_interning_columnt_to_parameters.sql | 5 + .../V5__activate_party_interning.sha256 | 1 + .../oracle/V5__activate_party_interning.sql | 7 + .../V6__activate_template_id_interning.sha256 | 1 + .../V6__activate_template_id_interning.sql | 19 + .../oracle/V7_1__drop_event_id_indexes.sha256 | 1 + .../oracle/V7_1__drop_event_id_indexes.sql | 6 + .../oracle/V7__add_filter_table.sha256 | 1 + .../migration/oracle/V7__add_filter_table.sql | 11 + .../oracle/V8__add_user_managment.sha256 | 1 + .../oracle/V8__add_user_managment.sql | 28 + .../V9__add_transaction_metering.sha256 | 1 + .../oracle/V9__add_transaction_metering.sql | 13 + .../V100_0__Append_only_schema.sha256 | 1 + .../postgres/V100_0__Append_only_schema.sql | 434 +++ .../V100_1__Append_only_cleanup.sha256 | 1 + .../postgres/V100_1__Append_only_cleanup.sql | 185 + .../V100_2__Append_only_indices.sha256 | 1 + .../postgres/V100_2__Append_only_indices.sql | 129 + .../V100_3__Append_only_vacuum.sha256 | 1 + .../postgres/V100_3__Append_only_vacuum.sql | 13 + ...configuration_from_parameters_table.sha256 | 1 + ...op_configuration_from_parameters_table.sql | 4 + .../V102__add_initialization_indices.sha256 | 1 + .../V102__add_initialization_indices.sql | 14 + .../V103__remove_duplicate_index.sha256 | 1 + 
.../postgres/V103__remove_duplicate_index.sql | 12 + .../V104__rename_packages_size_column.sha256 | 1 + .../V104__rename_packages_size_column.sql | 5 + ...V105__drop_unique_index_constraints.sha256 | 1 + .../V105__drop_unique_index_constraints.sql | 9 + ...__add_rejection_status_proto_column.sha256 | 1 + ...106__add_rejection_status_proto_column.sql | 15 + .../V107__parameters_table_cleanup.sha256 | 1 + .../V107__parameters_table_cleanup.sql | 27 + .../postgres/V108__drop_parties.sha256 | 1 + .../migration/postgres/V108__drop_parties.sql | 5 + ...__Add_all_divulgence_pruning_offset.sha256 | 1 + ...109__Add_all_divulgence_pruning_offset.sql | 2 + .../postgres/V10_0__Extract_Event_Data.sha256 | 1 + .../postgres/V10_0__Extract_Event_Data.sql | 26 + .../postgres/V10_2__Extract_Event_Data.sha256 | 1 + .../postgres/V10_2__Extract_Event_Data.sql | 9 + ...d_deduplication_info_to_completions.sha256 | 1 + ..._add_deduplication_info_to_completions.sql | 20 + .../postgres/V111__timestamp_to_bigint.sha256 | 1 + .../postgres/V111__timestamp_to_bigint.sql | 185 + .../V112__add_string_interning.sha256 | 1 + .../postgres/V112__add_string_interning.sql | 7 + .../V113__enable_string_interning.sha256 | 1 + .../V113__enable_string_interning.sql | 69 + ...interning_for_parties_and_templates.sha256 | 1 + ...ng_interning_for_parties_and_templates.sql | 736 ++++ .../postgres/V116__add_filter_table.sha256 | 1 + .../postgres/V116__add_filter_table.sql | 23 + .../V117_1__drop_event_id_indexes.sha256 | 1 + .../V117_1__drop_event_id_indexes.sql | 6 + .../postgres/V117__vacuum_analyze.sha256 | 1 + .../postgres/V117__vacuum_analyze.sql | 4 + .../postgres/V118__add_user_managment.sha256 | 1 + .../postgres/V118__add_user_managment.sql | 29 + .../V119__add_transaction_metering.sha256 | 1 + .../V119__add_transaction_metering.sql | 18 + .../postgres/V11__Disclosures_index.sha256 | 1 + .../postgres/V11__Disclosures_index.sql | 10 + .../V120__non_optional_ledger_end.sha256 | 1 + 
.../V120__non_optional_ledger_end.sql | 10 + ...ticipant_side_command_deduplication.sha256 | 1 + ...participant_side_command_deduplication.sql | 5 + .../V122__participant_metering.sha256 | 1 + .../postgres/V122__participant_metering.sql | 35 + .../postgres/V123__remove_events_view.sha256 | 1 + .../postgres/V123__remove_events_view.sql | 4 + .../V124__modifiable_users_and_parties.sha256 | 1 + .../V124__modifiable_users_and_parties.sql | 31 + .../V125__creates_driver_metadata.sha256 | 1 + .../V125__creates_driver_metadata.sql | 1 + .../V126__identity_provider_config.sha256 | 1 + .../V126__identity_provider_config.sql | 7 + .../postgres/V127__etq_completions.sha256 | 1 + .../postgres/V127__etq_completions.sql | 2 + .../V128__etq_tables_and_indexes.sha256 | 1 + .../postgres/V128__etq_tables_and_indexes.sql | 58 + ..._identity_provider_id_users_parties.sha256 | 1 + ...29__identity_provider_id_users_parties.sql | 5 + .../postgres/V12__Add_configuration.sha256 | 1 + .../postgres/V12__Add_configuration.sql | 42 + .../postgres/V130__etq_data_migration.sha256 | 1 + .../postgres/V130__etq_data_migration.sql | 107 + .../V131__etq_drop_tx_id_indexes.sha256 | 1 + .../postgres/V131__etq_drop_tx_id_indexes.sql | 3 + .../postgres/V132__audience_idp_config.sha256 | 1 + .../postgres/V132__audience_idp_config.sql | 1 + .../postgres/V133__add_domain_id.sha256 | 1 + .../postgres/V133__add_domain_id.sql | 5 + .../V134__add_reassignment_events.sha256 | 1 + .../V134__add_reassignment_events.sql | 104 + ...for_incomplete_reassignment_queries.sha256 | 1 + ...es_for_incomplete_reassignment_queries.sql | 8 + .../postgres/V136__add_key_maintainers.sha256 | 1 + .../postgres/V136__add_key_maintainers.sql | 2 + .../postgres/V137__add_trace_context.sha256 | 1 + .../postgres/V137__add_trace_context.sql | 6 + .../postgres/V138__drop_ledger_id.sha256 | 1 + .../postgres/V138__drop_ledger_id.sql | 1 + .../V139__nullable_transfer_submitter.sha256 | 1 + .../V139__nullable_transfer_submitter.sql | 5 + 
.../postgres/V13__Party_entries.sha256 | 1 + .../migration/postgres/V13__Party_entries.sql | 41 + .../postgres/V14__Package_entries.sha256 | 1 + .../postgres/V14__Package_entries.sql | 32 + .../V15__Loosen_transaction_check.sha256 | 1 + .../V15__Loosen_transaction_check.sql | 17 + ...6__Create_command_completions_table.sha256 | 1 + .../V16__Create_command_completions_table.sql | 18 + .../V17__Command_deduplication.sha256 | 1 + .../postgres/V17__Command_deduplication.sql | 19 + .../postgres/V18__Backfill_completions.sha256 | 1 + .../postgres/V18__Backfill_completions.sql | 28 + .../postgres/V19__Fix_Completions.sha256 | 1 + .../postgres/V19__Fix_Completions.sql | 5 + .../db/migration/postgres/V1__Init.sha256 | 1 + .../db/migration/postgres/V1__Init.sql | 144 + .../postgres/V20__Events_new_schema.sha256 | 1 + .../postgres/V20__Events_new_schema.sql | 74 + .../postgres/V21__Stable_offsets.sha256 | 1 + .../postgres/V21__Stable_offsets.sql | 31 + .../V22__Remove_maximum_record_time.sha256 | 1 + .../V22__Remove_maximum_record_time.sql | 13 + .../postgres/V23__Delete_checkpoints.sha256 | 1 + .../postgres/V23__Delete_checkpoints.sql | 37 + .../V24__Stable_offsets_archival.sha256 | 1 + .../postgres/V24__Stable_offsets_archival.sql | 10 + .../V26_0__Contracts_new_schema.sha256 | 1 + .../postgres/V26_0__Contracts_new_schema.sql | 69 + ...V26_2__Contract_create_arg_not_null.sha256 | 1 + .../V26_2__Contract_create_arg_not_null.sql | 10 + .../postgres/V27__Events_table_fixes.sha256 | 1 + .../postgres/V27__Events_table_fixes.sql | 19 + .../postgres/V2_0__Contract_divulgence.sha256 | 1 + .../postgres/V2_0__Contract_divulgence.sql | 24 + .../postgres/V30__Drop_old_schema.sha256 | 1 + .../postgres/V30__Drop_old_schema.sql | 22 + .../V31__Event_witnesses_single_table.sha256 | 1 + .../V31__Event_witnesses_single_table.sql | 22 + .../V32_0__Drop_archived_contracts.sha256 | 1 + .../V32_0__Drop_archived_contracts.sql | 18 + ...Add_witnesses_to_participant_events.sha256 | 1 + 
...3__Add_witnesses_to_participant_events.sql | 30 + .../postgres/V34__Parties_is_local.sha256 | 1 + .../postgres/V34__Parties_is_local.sql | 21 + .../postgres/V35__event_sequential_id.sha256 | 1 + .../postgres/V35__event_sequential_id.sql | 28 + .../postgres/V36__drop_participant_id.sha256 | 1 + .../postgres/V36__drop_participant_id.sql | 13 + ...7__add_participant_id_to_parameters.sha256 | 1 + .../V37__add_participant_id_to_parameters.sql | 4 + .../postgres/V39__Participant-pruning.sha256 | 1 + .../postgres/V39__Participant-pruning.sql | 13 + .../postgres/V40__multiple_submitters.sha256 | 1 + .../postgres/V40__multiple_submitters.sql | 12 + .../postgres/V41__hash_indices.sha256 | 1 + .../migration/postgres/V41__hash_indices.sql | 11 + .../postgres/V42__Convert_hash_indices.sha256 | 1 + .../postgres/V42__Convert_hash_indices.sql | 8 + .../postgres/V43__explicit_compression.sha256 | 1 + .../postgres/V43__explicit_compression.sql | 24 + .../postgres/V44__offset_as_text.sha256 | 1 + .../postgres/V44__offset_as_text.sql | 37 + .../V45__fix_large_size_index_issues.sha256 | 1 + .../V45__fix_large_size_index_issues.sql | 5 + .../postgres/V4_0__Add_parties.sha256 | 1 + .../migration/postgres/V4_0__Add_parties.sql | 25 + .../postgres/V5__Add_packages.sha256 | 1 + .../migration/postgres/V5__Add_packages.sql | 31 + .../V6__External_Ledger_Offset.sha256 | 1 + .../postgres/V6__External_Ledger_Offset.sql | 12 + .../postgres/V7__Command_deduplication.sha256 | 1 + .../postgres/V7__Command_deduplication.sql | 15 + .../postgres/V8__Contract_Divulgence.sha256 | 1 + .../postgres/V8__Contract_Divulgence.sql | 24 + .../postgres/V9__Contract_Divulgence.sha256 | 1 + .../postgres/V9__Contract_Divulgence.sql | 34 + .../db/migration/recompute-sha256sums.sh | 9 + .../main/resources/metering-keys/README.md | 4 + .../resources/metering-keys/community.json | 5 + .../generator/ErrorCategoryDocItem.scala | 14 + .../ErrorCategoryInventoryDocsGenerator.scala | 42 + 
.../error/generator/ErrorCodeDocItem.scala | 29 + .../ErrorCodeDocumentationGenerator.scala | 228 ++ .../ErrorCodeInventoryDocsGenerator.scala | 201 + .../error/generator/ErrorGroupDocItem.scala | 18 + .../ErrorCategoryInventoryDocsGenApp.scala | 29 + .../app/ErrorCodeInventoryDocsGenApp.scala | 30 + .../canton/error/generator/app/Main.scala | 93 + .../canton/ledger/api/ProxyCloseable.scala | 11 + .../ledger/api/SubmissionIdGenerator.scala | 20 + .../canton/ledger/api/TraceIdentifiers.scala | 59 + .../canton/ledger/api/ValidationLogger.scala | 23 + .../canton/ledger/api/auth/AuthService.scala | 50 + .../ledger/api/auth/AuthServiceJWT.scala | 166 + .../api/auth/AuthServiceJWTPayload.scala | 479 +++ .../ledger/api/auth/AuthServiceWildcard.scala | 18 + .../ledger/api/auth/AuthorizationError.scala | 63 + .../canton/ledger/api/auth/Authorizer.scala | 417 +++ .../api/auth/CachedJwtVerifierLoader.scala | 101 + .../canton/ledger/api/auth/Claims.scala | 195 + ...IdentityProviderAwareAuthServiceImpl.scala | 136 + .../auth/IdentityProviderConfigLoader.scala | 17 + .../ledger/api/auth/JwtVerifierLoader.scala | 13 + .../auth/OngoingAuthorizationObserver.scala | 194 + .../auth/UserRightsChangeAsyncChecker.scala | 76 + .../auth/client/LedgerCallCredentials.java | 40 + .../interceptor/AsyncForwardingListener.scala | 42 + .../AuthorizationInterceptor.scala | 247 ++ .../IdentityProviderAwareAuthService.scala | 16 + .../ActiveContractsServiceAuthorization.scala | 41 + ...ommandCompletionServiceAuthorization.scala | 43 + ...mandCompletionServiceV2Authorization.scala | 41 + .../CommandServiceAuthorization.scala | 80 + .../CommandServiceV2Authorization.scala | 77 + ...ommandSubmissionServiceAuthorization.scala | 41 + ...mandSubmissionServiceV2Authorization.scala | 49 + ...ConfigManagementServiceAuthorization.scala | 33 + .../EventQueryServiceAuthorization.scala | 49 + .../EventQueryServiceV2Authorization.scala | 37 + ...tyProviderConfigServiceAuthorization.scala | 50 + 
...gerConfigurationServiceAuthorization.scala | 37 + .../LedgerIdentityServiceAuthorization.scala | 37 + .../MeteringReportServiceAuthorization.scala | 38 + ...ackageManagementServiceAuthorization.scala | 35 + .../PackageServiceAuthorization.scala | 38 + .../PackageServiceV2Authorization.scala | 41 + ...rticipantPruningServiceAuthorization.scala | 35 + .../PartyManagementServiceAuthorization.scala | 81 + .../services/StateServiceAuthorization.scala | 51 + .../services/TimeServiceAuthorization.scala | 38 + .../services/TimeServiceV2Authorization.scala | 32 + .../TransactionServiceAuthorization.scala | 87 + .../services/UpdateServiceAuthorization.scala | 76 + .../UserManagementServiceAuthorization.scala | 167 + .../canton/ledger/api/domain/domain.scala | 321 ++ .../canton/ledger/api/grpc/DropRepeated.scala | 48 + .../api/grpc/GrpcActiveContractsService.scala | 50 + .../ledger/api/grpc/GrpcApiService.scala | 11 + .../ledger/api/grpc/GrpcHealthService.scala | 88 + .../grpc/GrpcLedgerConfigurationService.scala | 49 + .../ledger/api/grpc/GrpcPackageService.scala | 69 + .../canton/ledger/api/grpc/Logging.scala | 13 + .../StreamingServiceLifecycleManagement.scala | 93 + .../ledger/api/health/HealthChecks.scala | 30 + .../ledger/api/health/HealthStatus.scala | 23 + .../ledger/api/health/ReportsHealth.scala | 11 + .../completion/CompletionEndRequest.scala | 6 + .../completion/CompletionStreamRequest.scala | 14 + .../SubmitReassignmentRequest.scala | 28 + .../command/submission/SubmitRequest.scala | 8 + .../event/GetEventsByContractIdRequest.scala | 12 + .../event/GetEventsByContractKeyRequest.scala | 15 + .../transaction/GetLedgerEndRequest.scala | 6 + .../GetTransactionByEventIdRequest.scala | 13 + .../GetTransactionByIdRequest.scala | 13 + .../GetTransactionTreesRequest.scala | 15 + .../transaction/GetTransactionsRequest.scala | 14 + .../services/CommandCompletionService.scala | 23 + .../ledger/api/services/CommandService.scala | 33 + 
.../services/CommandSubmissionService.scala | 17 + .../api/services/EventQueryService.scala | 27 + .../api/services/TransactionService.scala | 55 + .../api/validation/CommandsValidator.scala | 344 ++ .../CompletionServiceRequestValidator.scala | 69 + .../DeduplicationPeriodValidator.scala | 56 + .../EventQueryServiceRequestValidator.scala | 70 + .../api/validation/FieldValidator.scala | 404 ++ .../validation/LedgerOffsetValidator.scala | 96 + .../api/validation/PartyNameChecker.scala | 21 + .../api/validation/PartyValidator.scala | 34 + .../ResourceAnnotationValidator.scala | 149 + .../SubmitAndWaitRequestValidator.scala | 36 + .../validation/SubmitRequestValidator.scala | 100 + .../TransactionFilterValidator.scala | 156 + .../TransactionServiceRequestValidator.scala | 192 + .../ValidateDisclosedContracts.scala | 117 + .../api/validation/ValidationErrors.scala | 34 + .../api/validation/ValueValidator.scala | 187 + .../canton/ledger/client/GrpcChannel.scala | 53 + .../canton/ledger/client/LedgerClient.scala | 182 + .../CommandClientConfiguration.scala | 28 + .../LedgerClientChannelConfiguration.scala | 35 + .../LedgerClientConfiguration.scala | 16 + .../configuration/LedgerIdRequirement.scala | 34 + .../services/EventQueryServiceClient.scala | 50 + .../acs/ActiveContractSetClient.scala | 35 + .../ActiveContractSetClient.scala | 69 + .../admin/IdentityProviderConfigClient.scala | 101 + .../services/admin/MeteringReportClient.scala | 25 + .../admin/PackageManagementClient.scala | 39 + .../ParticipantPruningManagementClient.scala | 30 + .../admin/PartyManagementClient.scala | 84 + .../services/admin/UserManagementClient.scala | 171 + .../services/commands/CommandClient.scala | 70 + .../commands/CommandCompletionSource.scala | 44 + .../services/commands/CommandSubmission.scala | 10 + .../commands/CommandSubmissionFlow.scala | 42 + .../commands/CommandUpdaterFlow.scala | 59 + .../commands/CompletionStreamElement.scala | 18 + .../commands/SynchronousCommandClient.scala | 
51 + .../withoutledgerid/CommandClient.scala | 105 + .../identity/LedgerIdentityClient.scala | 44 + .../client/services/pkg/PackageClient.scala | 25 + .../pkg/withoutledgerid/PackageClient.scala | 43 + .../services/testing/time/StaticTime.scala | 112 + .../transactions/TransactionClient.scala | 72 + .../transactions/TransactionSource.scala | 40 + .../withoutledgerid/TransactionClient.scala | 135 + .../services/version/VersionClient.scala | 19 + .../withoutledgerid/VersionClient.scala | 36 + .../client/withoutledgerid/LedgerClient.scala | 163 + .../state/index/v2/ContractStore.scala | 59 + .../state/index/v2/IdentityProvider.scala | 13 + .../v2/IndexActiveContractsService.scala | 24 + .../index/v2/IndexCompletionsService.scala | 30 + .../v2/IndexConfigManagementService.scala | 31 + .../index/v2/IndexConfigurationService.scala | 17 + .../index/v2/IndexEventQueryService.scala | 32 + .../state/index/v2/IndexPackagesService.scala | 38 + .../v2/IndexParticipantPruningService.scala | 19 + .../v2/IndexPartyManagementService.scala | 31 + .../state/index/v2/IndexService.scala | 22 + .../index/v2/IndexTransactionsService.scala | 53 + .../state/index/v2/IndexerPartyDetails.scala | 18 + .../state/index/v2/LedgerEndService.scala | 14 + .../index/v2/MaximumLedgerTimeService.scala | 50 + .../state/index/v2/MeteringStore.scala | 44 + .../state/index/v2/PartyEntry.scala | 18 + .../participant/state/index/v2/package.scala | 102 + .../participant/state/v2/CompletionInfo.scala | 74 + .../state/v2/DivulgedContract.scala | 19 + .../participant/state/v2/PruningResult.scala | 33 + .../participant/state/v2/ReadService.scala | 172 + .../participant/state/v2/Reassignment.scala | 67 + .../state/v2/ReassignmentCommand.scala | 27 + .../participant/state/v2/SubmitterInfo.scala | 76 + .../state/v2/TransactionMeta.scala | 52 + .../ledger/participant/state/v2/Update.scala | 563 +++ .../state/v2/WriteConfigService.scala | 38 + .../state/v2/WritePackagesService.scala | 50 + 
.../v2/WriteParticipantPruningService.scala | 52 + .../state/v2/WritePartyService.scala | 41 + .../participant/state/v2/WriteService.scala | 151 + .../state/v2/metrics/TimedReadService.scala | 43 + .../state/v2/metrics/TimedWriteService.scala | 127 + .../ledger/participant/state/v2/package.scala | 46 + .../ledger/runner/common/OptConfigValue.scala | 93 + .../common/PureConfigReaderWriter.scala | 285 ++ .../canton/platform/ApiOffset.scala | 37 + .../canton/platform/DispatcherState.scala | 153 + .../canton/platform/InMemoryState.scala | 124 + .../canton/platform/LedgerApiServer.scala | 54 + .../platform/TemplatePartiesFilter.scala | 8 + .../ActiveStreamMetricsInterceptor.scala | 63 + .../platform/apiserver/ApiException.scala | 13 + .../platform/apiserver/ApiService.scala | 13 + .../platform/apiserver/ApiServiceOwner.scala | 221 ++ .../platform/apiserver/ApiServices.scala | 551 +++ .../ExecutionSequencerFactoryOwner.scala | 26 + .../platform/apiserver/GrpcServer.scala | 111 + .../platform/apiserver/LedgerApiService.scala | 66 + .../platform/apiserver/LedgerFeatures.scala | 23 + .../platform/apiserver/SeedService.scala | 82 + .../apiserver/TimeServiceBackend.scala | 31 + .../apiserver/TimedIndexService.scala | 280 ++ .../TruncatedStatusInterceptor.scala | 30 + .../LedgerConfigurationInitializer.scala | 44 + .../LedgerConfigurationSubscription.scala | 15 + ...erConfigurationSubscriptionFromIndex.scala | 179 + .../configuration/RateLimitingConfig.scala | 40 + .../apiserver/error/ErrorInterceptor.scala | 155 + .../execution/AuthorityResolver.scala | 41 + .../execution/CommandExecutionResult.scala | 43 + .../apiserver/execution/CommandExecutor.scala | 22 + .../DynamicDomainParameterGetter.scala | 26 + .../LedgerTimeAwareCommandExecutor.scala | 132 + .../execution/ResolveMaximumLedgerTime.scala | 65 + .../StoreBackedCommandExecutor.scala | 595 +++ .../execution/TimedCommandExecutor.scala | 34 + .../apiserver/meteringreport/HmacSha256.scala | 68 + 
.../apiserver/meteringreport/Jcs.scala | 52 + .../apiserver/meteringreport/JcsSigner.scala | 78 + .../meteringreport/MeteringReport.scala | 66 + .../MeteringReportGenerator.scala | 65 + .../meteringreport/MeteringReportKey.scala | 41 + .../apiserver/ratelimiting/LimitResult.scala | 46 + .../apiserver/ratelimiting/MemoryCheck.scala | 139 + .../RateLimitingInterceptor.scala | 104 + .../apiserver/ratelimiting/StreamCheck.scala | 49 + .../ratelimiting/ThreadpoolCheck.scala | 44 + .../services/ApiActiveContractsService.scala | 134 + .../ApiCommandCompletionService.scala | 98 + .../ApiCommandCompletionServiceV2.scala | 105 + .../services/ApiCommandService.scala | 100 + .../services/ApiCommandServiceV2.scala | 238 ++ .../ApiCommandSubmissionService.scala | 99 + .../ApiCommandSubmissionServiceV2.scala | 192 + .../apiserver/services/ApiConversions.scala | 232 ++ .../services/ApiEventQueryService.scala | 81 + .../services/ApiEventQueryServiceV2.scala | 68 + .../ApiLedgerConfigurationService.scala | 86 + .../services/ApiLedgerIdentityService.scala | 67 + .../services/ApiPackageService.scala | 174 + .../services/ApiPackageServiceV2.scala | 154 + .../apiserver/services/ApiStateService.scala | 181 + .../apiserver/services/ApiTimeService.scala | 181 + .../apiserver/services/ApiTimeServiceV2.scala | 110 + .../services/ApiTransactionService.scala | 179 + .../apiserver/services/ApiUpdateService.scala | 376 ++ .../services/ApiVersionService.scala | 131 + .../services/ApiVersionServiceV2.scala | 121 + .../services/RejectionGenerators.scala | 183 + .../apiserver/services/StreamMetrics.scala | 16 + .../admin/ApiConfigManagementService.scala | 309 ++ .../ApiIdentityProviderConfigService.scala | 234 ++ .../admin/ApiMeteringReportService.scala | 135 + .../admin/ApiPackageManagementService.scala | 244 ++ .../admin/ApiParticipantPruningService.scala | 239 ++ .../admin/ApiPartyManagementService.scala | 679 ++++ .../admin/ApiUserManagementService.scala | 659 ++++ 
.../admin/IdentityProviderExists.scala | 21 + .../services/admin/PartyRecordsExist.scala | 25 + .../services/admin/SynchronousResponse.scala | 168 + .../apiserver/services/admin/Utils.scala | 18 + .../apiserver/services/admin/package.scala | 16 + .../CommandCompletionServiceImpl.scala | 167 + .../services/command/CommandServiceImpl.scala | 275 ++ .../CommandSubmissionServiceImpl.scala | 275 ++ .../apiserver/services/logging/package.scala | 122 + .../platform/apiserver/services/package.scala | 44 + .../tracking/CancellableTimeoutSupport.scala | 58 + .../tracking/CompletionResponse.scala | 71 + .../services/tracking/SubmissionTracker.scala | 230 ++ .../transaction/EventQueryServiceImpl.scala | 107 + .../transaction/TransactionServiceImpl.scala | 338 ++ .../apiserver/update/FieldNames.scala | 76 + .../IdentityProviderConfigUpdateMapper.scala | 100 + .../update/PartyRecordUpdateMapper.scala | 78 + .../apiserver/update/UpdateMapperBase.scala | 91 + .../apiserver/update/UpdatePath.scala | 30 + .../apiserver/update/UpdatePathError.scala | 29 + .../apiserver/update/UpdatePathsTrie.scala | 154 + .../update/UpdateRequestsPaths.scala | 77 + .../apiserver/update/UserUpdateMapper.scala | 77 + .../platform/apiserver/update/update.scala | 8 + .../config/CommandServiceConfig.scala | 31 + .../platform/config/IndexServiceConfig.scala | 209 ++ .../config/InvalidConfigException.scala | 10 + .../canton/platform/config/Readers.scala | 36 + .../canton/platform/config/ServerRole.scala | 30 + .../config/UserManagementServiceConfig.scala | 38 + ...ctStoreBasedMaximumLedgerTimeService.scala | 66 + .../platform/index/InMemoryStateUpdater.scala | 507 +++ .../platform/index/IndexServiceImpl.scala | 729 ++++ .../platform/index/IndexServiceOwner.scala | 226 ++ .../platform/indexer/IndexerConfig.scala | 70 + .../indexer/IndexerServiceOwner.scala | 136 + .../platform/indexer/IndexerStartupMode.scala | 26 + .../canton/platform/indexer/JdbcIndexer.scala | 172 + 
.../platform/indexer/MeteringAggregator.scala | 214 ++ .../indexer/PackageMetadataViewConfig.scala | 29 + .../platform/indexer/RecoveringIndexer.scala | 186 + .../indexer/UpdatePackageMetadataView.scala | 122 + .../platform/indexer/ha/HaCoordinator.scala | 205 + .../indexer/ha/KillSwitchCaptor.scala | 71 + .../platform/indexer/ha/PollingChecker.scala | 89 + .../indexer/ha/PreemptableSequence.scala | 222 ++ .../canton/platform/indexer/package.scala | 15 + .../indexer/parallel/AsyncSupport.scala | 54 + .../platform/indexer/parallel/BatchN.scala | 60 + .../BatchingParallelIngestionPipe.scala | 45 + .../InitializeParallelIngestion.scala | 78 + .../parallel/ParallelIndexerFactory.scala | 220 ++ .../ParallelIndexerSubscription.scala | 373 ++ .../CachedIdentityProviderConfigStore.scala | 96 + .../CachedUserManagementStore.scala | 125 + .../IdentityProviderManagementConfig.scala | 16 + .../InMemoryIdentityProviderConfigStore.scala | 133 + .../localstore/InMemoryPartyRecordStore.scala | 267 ++ .../InMemoryUserManagementStore.scala | 282 ++ .../canton/platform/localstore/Ops.scala | 33 + ...ersistentIdentityProviderConfigStore.scala | 242 ++ .../PersistentPartyRecordStore.scala | 364 ++ .../PersistentUserManagementStore.scala | 426 +++ .../api/IdentityProviderConfigStore.scala | 75 + .../api/IdentityProviderConfigUpdate.scala | 14 + .../platform/localstore/api/PartyRecord.scala | 13 + .../localstore/api/PartyRecordStore.scala | 72 + .../localstore/api/UserManagementStore.scala | 198 + .../utils/LocalAnnotationsUtils.scala | 14 + .../canton/platform/package.scala | 64 + .../packages/DeduplicatingPackageLoader.scala | 64 + .../store/CompletionFromTransaction.scala | 163 + .../canton/platform/store/DbSupport.scala | 82 + .../canton/platform/store/DbType.scala | 49 + .../platform/store/EventSequentialId.scala | 10 + .../platform/store/FlywayMigrations.scala | 174 + .../store/ScalaPbStreamingOptimizations.scala | 36 + .../platform/store/backend/Conversions.scala | 263 ++ 
.../canton/platform/store/backend/DbDto.scala | 253 ++ .../backend/DbDtoToStringsForInterning.scala | 91 + .../store/backend/StorageBackend.scala | 534 +++ .../store/backend/StorageBackendFactory.scala | 84 + .../store/backend/UpdateToDbDto.scala | 563 +++ .../store/backend/UpdateToMeteringDbDto.scala | 60 + .../store/backend/VerifiedDataSource.scala | 60 + .../common/CommonStorageBackendFactory.scala | 49 + .../CompletionStorageBackendTemplate.scala | 212 ++ .../backend/common/ComposableQuery.scala | 83 + .../ConfigurationStorageBackendTemplate.scala | 111 + .../ContractStorageBackendTemplate.scala | 323 ++ .../common/DataSourceStorageBackendImpl.scala | 20 + .../backend/common/EventReaderQueries.scala | 181 + .../common/EventStorageBackendTemplate.scala | 1268 +++++++ .../platform/store/backend/common/Field.scala | 126 + .../IngestionStorageBackendTemplate.scala | 66 + .../common/InitHookDataSourceProxy.scala | 73 + .../common/IntegrityStorageBackendImpl.scala | 105 + .../MeteringParameterStorageBackendImpl.scala | 83 + .../common/MeteringStorageBackendImpl.scala | 250 ++ .../PackageStorageBackendTemplate.scala | 116 + .../common/ParameterStorageBackendImpl.scala | 185 + .../common/PartyStorageBackendTemplate.scala | 150 + .../store/backend/common/QueryStrategy.scala | 141 + .../store/backend/common/Schema.scala | 538 +++ .../backend/common/SimpleSqlAsVectorOf.scala | 51 + .../StringInterningStorageBackendImpl.scala | 30 + .../platform/store/backend/common/Table.scala | 80 + .../common/TransactionPointwiseQueries.scala | 166 + .../common/TransactionStreamingQueries.scala | 313 ++ .../backend/h2/H2ContractStorageBackend.scala | 79 + .../backend/h2/H2DBLockStorageBackend.scala | 24 + .../h2/H2DataSourceStorageBackend.scala | 58 + .../backend/h2/H2EventStorageBackend.scala | 36 + .../platform/store/backend/h2/H2Field.scala | 35 + .../store/backend/h2/H2FunctionAliases.scala | 15 + .../store/backend/h2/H2QueryStrategy.scala | 32 + 
.../backend/h2/H2ResetStorageBackend.scala | 54 + .../platform/store/backend/h2/H2Schema.scala | 50 + .../backend/h2/H2StorageBackendFactory.scala | 85 + .../platform/store/backend/h2/H2Table.scala | 81 + .../IdentityProviderStorageBackend.scala | 57 + .../IdentityProviderStorageBackendImpl.scala | 186 + .../ParticipantMetadataBackend.scala | 112 + .../PartyRecordStorageBackend.scala | 56 + .../PartyRecordStorageBackendImpl.scala | 165 + .../localstore/ResourceVersionOps.scala | 17 + .../UserManagementStorageBackend.scala | 82 + .../UserManagementStorageBackendImpl.scala | 365 ++ .../oracle/OracleDBLockStorageBackend.scala | 72 + .../OracleDataSourceStorageBackend.scala | 28 + .../oracle/OracleEventStorageBackend.scala | 37 + .../store/backend/oracle/OracleField.scala | 52 + .../backend/oracle/OracleQueryStrategy.scala | 49 + .../oracle/OracleResetStorageBackend.scala | 50 + .../store/backend/oracle/OracleSchema.scala | 45 + .../oracle/OracleStorageBackendFactory.scala | 80 + .../store/backend/oracle/OracleTable.scala | 63 + .../store/backend/postgresql/PGField.scala | 59 + .../store/backend/postgresql/PGSchema.scala | 50 + .../store/backend/postgresql/PGTable.scala | 68 + .../PostgresDBLockStorageBackend.scala | 52 + .../PostgresDataSourceStorageBackend.scala | 126 + .../PostgresEventStorageBackend.scala | 69 + .../postgresql/PostgresQueryStrategy.scala | 44 + .../PostgresResetStorageBackend.scala | 50 + .../PostgresStorageBackendFactory.scala | 62 + .../store/cache/ContractKeyStateCache.scala | 42 + .../store/cache/ContractStateCaches.scala | 107 + .../store/cache/ContractsStateCache.scala | 54 + .../store/cache/InMemoryFanoutBuffer.scala | 247 ++ .../platform/store/cache/LedgerEndCache.scala | 33 + .../MutableCacheBackedContractStore.scala | 203 + .../platform/store/cache/StateCache.scala | 220 ++ .../canton/platform/store/cache/package.scala | 21 + .../BufferedCommandCompletionsReader.scala | 109 + .../store/dao/BufferedStreamsReader.scala | 139 + 
.../dao/BufferedTransactionByIdReader.scala | 69 + .../store/dao/CommandCompletionsReader.scala | 78 + .../store/dao/DatabaseSelfServiceError.scala | 106 + .../platform/store/dao/DbDispatcher.scala | 170 + .../store/dao/EventProjectionProperties.scala | 121 + .../dao/HikariJdbcConnectionProvider.scala | 121 + .../store/dao/JdbcConnectionProvider.scala | 20 + .../platform/store/dao/JdbcLedgerDao.scala | 804 ++++ .../canton/platform/store/dao/LedgerDao.scala | 278 ++ .../store/dao/PaginatingAsyncStream.scala | 124 + .../store/dao/PersistenceResponse.scala | 12 + .../platform/store/dao/QueryRange.scala | 9 + .../store/dao/SequentialWriteDao.scala | 171 + .../platform/store/dao/events/ACSReader.scala | 756 ++++ .../events/BufferedTransactionsReader.scala | 281 ++ .../store/dao/events/CompressionMetrics.scala | 36 + .../dao/events/CompressionStrategy.scala | 72 + .../store/dao/events/ContractLoader.scala | 235 ++ .../store/dao/events/ContractStateEvent.scala | 36 + .../store/dao/events/ContractsReader.scala | 227 ++ .../store/dao/events/EventIdsUtils.scala | 50 + .../store/dao/events/EventsRange.scala | 14 + .../store/dao/events/EventsReader.scala | 134 + .../store/dao/events/EventsTable.scala | 172 + .../store/dao/events/FilterUtils.scala | 20 + .../store/dao/events/IdPageSizing.scala | 90 + .../store/dao/events/LfValueTranslation.scala | 623 ++++ .../store/dao/events/QueryNonPruned.scala | 75 + .../platform/store/dao/events/Raw.scala | 351 ++ .../dao/events/ReassignmentStreamReader.scala | 270 ++ .../dao/events/TransactionConversion.scala | 26 + .../TransactionLogUpdatesConversions.scala | 630 ++++ .../events/TransactionPointwiseReader.scala | 156 + .../events/TransactionsFlatStreamReader.scala | 299 ++ .../store/dao/events/TransactionsReader.scala | 343 ++ .../events/TransactionsTreeStreamReader.scala | 359 ++ .../platform/store/dao/events/Utils.scala | 13 + .../store/entries/ConfigurationEntry.scala | 30 + .../store/entries/PackageLedgerEntry.scala | 41 + 
.../store/entries/PartyLedgerEntry.scala | 30 + .../interfaces/LedgerDaoContractsReader.scala | 93 + .../interfaces/TransactionLogUpdate.scala | 151 + .../store/interning/RawStringInterning.scala | 55 + .../store/interning/StringInterning.scala | 92 + .../store/interning/StringInterningView.scala | 142 + .../store/packagemeta/PackageMetadata.scala | 86 + .../packagemeta/PackageMetadataView.scala | 29 + .../store/serialization/Compression.scala | 49 + .../store/serialization/ValueSerializer.scala | 59 + .../store/utils/ConcurrencyLimiter.scala | 53 + .../platform/store/utils/Telemetry.scala | 32 + .../com/digitalasset/canton/util/Ctx.scala | 30 + .../ExtractMaterializedValue.scala | 98 + .../util/pekkostreams/ImmutableIterable.scala | 13 + .../util/pekkostreams/MaxInFlight.scala | 148 + .../db/migration/EmptyJavaMigration.scala | 16 + .../scala/db/migration/oracle/package.scala | 6 + .../postgres/V10_1__Populate_Event_Data.scala | 11 + .../V25__Backfill_Participant_Events.scala | 9 + .../V26_1__Fill_create_argument.scala | 9 + .../postgres/V28__Fix_key_hashes.scala | 9 + .../V29__Fix_participant_events.scala | 9 + .../postgres/V2_1__Rebuild_Acs.scala | 11 + .../postgres/V32_1__Fix_key_hashes.scala | 9 + .../postgres/V38__Update_value_versions.scala | 9 + .../postgres/V3__Recompute_Key_Hash.scala | 11 + .../postgres/V4_1__Collect_Parties.scala | 11 + .../src/test/resources/OracleLog.properties | 6 + .../src/test/resources/config/test.conf | 6 + .../src/test/resources/config/test2.conf | 6 + .../src/test/resources/config/testp.conf | 3 + .../src/test/resources/test-metering-key.json | 5 + .../ErrorCodeDocumentationGeneratorSpec.scala | 154 + .../canton/ledger/api/DomainMocks.scala | 48 + .../ledger/api/IdentityProviderIdSpec.scala | 78 + .../ledger/api/TraceIdentifiersTest.scala | 48 + .../api/ValueConversionRoundTripTest.scala | 158 + .../api/auth/AuthServiceJWTCodecSpec.scala | 545 +++ .../auth/AuthorizationInterceptorSpec.scala | 102 + 
.../ledger/api/auth/AuthorizerSpec.scala | 89 + .../OngoingAuthorizationObserverSpec.scala | 214 ++ .../ledger/api/grpc/DropRepeatedSpec.scala | 60 + .../api/grpc/GrpcHealthServiceSpec.scala | 345 ++ ...ompletionServiceRequestValidatorTest.scala | 204 + .../DeduplicationPeriodValidatorSpec.scala | 43 + ...ventQueryServiceRequestValidatorTest.scala | 119 + .../validation/IdentifierValidatorTest.scala | 48 + .../ResourceAnnotationValidationsSpec.scala | 130 + .../SubmitRequestValidatorTest.scala | 985 +++++ ...ansactionServiceRequestValidatorTest.scala | 798 ++++ .../ValidateDisclosedContractsTest.scala | 328 ++ .../api/validation/ValidatorTestUtils.scala | 106 + .../LedgerIdRequirementTest.scala | 71 + .../commands/CommandSubmissionFlowTest.scala | 45 + .../commands/CommandUpdaterFlowTest.scala | 94 + .../runner/common/ArbitraryConfig.scala | 321 ++ .../common/PureConfigReaderWriterSpec.scala | 594 +++ .../canton/platform/DispatcherStateSpec.scala | 167 + .../canton/platform/InMemoryStateSpec.scala | 199 + .../canton/platform/IndexComponentTest.scala | 242 ++ .../platform/apiserver/GrpcServerSpec.scala | 179 + .../platform/apiserver/SeedingSpec.scala | 20 + .../SimpleTimeServiceBackendSpec.scala | 44 + ...nfigurationSubscriptionFromIndexSpec.scala | 359 ++ .../error/ErrorInterceptorSpec.scala | 392 ++ .../LedgerTimeAwareCommandExecutorSpec.scala | 294 ++ .../ResolveMaximumLedgerTimeSpec.scala | 139 + .../StoreBackedCommandExecutorSpec.scala | 463 +++ .../TestDynamicDomainParameterGetter.scala | 22 + .../meteringreport/HmacSha256Spec.scala | 58 + .../meteringreport/JcsSignerSpec.scala | 108 + .../apiserver/meteringreport/JcsSpec.scala | 104 + .../MeteringReportGeneratorSpec.scala | 92 + .../MeteringReportKeySpec.scala | 42 + .../meteringreport/MeteringReportSpec.scala | 42 + .../ratelimiting/LimitResultSpec.scala | 40 + .../ratelimiting/MemoryCheckSpec.scala | 70 + .../RateLimitingInterceptorSpec.scala | 518 +++ .../services/ApiCommandServiceSpec.scala | 199 + 
.../ApiCommandSubmissionServiceSpec.scala | 179 + .../ApiConfigManagementServiceSpec.scala | 476 +++ .../admin/ApiMeteringReportServiceSpec.scala | 180 + .../ApiPackageManagementServiceSpec.scala | 270 ++ .../admin/ApiPartyManagementServiceSpec.scala | 297 ++ .../admin/ApiUserManagementServiceSpec.scala | 92 + .../command/CommandServiceImplSpec.scala | 333 ++ .../CommandSubmissionServiceImplSpec.scala | 322 ++ .../CancellableTimeoutSupportSpec.scala | 70 + .../tracking/SubmissionTrackerSpec.scala | 440 +++ .../apiserver/tls/OcspResponderFixture.scala | 118 + ...TlsCertificateRevocationCheckingSpec.scala | 116 + .../platform/apiserver/tls/TlsFixture.scala | 106 + .../platform/apiserver/tls/TlsSpec.scala | 127 + ...entityProviderConfigUpdateMapperSpec.scala | 176 + .../update/PartyRecordUpdateMapperSpec.scala | 222 ++ .../apiserver/update/UpdatePathSpec.scala | 32 + .../update/UpdatePathsTrieSpec.scala | 152 + .../update/UserUpdateMapperSpec.scala | 205 + .../validation/ErrorFactoriesSpec.scala | 778 ++++ ...oreBasedMaximumLedgerTimeServiceSpec.scala | 275 ++ .../index/InMemoryStateUpdaterSpec.scala | 664 ++++ .../platform/index/IndexServiceImplSpec.scala | 417 +++ .../indexer/MeteringAggregatorSpec.scala | 250 ++ .../RecoveringIndexerIntegrationSpec.scala | 454 +++ .../indexer/RecoveringIndexerSpec.scala | 460 +++ .../indexer/ha/EndlessReadService.scala | 227 ++ .../indexer/ha/EndlessReadServiceSpec.scala | 42 + .../indexer/ha/HaCoordinatorSpec.scala | 961 +++++ .../indexer/ha/IndexerStabilitySpec.scala | 169 + .../ha/IndexerStabilitySpecOracle.scala | 8 + .../ha/IndexerStabilitySpecPostgres.scala | 14 + .../ha/IndexerStabilityTestFixture.scala | 160 + .../indexer/ha/TestDBLockStorageBackend.scala | 227 ++ .../ha/TestDBLockStorageBackendSpec.scala | 31 + .../indexer/parallel/BatchNSpec.scala | 102 + .../BatchingParallelIngestionPipeSpec.scala | 230 ++ .../parallel/ParallelIndexerFactorySpec.scala | 312 ++ .../ParallelIndexerSubscriptionSpec.scala | 622 ++++ 
...achedIdentityProviderConfigStoreSpec.scala | 180 + .../CachedUserManagementStoreSpec.scala | 225 ++ .../ConcurrentChangeControlTests.scala | 147 + .../localstore/DbDispatcherLeftOpsSpec.scala | 31 + .../IdentityProviderConfigStoreSpecBase.scala | 25 + .../IdentityProviderConfigStoreTests.scala | 423 +++ ...emoryIdentityProviderConfigStoreSpec.scala | 16 + .../InMemoryPartyRecordStoreSpec.scala | 23 + .../InMemoryUserManagementStoreSpec.scala | 24 + .../localstore/PartyRecordStoreSpecBase.scala | 29 + .../localstore/PartyRecordStoreTests.scala | 419 +++ ...entIdentityProviderConfigStoreH2Spec.scala | 12 + ...dentityProviderConfigStoreSpecOracle.scala | 12 + ...ntityProviderConfigStoreSpecPostgres.scala | 12 + ...tentIdentityProviderConfigStoreTests.scala | 22 + .../PersistentPartyRecordStoreH2Spec.scala | 12 + ...PersistentPartyRecordStoreSpecOracle.scala | 12 + ...rsistentPartyRecordStoreSpecPostgres.scala | 12 + .../PersistentPartyRecordStoreTests.scala | 81 + .../localstore/PersistentStoreSpecBase.scala | 104 + .../PersistentUserStoreH2Spec.scala | 12 + .../PersistentUserStoreSpecOracle.scala | 12 + .../PersistentUserStoreSpecPostgres.scala | 12 + .../localstore/PersistentUserStoreTests.scala | 84 + .../localstore/UserStoreSpecBase.scala | 28 + .../platform/localstore/UserStoreTests.scala | 692 ++++ .../MultiDomainIndexComponentTest.scala | 63 + .../DeduplicatingPackageLoaderSpec.scala | 160 + .../store/CompletionFromTransactionSpec.scala | 141 + .../platform/store/FlywayMigrationsSpec.scala | 105 + .../platform/store/backend/DbDtoEq.scala | 45 + .../platform/store/backend/DbDtoEqSpec.scala | 38 + .../DbDtoToStringsForInterningSpec.scala | 258 ++ .../store/backend/PruningDtoQueries.scala | 76 + .../backend/StorageBackendProvider.scala | 164 + .../store/backend/StorageBackendSpec.scala | 132 + .../store/backend/StorageBackendSpecH2.scala | 11 + .../backend/StorageBackendSpecOracle.scala | 11 + .../backend/StorageBackendSpecPostgres.scala | 57 + 
.../store/backend/StorageBackendSuite.scala | 30 + .../backend/StorageBackendTestValues.scala | 413 +++ .../StorageBackendTestsCompletions.scala | 262 ++ .../StorageBackendTestsContracts.scala | 379 ++ .../backend/StorageBackendTestsDBLock.scala | 184 + .../backend/StorageBackendTestsEvents.scala | 504 +++ .../StorageBackendTestsIDPConfig.scala | 194 + .../StorageBackendTestsIngestion.scala | 189 + .../StorageBackendTestsInitialization.scala | 58 + ...orageBackendTestsInitializeIngestion.scala | 391 ++ .../StorageBackendTestsIntegrity.scala | 63 + ...torageBackendTestsMeteringParameters.scala | 60 + .../StorageBackendTestsMigrationPruning.scala | 90 + ...orageBackendTestsParticipantMetadata.scala | 170 + .../StorageBackendTestsPartyRecord.scala | 198 + .../backend/StorageBackendTestsPruning.scala | 488 +++ .../StorageBackendTestsReadMetering.scala | 173 + ...torageBackendTestsReassignmentEvents.scala | 817 ++++ .../backend/StorageBackendTestsReset.scala | 194 + .../StorageBackendTestsStringInterning.scala | 53 + .../StorageBackendTestsTimestamps.scala | 61 + ...BackendTestsTransactionStreamsEvents.scala | 122 + .../StorageBackendTestsUserManagement.scala | 523 +++ .../StorageBackendTestsWriteMetering.scala | 117 + .../store/backend/UpdateToDbDtoSpec.scala | 2461 ++++++++++++ .../backend/UpdateToMeteringDbDtoSpec.scala | 167 + .../backend/common/ComposableQuerySpec.scala | 153 + .../h2/H2DataSourceStorageBackendSpec.scala | 51 + .../store/cache/ContractStateCachesSpec.scala | 186 + .../cache/InMemoryFanoutBufferSpec.scala | 538 +++ ...bleCacheBackedContractStoreRaceTests.scala | 497 +++ .../MutableCacheBackedContractStoreSpec.scala | 445 +++ .../platform/store/cache/StateCacheSpec.scala | 292 ++ .../store/dao/BufferedStreamsReaderSpec.scala | 489 +++ .../BufferedTransactionByIdReaderSpec.scala | 108 + .../dao/EventProjectionPropertiesSpec.scala | 480 +++ .../JdbcLedgerDaoActiveContractsSpec.scala | 467 +++ .../store/dao/JdbcLedgerDaoBackend.scala | 191 + 
.../dao/JdbcLedgerDaoBackendH2Database.scala | 16 + .../dao/JdbcLedgerDaoBackendOracle.scala | 14 + .../dao/JdbcLedgerDaoBackendPostgresql.scala | 18 + .../dao/JdbcLedgerDaoCompletionsSpec.scala | 294 ++ .../dao/JdbcLedgerDaoConfigurationSpec.scala | 86 + .../dao/JdbcLedgerDaoContractsSpec.scala | 180 + .../dao/JdbcLedgerDaoDivulgenceSpec.scala | 199 + .../store/dao/JdbcLedgerDaoEventsSpec.scala | 212 ++ .../dao/JdbcLedgerDaoExceptionSpec.scala | 188 + .../dao/JdbcLedgerDaoH2DatabaseSpec.scala | 25 + .../store/dao/JdbcLedgerDaoPackagesSpec.scala | 99 + .../store/dao/JdbcLedgerDaoPartiesSpec.scala | 269 ++ .../store/dao/JdbcLedgerDaoSpecOracle.scala | 24 + .../store/dao/JdbcLedgerDaoSpecPostgres.scala | 27 + .../store/dao/JdbcLedgerDaoSuite.scala | 914 +++++ .../JdbcLedgerDaoTransactionTreesSpec.scala | 339 ++ .../dao/JdbcLedgerDaoTransactionsSpec.scala | 772 ++++ .../JdbcLedgerDaoTransactionsWriterSpec.scala | 72 + .../store/dao/SequentialWriteDaoSpec.scala | 342 ++ .../store/dao/events/ACSReaderSpec.scala | 321 ++ .../dao/events/GroupContiguousSpec.scala | 56 + ...PekkoStreamParallelBatchedLoaderSpec.scala | 219 ++ .../events/TransactionConversionSpec.scala | 63 + .../platform/store/dao/events/UtilsSpec.scala | 22 + .../platform/store/entries/LedgerEntry.scala | 27 + .../store/interning/MockStringInterning.scala | 92 + .../interning/RawStringInterningSpec.scala | 107 + .../interning/StringInterningDomainSpec.scala | 96 + .../interning/StringInterningViewSpec.scala | 312 ++ .../DbConnectionAndDataSourceAroundEach.scala | 66 + .../store/migration/DbDataTypes.scala | 89 + .../migration/MigrationTestSupport.scala | 138 + .../OracleAroundEachForMigrations.scala | 15 + .../MigrationFrom111To116TestPostgres.scala | 609 +++ .../PostgresAroundEachForMigrations.scala | 18 + .../RemovalOfJavaMigrationsPostgres.scala | 69 + .../packagemeta/PackageMetadataSpec.scala | 205 + .../packagemeta/PackageMetadataViewSpec.scala | 68 + .../store/testing/oracle/OracleAround.scala | 200 + 
.../testing/oracle/OracleAroundAll.scala | 23 + .../testing/oracle/OracleAroundEach.scala | 31 + .../testing/oracle/OracleAroundSuite.scala | 31 + .../testing/postgresql/PostgresAround.scala | 120 + .../postgresql/PostgresAroundAll.scala | 23 + .../postgresql/PostgresAroundEach.scala | 36 + .../postgresql/PostgresAroundSuite.scala | 31 + .../testing/postgresql/PostgresDatabase.scala | 23 + .../testing/postgresql/PostgresResource.scala | 19 + .../testing/postgresql/PostgresServer.scala | 12 + .../store/utils/ConcurrencyLimiterSpec.scala | 129 + .../ConcurrentBufferedProcessLogger.scala | 19 + .../util/api/TimestampConversionTest.scala | 31 + .../ExtractSingleMaterializedValueTest.scala | 77 + .../util/pekkostreams/MaxInFlightTest.scala | 135 + .../ledger/ledger-api-errors-README.md | 13 + .../canton/ledger/sandbox/MetricsOwner.scala | 0 .../store/interning/BenchmarkState.scala | 65 + .../InitializationTimeBenchmark.scala | 44 + .../store/interning/UpdateTimeBenchmark.scala | 44 + .../indexer-benchmark-README.md | 3 + .../resources/indexer-benchmark-logback.xml | 17 + .../canton/ledger/api/auth/Main.scala | 265 ++ .../ledger/indexerbenchmark/Config.scala | 149 + .../indexerbenchmark/IndexerBenchmark.scala | 217 ++ .../IndexerBenchmarkResult.scala | 192 + .../canton/ledger/metering/Main.scala | 150 + .../ledger/ledger-common/errors-README.md | 103 + .../configuration/ledger_configuration.proto | 59 + .../main/protobuf/ledger_configuration.rst | 91 + .../digitalasset/canton/caching/Cache.scala | 69 + .../canton/caching/CaffeineCache.scala | 106 + .../caching/DropwizardStatsCounter.scala | 32 + .../canton/caching/MappedCache.scala | 31 + .../digitalasset/canton/caching/NoCache.scala | 16 + .../canton/caching/SizedCache.scala | 45 + .../ledger/api/DeduplicationPeriod.scala | 65 + .../canton/ledger/api/domain/package.scala | 50 + .../ledger/api/refinements/ApiTypes.scala | 63 + .../ledger/api/tls/DecryptionParameters.scala | 107 + .../ledger/api/tls/OcspProperties.scala | 
24 + .../ledger/api/tls/ProtocolDisabler.scala | 35 + .../canton/ledger/api/tls/SecretsUrl.scala | 25 + .../ledger/api/tls/TlsConfiguration.scala | 215 ++ .../canton/ledger/api/tls/TlsInfo.scala | 28 + .../canton/ledger/api/tls/TlsVersion.scala | 27 + .../ledger/api/util/DurationConversion.scala | 16 + .../util/LedgerEffectiveTimeTolerance.scala | 15 + .../canton/ledger/api/util/TimeProvider.scala | 35 + .../ledger/api/util/TimestampConversion.scala | 82 + .../ledger/api/util/ToleranceWindow.scala | 12 + .../ledger/configuration/Configuration.scala | 131 + .../configuration/LedgerTimeModel.scala | 88 + .../canton/ledger/configuration/package.scala | 11 + .../canton/ledger/error/CommonErrors.scala | 128 + .../canton/ledger/error/IndexErrors.scala | 87 + .../canton/ledger/error/LedgerApiErrors.scala | 217 ++ .../ledger/error/PackageServiceErrors.scala | 268 ++ .../ledger/error/ParticipantErrorGroup.scala | 38 + .../error/groups/AdminServiceErrors.scala | 67 + .../groups/AuthorizationChecksErrors.scala | 98 + .../error/groups/CommandExecutionErrors.scala | 682 ++++ .../error/groups/ConsistencyErrors.scala | 264 ++ .../IdentityProviderConfigServiceErrors.scala | 136 + .../groups/PartyManagementServiceErrors.scala | 156 + .../groups/RequestValidationErrors.scala | 316 ++ .../groups/UserManagementServiceErrors.scala | 153 + .../groups/WriteServiceRejectionErrors.scala | 176 + .../canton/ledger/offset/Offset.scala | 49 + .../participant/state/v2/ChangeId.scala | 33 + .../state/v2/SubmissionResult.scala | 44 + .../canton/metrics/CommandMetrics.scala | 101 + .../metrics/DatabaseMetricsFactory.scala | 15 + .../canton/metrics/ExecutionMetrics.scala | 208 ++ .../IdentityProviderConfigStoreMetrics.scala | 23 + .../canton/metrics/IndexDBMetrics.scala | 381 ++ .../canton/metrics/IndexMetrics.scala | 130 + .../metrics/IndexedUpdatesMetrics.scala | 71 + .../canton/metrics/IndexerMetrics.scala | 57 + .../canton/metrics/LAPIMetrics.scala | 96 + 
.../digitalasset/canton/metrics/Metrics.scala | 116 + .../metrics/ParallelIndexerMetrics.scala | 96 + .../metrics/PartyRecordStoreMetrics.scala | 21 + .../canton/metrics/PruningMetrics.scala | 17 + .../canton/metrics/ServicesMetrics.scala | 253 ++ .../metrics/UserManagementMetrics.scala | 26 + .../platform/api/v1/event/EventOps.scala | 119 + .../platform/common/MismatchException.scala | 23 + .../ParticipantIdNotFoundException.scala | 9 + .../participant/util/LfEngineToApi.scala | 176 + .../pekkostreams/dispatcher/Dispatcher.scala | 83 + .../dispatcher/DispatcherImpl.scala | 170 + .../dispatcher/SignalDispatcher.scala | 127 + .../pekkostreams/dispatcher/SubSource.scala | 35 + .../services/time/TimeProviderType.scala | 20 + .../src/test/resources/application.conf | 5 + .../test/resources/gen-test-certificates.sh | 162 + .../openssl-alternative-template.cnf | 90 + .../src/test/resources/openssl-template.cnf | 90 + .../test/resources/test-certificates/ca.crt | 29 + .../test/resources/test-certificates/ca.key | 52 + .../test-certificates/ca_alternative.crt | 29 + .../test-certificates/ca_alternative.key | 52 + .../test-certificates/ca_alternative.pem | 52 + .../test-certificates/client-revoked.crt | 25 + .../test-certificates/client-revoked.csr | 16 + .../test-certificates/client-revoked.key | 28 + .../test-certificates/client-revoked.pem | 28 + .../resources/test-certificates/client.crt | 31 + .../resources/test-certificates/client.csr | 27 + .../resources/test-certificates/client.key | 52 + .../resources/test-certificates/client.pem | 52 + .../resources/test-certificates/index.txt | 4 + .../test/resources/test-certificates/ocsp.crt | 30 + .../test/resources/test-certificates/ocsp.csr | 26 + .../resources/test-certificates/ocsp.key.pem | 52 + .../resources/test-certificates/server.crt | 31 + .../resources/test-certificates/server.csr | 27 + .../resources/test-certificates/server.key | 52 + .../resources/test-certificates/server.pem | 52 + 
.../test-certificates/server.pem.enc | 69 + .../test-models/benchtool-tests-1.15.dar | Bin 0 -> 299379 bytes .../test-models/daml-lf/encoder/test-1.11.dar | Bin 0 -> 3159 bytes .../test-models/daml-lf/encoder/test-1.12.dar | Bin 0 -> 3161 bytes .../test-models/daml-lf/encoder/test-1.13.dar | Bin 0 -> 3435 bytes .../test-models/daml-lf/encoder/test-1.14.dar | Bin 0 -> 3703 bytes .../test-models/daml-lf/encoder/test-1.15.dar | Bin 0 -> 4181 bytes .../test-models/daml-lf/encoder/test-1.6.dar | Bin 0 -> 2874 bytes .../test-models/daml-lf/encoder/test-1.7.dar | Bin 0 -> 3060 bytes .../test-models/daml-lf/encoder/test-1.8.dar | Bin 0 -> 3260 bytes .../daml-lf/encoder/test-1.dev.dar | Bin 0 -> 4550 bytes .../test-models/model-tests-1.15.dar | Bin 0 -> 497148 bytes .../test-models/semantic-tests-1.15.dar | Bin 0 -> 602956 bytes .../ConcurrentCacheBehaviorSpecBase.scala | 23 + .../ConcurrentCacheCachingSpecBase.scala | 96 + .../ConcurrentCacheEvictionSpecBase.scala | 39 + .../caching/ConcurrentCacheSpecBase.scala | 10 + .../caching/MapBackedCacheForTesting.scala | 26 + .../MapBackedCacheForTestingSpec.scala | 20 + .../canton/caching/MappedCacheSpec.scala | 71 + .../canton/caching/NoCacheSpec.scala | 59 + .../canton/caching/SizedCacheSpec.scala | 20 + .../ledger/api/DeduplicationPeriodSpec.scala | 55 + .../ledger/api/grpc/GrpcClientResource.scala | 47 + .../api/tls/DecryptionParametersTest.scala | 131 + .../ledger/api/tls/ProtocolDisablerTest.scala | 74 + .../ledger/api/tls/SecretsUrlTest.scala | 61 + .../ledger/api/tls/TlsConfigurationTest.scala | 196 + .../api/util/TimestampConversionSpec.scala | 144 + .../configuration/ConfigurationSpec.scala | 226 ++ .../configuration/LedgerTimeModelSpec.scala | 144 + .../error/testpackage/DeprecatedError.scala | 26 + .../error/testpackage/SeriousError.scala | 27 + .../subpackage/MildErrorsParent.scala | 42 + .../checks/ledgerid/GetLedgerIdentityIT.scala | 33 + .../participant/util/ValueConversions.scala | 90 + 
.../pekkostreams/FutureTimeouts.scala | 37 + .../dispatcher/DispatcherSpec.scala | 400 ++ .../dispatcher/DispatcherTest.scala | 82 + .../dispatcher/SignalDispatcherTest.scala | 136 + .../canton/testing/utils/TestModels.scala | 21 + .../testing/utils/TestResourceUtils.scala | 30 + .../ledger/ledger-json-api/README.md | 79 + .../src/main/resources/application.conf | 1 + .../canton/cliopts/GlobalLogLevel.scala | 20 + .../digitalasset/canton/cliopts/Http.scala | 8 + .../digitalasset/canton/cliopts/Metrics.scala | 50 + .../digitalasset/canton/cliopts/package.scala | 13 + .../lf/value/json/ApiCodecCompressed.scala | 399 ++ .../lf/value/json/ApiValueImplicits.scala | 43 + .../daml/lf/value/json/JsonVariant.scala | 24 + .../lf/value/json/NavigatorModelAliases.scala | 78 + .../canton/fetchcontracts/AcsTxStreams.scala | 167 + .../canton/fetchcontracts/domain.scala | 167 + .../fetchcontracts/util/BeginBookmark.scala | 53 + .../fetchcontracts/util/ClientUtil.scala | 11 + .../util/ContractStreamStep.scala | 91 + .../fetchcontracts/util/GraphExtensions.scala | 59 + .../util/IdentifierConverters.scala | 24 + .../util/InsertDeleteStep.scala | 84 + .../util/PekkoStreamsUtils.scala | 50 + .../canton/http/CommandService.scala | 364 ++ .../canton/http/ContractsService.scala | 569 +++ .../digitalasset/canton/http/Endpoints.scala | 503 +++ .../canton/http/EndpointsCompanion.scala | 345 ++ .../canton/http/ErrorMessages.scala | 24 + .../canton/http/HealthService.scala | 54 + .../canton/http/HttpApiConfig.scala | 28 + .../canton/http/HttpApiServer.scala | 55 + .../canton/http/HttpService.scala | 292 ++ .../canton/http/JsonApiConfig.scala | 62 + .../canton/http/LedgerClientJwt.scala | 462 +++ .../canton/http/MeteringReportService.scala | 21 + .../http/PackageManagementService.scala | 43 + .../canton/http/PackageService.scala | 538 +++ .../canton/http/PartiesService.scala | 105 + .../canton/http/StartSettings.scala | 25 + .../canton/http/StaticContentEndpoints.scala | 65 + 
.../canton/http/WebSocketService.scala | 1055 ++++++ .../canton/http/WebsocketEndpoints.scala | 183 + .../http/admin/GetPackageResponse.scala | 34 + .../com/digitalasset/canton/http/domain.scala | 1110 ++++++ .../canton/http/endpoints/ContractList.scala | 173 + .../http/endpoints/CreateAndExercise.scala | 154 + .../endpoints/MeteringReportEndpoint.scala | 100 + .../http/endpoints/PackagesAndDars.scala | 69 + .../canton/http/endpoints/Parties.scala | 54 + .../canton/http/endpoints/RouteSetup.scala | 228 ++ .../http/endpoints/UserManagement.scala | 213 ++ .../json/ApiValueToJsValueConverter.scala | 30 + .../canton/http/json/DomainJsonDecoder.scala | 263 ++ .../canton/http/json/DomainJsonEncoder.scala | 76 + .../canton/http/json/ExtraFormats.scala | 23 + .../canton/http/json/HttpCodec.scala | 57 + .../json/JsValueToApiValueConverter.scala | 55 + .../canton/http/json/JsonError.scala | 14 + .../canton/http/json/JsonProtocol.scala | 647 ++++ .../canton/http/json/ResponseFormats.scala | 77 + .../canton/http/json/SprayJson.scala | 127 + .../canton/http/metrics/HttpApiMetrics.scala | 78 + .../canton/http/query/ValuePredicate.scala | 13 + .../util/ApiValueToLfValueConverter.scala | 29 + .../canton/http/util/ClientUtil.scala | 24 + .../canton/http/util/Collections.scala | 30 + .../canton/http/util/Commands.scala | 105 + .../canton/http/util/ErrorOps.scala | 31 + .../canton/http/util/FlowUtil.scala | 24 + .../canton/http/util/FutureUtil.scala | 43 + .../canton/http/util/GrpcHttpErrorCodes.scala | 39 + .../http/util/IdentifierConverters.scala | 44 + .../canton/http/util/JwtParties.scala | 42 + .../canton/http/util/LedgerOffsetUtil.scala | 10 + .../canton/http/util/Logging.scala | 40 + .../canton/http/util/NewBoolean.scala | 42 + .../http/util/ProtobufByteStrings.scala | 27 + .../canton/http/util/Transactions.scala | 28 + .../canton/http/util/package.scala | 13 + .../canton/ledger/service/Grpc.scala | 27 + .../canton/ledger/service/LedgerReader.scala | 219 ++ 
.../ledger/service/MetadataReader.scala | 89 + .../canton/ledger/service/TemplateIds.scala | 36 + .../pureconfigutils/SharedConfigReaders.scala | 103 + .../src/test/daml/JsonEncodingTest.daml | 55 + .../ledger-json-api/src/test/daml/daml.yaml | 10 + .../value/json/ApiCodecCompressedSpec.scala | 590 +++ .../fetchcontracts/AcsTxStreamsTest.scala | 114 + .../util/ContractStreamStepTest.scala | 88 + .../util/InsertDeleteStepTest.scala | 78 + .../canton/http/CommandServiceTest.scala | 178 + .../digitalasset/canton/http/DomainSpec.scala | 54 + .../digitalasset/canton/http/Generators.scala | 178 + .../canton/http/GeneratorsTest.scala | 30 + .../canton/http/PackageServiceTest.scala | 142 + .../MeteringReportEndpointTest.scala | 65 + .../http/endpoints/RouteSetupTest.scala | 22 + .../canton/http/json/JsonProtocolTest.scala | 350 ++ .../http/json/ResponseFormatsTest.scala | 101 + .../util/ApiValueToLfValueConverterTest.scala | 36 + .../canton/http/util/FlowUtilTest.scala | 52 + .../canton/http/util/JwtPartiesTest.scala | 119 + .../http/util/LedgerOffsetUtilTest.scala | 40 + .../ledger/metrics/collectd/README.md | 56 + .../ledger/metrics/collectd/collectd.conf | 81 + .../ledger/metrics/collection/README.md | 59 + .../metrics/collection/docker-compose.yml | 50 + .../grafana/dashboards/collectd.json | 864 +++++ .../dashboards/graphite-carbon-metrics.json | 203 + .../dashboards/ledger-submissions.json | 1916 ++++++++++ .../dashboards/transaction-stream.json | 161 + .../provisioning/dashboards/dashboards.yaml | 17 + .../provisioning/datasources/graphite.yaml | 22 + .../metrics/collection/graphite/.gitignore | 2 + .../default_conf/storage-aggregation.conf | 32 + .../default_conf/storage-schemas.conf | 26 + .../collection/graphite/run_once/init-config | 6 + .../ledger/metrics/collection/reset-all.sh | 11 + .../ledger/metrics/collection/summary.js | 93 + .../lib/daml/simple_package_list/Simple.daml | 30 + .../daml/simple_package_optional/Simple.daml | 30 + 
.../lib/daml/simple_package_party/Simple.daml | 30 + .../daml/simple_package_text_map/Simple.daml | 34 + .../lib/daml/simple_package_tuple/Simple.daml | 30 + .../daml/simple_package_variant/Simple.daml | 33 + .../crypto/digests/canton/Blake2bDigest.java | 456 +++ canton-3x/community/lib/README.md | 12 + .../community/lib/daml-copy-common-0/.gitkeep | 0 .../community/lib/daml-copy-common-1/.gitkeep | 0 .../community/lib/daml-copy-common-2/.gitkeep | 0 .../community/lib/daml-copy-common-3/.gitkeep | 0 .../community/lib/daml-copy-common-4/.gitkeep | 0 .../community/lib/daml-copy-common-5/.gitkeep | 0 .../community/lib/daml-copy-common/.gitkeep | 0 .../com/daml/bazeltools/BazelRunfiles.scala | 14 + .../protobuf-daml-symlinks/archive/com | 1 + .../protobuf-daml-symlinks/transaction/com | 1 + .../lib/daml-copy-testing-0/.gitkeep | 0 .../ledger-resources-test-symlink/scala/com | 1 + .../scala/com | 1 + .../scala/com | 1 + .../ledger-api-sample-service/hello.proto | 1 + .../rs-grpc-bridge-test-symlink/java/com | 1 + .../lib/daml-copy-testing-1/.gitkeep | 0 .../community/lib/daml-copy-testing/.gitkeep | 0 .../rs-grpc-pekko-test-symlink/scala/com | 1 + .../sample-service-test-symlink/scala/com | 1 + canton-3x/community/lib/pekko/LICENSE | 482 +++ .../pekko/stream/scaladsl/BroadcastHub.scala | 466 +++ .../stream/scaladsl/BroadcastHubSpec.scala | 296 ++ .../pekko/stream/testkit/StreamSpec.scala | 62 + .../apache/pekko/stream/testkit/Utils.scala | 35 + .../org/apache/pekko/testkit/Coroner.scala | 289 ++ .../org/apache/pekko/testkit/PekkoSpec.scala | 127 + canton-3x/community/lib/slick/LICENSE.txt | 25 + .../scala/slick/jdbc/canton/StaticQuery.scala | 74 + .../digitalasset/canton/DiscardedFuture.scala | 127 + .../canton/FutureLikeTester.scala | 71 + .../digitalasset/canton/FutureTraverse.scala | 199 + .../canton/GlobalExecutionContext.scala | 66 + .../digitalasset/canton/NonUnitForEach.scala | 157 + .../digitalasset/canton/RequireBlocking.scala | 63 + 
.../com/digitalasset/canton/SlickString.scala | 77 + .../com/digitalasset/canton/TryFailed.scala | 43 + .../canton/DiscardedFutureTest.scala | 181 + .../canton/FutureTraverseTest.scala | 197 + .../canton/GlobalExecutionContextTest.scala | 54 + .../canton/NonUnitForEachTest.scala | 138 + .../canton/RequireBlockingTest.scala | 115 + .../digitalasset/canton/SlickStringTest.scala | 91 + .../digitalasset/canton/TryFailedTest.scala | 48 + .../src/main/daml/AdminWorkflows.daml | 6 + .../participant/src/main/daml/PingPong.daml | 223 ++ .../participant/src/main/daml/daml.yaml | 8 + .../daml/ping-pong-vacuum/PingPongVacuum.daml | 102 + .../src/main/daml/ping-pong-vacuum/daml.yaml | 12 + .../participant/src/main/protobuf/buf.yaml | 4 + .../com/daml/ledger/api/v1/package.proto | 14 + .../admin/v0/domain_connectivity.proto | 150 + ...rise_participant_replication_service.proto | 16 + .../admin/v0/inspection_service.proto | 80 + .../admin/v0/package_service.proto | 154 + .../admin/v0/participant_repair_service.proto | 112 + .../admin/v0/party_name_management.proto | 26 + .../admin/v0/ping_pong_service.proto | 34 + .../admin/v0/pruning_service.proto | 77 + .../v0/resource_management_service.proto | 31 + .../admin/v0/traffic_control_service.proto | 23 + .../admin/v0/transfer_service.proto | 70 + .../admin/v1/participant_repair_service.proto | 17 + .../protocol/v0/ledger_sync_event.proto | 230 ++ .../protocol/v0/submission_tracking.proto | 32 + .../canton/participant/scalapb/package.proto | 14 + .../src/main/resources/dar/AdminWorkflows.dar | Bin 0 -> 317933 bytes .../src/main/resources/ledger-api/VERSION | 1 + .../canton/participant/GlobalOffset.scala | 52 + .../canton/participant/LocalOffset.scala | 144 + .../canton/participant/ParticipantNode.scala | 672 ++++ .../participant/ParticipantNodeCommon.scala | 615 +++ .../ParticipantNodeParameters.scala | 96 + .../canton/participant/ParticipantNodeX.scala | 445 +++ .../canton/participant/Pruning.scala | 54 + 
.../admin/AdminWorkflowConfig.scala | 26 + .../admin/AdminWorkflowService.scala | 18 + .../admin/AdminWorkflowServices.scala | 453 +++ .../admin/CantonPackageServiceError.scala | 157 + .../admin/DomainConnectivityService.scala | 235 ++ .../admin/PackageDependencyResolver.scala | 116 + .../canton/participant/admin/PackageOps.scala | 282 ++ .../participant/admin/PackageService.scala | 505 +++ .../participant/admin/PingService.scala | 896 +++++ .../participant/admin/ResourceLimits.scala | 61 + .../admin/ResourceManagementService.scala | 115 + .../participant/admin/TransferService.scala | 149 + .../admin/data/ActiveContract.scala | 151 + .../SerializableContractWithDomainId.scala | 111 + .../grpc/GrpcDomainConnectivityService.scala | 86 + .../admin/grpc/GrpcInspectionService.scala | 114 + .../admin/grpc/GrpcPackageService.scala | 193 + .../grpc/GrpcParticipantRepairService.scala | 679 ++++ .../grpc/GrpcPartyNameManagementService.scala | 46 + .../admin/grpc/GrpcPingService.scala | 49 + .../admin/grpc/GrpcPruningService.scala | 256 ++ .../grpc/GrpcResourceManagementService.scala | 30 + .../grpc/GrpcTrafficControlService.scala | 62 + .../admin/grpc/GrpcTransferService.scala | 226 ++ .../admin/inspection/AcsInspection.scala | 162 + .../participant/admin/inspection/Error.scala | 44 + .../inspection/SyncStateInspection.scala | 501 +++ .../inspection/TimestampValidation.scala | 44 + .../admin/repair/MigrateContracts.scala | 463 +++ .../admin/repair/RepairContext.scala | 48 + .../admin/repair/RepairRequest.scala | 56 + .../admin/repair/RepairService.scala | 1482 ++++++++ .../admin/repair/RepairServiceError.scala | 172 + .../participant/admin/repair/package.scala | 51 + .../admin/workflows/PackageID.scala | 10 + .../config/AuthServiceConfig.scala | 130 + .../config/LocalParticipantConfig.scala | 568 +++ .../config/ParticipantInitConfig.scala | 65 + .../participant/domain/AgreementClient.scala | 39 + .../participant/domain/AgreementService.scala | 107 + 
.../domain/DomainAliasManager.scala | 120 + .../domain/DomainConnectionConfig.scala | 206 ++ .../participant/domain/DomainRegistry.scala | 412 +++ .../domain/DomainRegistryHelpers.scala | 470 +++ .../domain/DomainServiceClient.scala | 35 + .../ParticipantInitializeTopology.scala | 364 ++ .../domain/grpc/GrpcDomainRegistry.scala | 174 + .../participant/event/AcsChangeListener.scala | 205 + .../event/RecordOrderPublisher.scala | 466 +++ .../canton/participant/event/RecordTime.scala | 41 + .../api/CantonAdminTokenAuthService.scala | 64 + .../api/CantonExternalClockBackend.scala | 29 + .../api/CantonLedgerApiServerWrapper.scala | 266 ++ .../ledger/api/CantonTimeServiceBackend.scala | 37 + .../ledger/api/JwtTokenUtilities.scala | 42 + .../ledger/api/LedgerApiJdbcUrl.scala | 300 ++ .../ledger/api/LedgerApiStorage.scala | 111 + ...eStoppableLedgerApiDependentServices.scala | 139 + .../StartableStoppableLedgerApiServer.scala | 441 +++ .../client/CommandSubmitterWithRetry.scala | 195 + .../ledger/api/client/JavaDecodeUtil.scala | 113 + .../ledger/api/client/LedgerConnection.scala | 482 +++ .../ledger/api/client/ValueRemapper.scala | 83 + .../metrics/ParticipantMetrics.scala | 242 ++ .../participant/metrics/PruningMetrics.scala | 56 + .../TransactionProcessingMetrics.scala | 50 + .../canton/participant/package.scala | 18 + .../protocol/AbstractMessageProcessor.scala | 209 ++ .../protocol/AuthenticationValidator.scala | 77 + .../protocol/AuthorizationValidator.scala | 95 + .../BadRootHashMessagesRequestProcessor.scala | 122 + .../protocol/CanSubmitTransfer.scala | 70 + .../protocol/DefaultMessageDispatcher.scala | 210 ++ .../protocol/MessageDispatcher.scala | 956 +++++ .../MessageProcessingStartingPoint.scala | 196 + ...ticipantTopologyTerminateProcessingX.scala | 92 + .../protocol/Phase37Synchronizer.scala | 264 ++ .../protocol/ProcessingSteps.scala | 612 +++ .../protocol/ProtocolProcessor.scala | 1902 ++++++++++ .../protocol/RepairProcessor.scala | 63 + 
.../protocol/RequestCounterAllocator.scala | 145 + .../participant/protocol/RequestJournal.scala | 473 +++ ...erializableContractAuthenticatorImpl.scala | 103 + .../protocol/SubmissionTracker.scala | 333 ++ .../TopologyTransactionsToEventsX.scala | 118 + .../protocol/TransactionProcessingSteps.scala | 1651 +++++++++ .../protocol/TransactionProcessor.scala | 414 +++ .../conflictdetection/ActivenessResult.scala | 71 + .../conflictdetection/ActivenessSet.scala | 93 + .../conflictdetection/CommitSet.scala | 142 + .../conflictdetection/ConflictDetector.scala | 772 ++++ ...legalConflictDetectionStateException.scala | 10 + .../conflictdetection/LockableState.scala | 297 ++ .../conflictdetection/LockableStates.scala | 666 ++++ .../conflictdetection/LockableStatus.scala | 70 + .../NaiveRequestTracker.scala | 644 ++++ .../conflictdetection/RequestTracker.scala | 403 ++ .../protocol/submission/ChangeId.scala | 34 + .../submission/CommandDeduplicator.scala | 261 ++ .../ConfirmationRequestFactory.scala | 363 ++ .../ContractEnrichmentFactory.scala | 35 + .../protocol/submission/DomainsFilter.scala | 55 + .../EncryptedViewMessageFactory.scala | 380 ++ .../submission/InFlightSubmission.scala | 225 ++ .../InFlightSubmissionTracker.scala | 527 +++ .../protocol/submission/SeedGenerator.scala | 17 + .../submission/SerializableSubmissionId.scala | 41 + .../submission/SubmissionTrackingData.scala | 273 ++ .../submission/TransactionTreeFactory.scala | 170 + .../TransactionTreeFactoryImpl.scala | 667 ++++ .../TransactionTreeFactoryImplV3.scala | 440 +++ .../protocol/submission/UsableDomain.scala | 188 + .../submission/WatermarkTracker.scala | 194 + .../routing/AdmissibleDomains.scala | 186 + .../routing/ContractsDomainData.scala | 49 + .../routing/ContractsTransfer.scala | 138 + .../submission/routing/DomainRank.scala | 150 + .../submission/routing/DomainRouter.scala | 410 ++ .../submission/routing/DomainSelector.scala | 294 ++ .../routing/DomainStateProvider.scala | 77 + 
.../submission/routing/TransactionData.scala | 145 + .../AdminPartiesAndParticipants.scala | 181 + .../transfer/AutomaticTransferIn.scala | 182 + .../transfer/IncompleteTransferData.scala | 133 + .../PartyParticipantPermissions.scala | 115 + .../transfer/RecentTimeProofProvider.scala | 73 + .../transfer/TransferCoordination.scala | 285 ++ .../protocol/transfer/TransferData.scala | 207 ++ .../transfer/TransferInProcessingSteps.scala | 727 ++++ .../transfer/TransferInProcessor.scala | 68 + .../transfer/TransferInValidation.scala | 300 ++ .../transfer/TransferKnownAndVetted.scala | 42 + .../transfer/TransferOutProcessingSteps.scala | 799 ++++ .../transfer/TransferOutProcessor.scala | 68 + .../transfer/TransferOutProcessorError.scala | 109 + .../transfer/TransferOutRequest.scala | 155 + .../TransferOutRequestValidated.scala | 11 + .../transfer/TransferOutValidation.scala | 113 + ...ValidationNonTransferringParticipant.scala | 118 + ...OutValidationTransferringParticipant.scala | 112 + .../transfer/TransferOutValidationUtil.scala | 42 + .../transfer/TransferProcessingSteps.scala | 530 +++ .../ConfirmationResponseFactory.scala | 357 ++ .../ContractConsistencyChecker.scala | 46 + .../validation/ExtractUsedAndCreated.scala | 389 ++ .../ExtractUsedContractsFromRootViews.scala | 32 + .../InternalConsistencyChecker.scala | 396 ++ .../validation/ModelConformanceChecker.scala | 558 +++ .../validation/PendingTransaction.scala | 31 + .../validation/RecipientsValidator.scala | 338 ++ .../protocol/validation/TimeValidator.scala | 82 + .../TransactionValidationResult.scala | 53 + .../protocol/validation/UsedAndCreated.scala | 115 + .../validation/ViewValidationResult.scala | 31 + .../pruning/AcsCommitmentProcessor.scala | 1317 +++++++ .../pruning/NoOpPruningProcessor.scala | 49 + .../participant/pruning/PruneObserver.scala | 143 + .../SortedReconciliationIntervals.scala | 188 + ...ortedReconciliationIntervalsProvider.scala | 69 + ...conciliationIntervalsProviderFactory.scala | 67 + 
.../ParticipantPruningScheduler.scala | 85 + .../ParticipantSchedulersParameters.scala | 20 + .../store/AcsCommitmentStore.scala | 243 ++ .../store/ActiveContractStore.scala | 634 ++++ .../store/CommandDeduplicationStore.scala | 244 ++ .../store/ConflictDetectionStore.scala | 34 + .../store/ContractKeyJournal.scala | 143 + .../participant/store/ContractLookup.scala | 125 + .../participant/store/ContractStore.scala | 173 + .../participant/store/DamlLfSerializers.scala | 122 + .../participant/store/DamlPackageStore.scala | 119 + .../store/DomainConnectionConfigStore.scala | 147 + .../store/DomainParameterStore.scala | 51 + .../store/ExtendedContractLookup.scala | 77 + .../participant/store/HasPrunable.scala | 10 + .../store/InFlightSubmissionStore.scala | 242 ++ .../store/MultiDomainEventLog.scala | 505 +++ .../store/ParticipantEventLog.scala | 89 + .../store/ParticipantNodeEphemeralState.scala | 46 + .../ParticipantNodePersistentState.scala | 320 ++ .../ParticipantPruningSchedulerStore.scala | 56 + .../store/ParticipantPruningStore.scala | 61 + .../store/ParticipantSettingsStore.scala | 90 + .../store/RegisteredDomainsStore.scala | 48 + .../store/RequestJournalStore.scala | 154 + .../store/SerializableLedgerSyncEvent.scala | 1373 +++++++ .../store/ServiceAgreementStore.scala | 106 + .../store/SingleDimensionEventLog.scala | 214 ++ .../store/SubmissionTrackerStore.scala | 35 + .../store/SyncDomainEphemeralState.scala | 218 ++ .../SyncDomainEphemeralStateFactory.scala | 523 +++ .../store/SyncDomainPersistentState.scala | 173 + .../participant/store/TransferStore.scala | 341 ++ .../store/data/ActiveContractsData.scala | 54 + .../store/db/DbAcsCommitmentStore.scala | 746 ++++ .../store/db/DbActiveContractStore.scala | 1370 +++++++ .../db/DbCommandDeduplicationStore.scala | 399 ++ .../store/db/DbContractKeyJournal.scala | 320 ++ .../store/db/DbContractStore.scala | 543 +++ .../store/db/DbDamlPackageStore.scala | 353 ++ .../db/DbDomainConnectionConfigStore.scala | 198 + 
.../store/db/DbDomainParameterStore.scala | 89 + .../store/db/DbInFlightSubmissionStore.scala | 574 +++ .../store/db/DbMultiDomainEventLog.scala | 861 +++++ .../store/db/DbParticipantEventLog.scala | 101 + .../DbParticipantPruningSchedulerStore.scala | 164 + .../store/db/DbParticipantPruningStore.scala | 96 + .../store/db/DbParticipantSettingsStore.scala | 179 + .../store/db/DbRegisteredDomainsStore.scala | 118 + .../store/db/DbRequestJournalStore.scala | 414 +++ .../store/db/DbServiceAgreementStore.scala | 151 + .../store/db/DbSingleDimensionEventLog.scala | 407 ++ .../store/db/DbSubmissionTrackerStore.scala | 136 + .../db/DbSyncDomainPersistentState.scala | 265 ++ .../store/db/DbTransferStore.scala | 676 ++++ .../db/ParticipantStorageImplicits.scala | 47 + .../memory/InMemoryAcsCommitmentStore.scala | 396 ++ .../memory/InMemoryActiveContractStore.scala | 754 ++++ .../InMemoryCommandDeduplicationStore.scala | 125 + .../memory/InMemoryContractKeyJournal.scala | 167 + .../store/memory/InMemoryContractStore.scala | 138 + .../memory/InMemoryDamlPackageStore.scala | 156 + .../InMemoryDomainConnectionConfigStore.scala | 88 + .../memory/InMemoryDomainParameterStore.scala | 34 + .../InMemoryInFlightSubmissionStore.scala | 215 ++ .../memory/InMemoryMultiDomainEventLog.scala | 617 ++++ .../store/memory/InMemoryOffsetsLookup.scala | 49 + .../memory/InMemoryParticipantEventLog.scala | 58 + ...moryParticipantPruningSchedulerStore.scala | 72 + .../InMemoryParticipantPruningStore.scala | 50 + .../InMemoryParticipantSettingsStore.scala | 62 + .../InMemoryRegisteredDomainsStore.scala | 64 + .../memory/InMemoryRequestJournalStore.scala | 133 + .../InMemoryServiceAgreementStore.scala | 81 + .../InMemorySingleDimensionEventLog.scala | 185 + .../InMemorySubmissionTrackerStore.scala | 80 + .../InMemorySyncDomainPersistentState.scala | 121 + .../store/memory/InMemoryTransferStore.scala | 242 ++ .../store/memory/TransferCache.scala | 155 + .../sync/CantonAuthorityResolver.scala | 126 + 
.../CantonDynamicDomainParameterGetter.scala | 78 + .../participant/sync/CantonSyncService.scala | 2009 ++++++++++ .../sync/CommandDeduplicationError.scala | 78 + .../sync/ConnectedDomainsLookup.scala | 26 + .../sync/EventTranslationStrategy.scala | 66 + .../participant/sync/LedgerSyncEvent.scala | 643 ++++ .../sync/ParticipantEventPublisher.scala | 196 + .../participant/sync/PartyAllocation.scala | 189 + .../canton/participant/sync/SyncDomain.scala | 1044 ++++++ .../sync/SyncDomainMigration.scala | 341 ++ .../SyncDomainPersistentStateManager.scala | 457 +++ .../sync/TimelyRejectNotifier.scala | 242 ++ .../participant/sync/TimestampedEvent.scala | 191 + .../sync/TransactionRoutingError.scala | 497 +++ .../sync/UpstreamOffsetConvert.scala | 84 + .../topology/LedgerServerPartyNotifier.scala | 405 ++ .../ParticipantTopologyDispatcher.scala | 807 ++++ .../topology/ParticipantTopologyManager.scala | 563 +++ .../topology/TopologyComponentFactory.scala | 318 ++ .../topology/client/MissingKeysAlerter.scala | 130 + .../traffic/TrafficStateController.scala | 145 + .../TrafficStateTopUpSubscription.scala | 51 + .../canton/participant/util/DAMLe.scala | 347 ++ .../participant/util/LedgerApiUtil.scala | 29 + .../canton/participant/util/StateChange.scala | 42 + .../participant/util/TimeOfChange.scala | 44 + .../src/test/resources/daml/illformed.dar | Bin 0 -> 493 bytes .../src/test/resources/daml/illformed.lf | 17 + .../DefaultParticipantStateValues.scala | 68 + .../admin/GrpcTrafficControlServiceTest.scala | 80 + .../participant/admin/MockLedgerAcs.scala | 59 + .../admin/PackageOpsForTesting.scala | 47 + .../participant/admin/PackageOpsTest.scala | 490 +++ .../admin/PackageServiceTest.scala | 247 ++ .../participant/admin/PingServiceTest.scala | 138 + ...esilientTransactionsSubscriptionTest.scala | 190 + .../admin/data/GeneratorsData.scala | 30 + ...SerializableContractWithDomainIdTest.scala | 50 + .../admin/inspection/AcsInspectionTest.scala | 216 ++ 
.../SerializationDeserializationTest.scala | 25 + ...ParticipantDomainTopologyServiceTest.scala | 137 + .../ledger/api/LedgerApiJdbcUrlTest.scala | 191 + .../CommandSubmitterWithRetryTest.scala | 152 + .../metrics/ParticipantTestMetrics.scala | 23 + .../protocol/MessageDispatcherTest.scala | 1444 ++++++++ .../protocol/Phase37SynchronizerTest.scala | 381 ++ .../protocol/ProtocolProcessorTest.scala | 1080 ++++++ .../RequestCounterAllocatorTest.scala | 137 + .../protocol/RequestJournalTest.scala | 598 +++ ...lizableContractAuthenticatorImplTest.scala | 270 ++ .../protocol/SubmissionTrackerTest.scala | 258 ++ .../protocol/TestProcessingSteps.scala | 364 ++ .../TopologyTransactionsToEventsXTest.scala | 124 + .../TransactionProcessingStepsTest.scala | 103 + .../ActivenessCheckTest.scala | 37 + .../ConflictDetectionHelpers.scala | 286 ++ .../ConflictDetectorTest.scala | 2235 +++++++++++ .../LockableStatesTest.scala | 572 +++ .../NaiveRequestTrackerTest.scala | 138 + .../RequestTrackerTest.scala | 1316 +++++++ .../submission/CommandDeduplicatorTest.scala | 451 +++ .../ConfirmationRequestFactoryTest.scala | 627 ++++ .../submission/DomainSelectionFixture.scala | 185 + .../submission/DomainsFilterTest.scala | 178 + .../submission/NoCommandDeduplicator.scala | 39 + .../submission/SeedGeneratorTest.scala | 23 + .../TestSubmissionTrackingData.scala | 19 + .../TransactionTreeFactoryImplTest.scala | 202 + .../submission/WatermarkTrackerTest.scala | 203 + .../routing/DomainSelectorTest.scala | 561 +++ .../routing/WorkflowIdExtractionTest.scala | 93 + .../protocol/transfer/DAMLeTestInstance.scala | 71 + .../transfer/IncompleteTransferDataTest.scala | 38 + .../transfer/TestTransferCoordination.scala | 120 + .../protocol/transfer/TransferDataTest.scala | 60 + .../TransferInProcessingStepsTest.scala | 748 ++++ .../transfer/TransferInValidationTest.scala | 336 ++ .../TransferOutProcessingStepsTest.scala | 877 +++++ .../transfer/TransferOutValidationTest.scala | 191 + 
.../transfer/TransferResultHelpers.scala | 80 + .../ExtractUsedAndCreatedTest.scala | 196 + .../InternalConsistencyCheckerTest.scala | 81 + .../ModelConformanceCheckerTest.scala | 433 +++ .../validation/RecipientsValidatorTest.scala | 483 +++ .../validation/TimeValidatorTest.scala | 190 + .../pruning/AcsCommitmentProcessorTest.scala | 1776 +++++++++ ...SortedReconciliationIntervalsHelpers.scala | 143 + ...dReconciliationIntervalsProviderTest.scala | 108 + .../SortedReconciliationIntervalsTest.scala | 513 +++ .../store/AcsCommitmentStoreTest.scala | 646 ++++ .../store/ActiveContractStoreTest.scala | 1888 ++++++++++ .../store/CommandDeduplicationStoreTest.scala | 299 ++ .../store/ContractKeyJournalTest.scala | 263 ++ .../participant/store/ContractStoreTest.scala | 461 +++ .../store/DamlLfSerializersTest.scala | 36 + .../store/DamlPackageStoreTest.scala | 517 +++ .../DomainConnectionConfigStoreTest.scala | 144 + .../store/DomainParameterStoreTest.scala | 66 + .../store/ExtendedContractLookupTest.scala | 227 ++ .../canton/participant/store/HookedAcs.scala | 210 ++ .../store/InFlightSubmissionStoreTest.scala | 803 ++++ .../store/MultiDomainEventLogTest.scala | 917 +++++ .../store/ParticipantEventLogTest.scala | 123 + ...ParticipantPruningSchedulerStoreTest.scala | 84 + .../store/ParticipantPruningStoreTest.scala | 86 + .../store/ParticipantSettingsStoreTest.scala | 149 + .../store/PreHookRequestJournalStore.scala | 121 + .../participant/store/PreUpdateHookCkj.scala | 75 + .../store/RegisteredDomainsStoreTest.scala | 61 + .../store/RequestJournalStoreTest.scala | 358 ++ .../store/ServiceAgreementStoreTest.scala | 312 ++ .../store/SingleDimensionEventLogTest.scala | 498 +++ .../store/SubmissionTrackerStoreTest.scala | 118 + .../SyncDomainEphemeralStateFactoryTest.scala | 635 ++++ .../store/ThrowOnWriteCommitmentStore.scala | 156 + .../participant/store/ThrowingAcs.scala | 137 + .../participant/store/ThrowingCkj.scala | 64 + .../participant/store/TransferStoreTest.scala | 
1283 +++++++ .../store/db/DbAcsCommitmentStoreTest.scala | 106 + .../store/db/DbActiveContractStoreTest.scala | 85 + .../db/DbCommandDeduplicationStoreTest.scala | 47 + .../store/db/DbContractKeyJournalTest.scala | 46 + .../store/db/DbContractStoreTest.scala | 77 + .../store/db/DbDamlPackageStoreTest.scala | 44 + .../DbDomainConnectionConfigStoreTest.scala | 42 + .../store/db/DbDomainParameterStoreTest.scala | 32 + .../store/db/DbEventLogTestResources.scala | 39 + .../db/DbInFlightSubmissionStoreTest.scala | 46 + .../store/db/DbMultiDomainEventLogTest.scala | 158 + .../store/db/DbParticipantEventLogTest.scala | 35 + ...ParticipantPruningSchedulerStoreTest.scala | 43 + .../db/DbParticipantPruningStoreTest.scala | 27 + .../db/DbParticipantSettingsStoreTest.scala | 33 + .../db/DbRegisteredDomainsStoreTest.scala | 35 + .../store/db/DbRequestJournalStoreTest.scala | 49 + .../db/DbServiceAgreementStoreTest.scala | 38 + .../db/DbSingleDimensionEventLogTest.scala | 47 + .../db/DbSubmissionTrackerStoreTest.scala | 43 + .../store/db/DbTransferStoreTest.scala | 41 + .../AcsCommitmentStoreTestInMemory.scala | 36 + .../ActiveContractStoreTestInMemory.scala | 23 + ...ommandDeduplicationStoreTestInMemory.scala | 18 + .../ContractKeyJournalTestInMemory.scala | 19 + .../memory/ContractStoreTestInMemory.scala | 16 + .../memory/DamlPackageStoreTestInMemory.scala | 12 + ...ainConnectionConfigStoreTestInMemory.scala | 21 + .../DomainParameterStoreTestInMemory.scala | 18 + .../InFlightSubmissionStoreTestInMemory.scala | 12 + .../MultiDomainEventLogTestInMemory.scala | 106 + .../ParticipantEventLogTestInMemory.scala | 10 + ...antPruningSchedulerStoreTestInMemory.scala | 20 + .../ParticipantPruningStoreTestInMemory.scala | 13 + ...ParticipantSettingsStoreTestInMemory.scala | 13 + .../RegisteredDomainsStoreTestInMemory.scala | 17 + .../RequestJournalStoreTestInMemory.scala | 20 + .../ServiceAgreementStoreTestInMemory.scala | 19 + .../SingleDimensionEventLogTestInMemory.scala | 20 + 
.../SubmissionTrackerStoreTestInMemory.scala | 12 + .../store/memory/TransferCacheTest.scala | 344 ++ .../memory/TransferStoreTestInMemory.scala | 19 + .../sync/CantonAuthorityResolverTest.scala | 166 + .../sync/CantonSyncServiceTest.scala | 282 ++ .../sync/DefaultLedgerSyncEvent.scala | 13 + .../sync/ParticipantEventPublisherTest.scala | 134 + .../sync/TimelyRejectNotifierTest.scala | 207 ++ .../topology/DomainOutboxTest.scala | 334 ++ .../LedgerServerPartyNotifierTest.scala | 172 + .../QueueBasedDomainOutboxXTest.scala | 428 +++ .../StoreBasedDomainOutboxXTest.scala | 463 +++ .../canton/domain/block/BlockOrdering.scala | 73 + .../domain/block/BlockOrderingSequencer.scala | 127 + .../canton/domain/block/SequencerDriver.scala | 212 ++ .../canton/logging/BufferingLogger.java | 88 + .../logging/SuppressingLoggerDispatcher.java | 436 +++ .../com/digitalasset/canton/BaseTest.scala | 412 +++ .../canton/BigDecimalImplicits.scala | 18 + .../digitalasset/canton/CloseableTest.scala | 20 + .../canton/HasExecutionContext.scala | 16 + .../canton/HasExecutorService.scala | 122 + .../canton/HasTempDirectory.scala | 56 + .../com/digitalasset/canton/LogReporter.scala | 44 + .../canton/MockedNodeParameters.scala | 53 + .../canton/ProtocolVersionChecks.scala | 262 ++ .../digitalasset/canton/RepeatableTest.scala | 75 + ...ializationDeserializationTestHelpers.scala | 159 + .../com/digitalasset/canton/TestMetrics.scala | 18 + .../canton/UniquePortGenerator.scala | 83 + .../canton/crypto/CryptoTestHelper.scala | 82 + .../canton/crypto/EncryptionTest.scala | 207 ++ .../crypto/PrivateKeySerializationTest.scala | 64 + .../canton/crypto/RandomTest.scala | 27 + .../canton/crypto/SigningTest.scala | 94 + .../digitalasset/canton/crypto/TestHash.scala | 25 + .../provider/symbolic/SymbolicCrypto.scala | 198 + .../symbolic/SymbolicCryptoProvider.scala | 28 + .../symbolic/SymbolicPrivateCrypto.scala | 57 + .../symbolic/SymbolicPureCrypto.scala | 317 ++ .../canton/ledger/api/IsStatusException.scala 
| 26 + .../canton/ledger/api/MockMessages.scala | 124 + .../canton/logging/LogEntry.scala | 203 + .../canton/logging/SuppressingLogger.scala | 641 ++++ .../canton/logging/SuppressionRule.scala | 74 + .../canton/metrics/CommonMockMetrics.scala | 16 + .../metrics/InMemoryMetricsFactory.scala | 9 + .../protocol/TestDomainParameters.scala | 60 + .../canton/store/db/DbStorageSetup.scala | 364 ++ .../topology/DefaultTestIdentities.scala | 40 + .../topology/TestingIdentityFactory.scala | 680 ++++ .../topology/TestingIdentityFactoryBase.scala | 147 + .../topology/TestingIdentityFactoryX.scala | 659 ++++ .../canton/tracing/TestTelemetry.scala | 62 + .../canton/version/GeneratorsVersion.scala | 72 + .../src/test/resources/logback-test.xml | 60 + .../canton/UniquePortGeneratorTest.scala | 51 + .../lifecycle/OnShutdownRunnerTest.scala | 130 + .../canton/logging/LogEntryTest.scala | 57 + .../logging/SuppressingLoggerTest.scala | 475 +++ .../canton/tracing/TraceContextTest.scala | 85 + .../config/ConfidentialConfigWriter.scala | 21 + .../canton/config/KeyStoreConfig.scala | 46 + .../canton/config/RequireTypes.scala | 347 ++ .../com/digitalasset/canton/error/Alarm.scala | 48 + .../EnterpriseSequencerErrorGroups.scala | 15 + .../canton/error/EthereumErrors.scala | 251 ++ .../canton/error/FabricErrors.scala | 134 + .../canton/error/LogOnCreation.scala | 14 + .../canton/error/SequencerBaseError.scala | 37 + .../canton/time/TimeProvider.scala | 12 + .../canton/util/VersionUtil.scala | 34 + .../version/EthereumContractVersion.scala | 35 + .../canton/logging/ErrorLoggingContext.scala | 143 + .../canton/logging/LoggingContextUtil.scala | 28 + .../logging/LoggingContextWithTrace.scala | 111 + .../canton/logging/NamedLoggerFactory.scala | 133 + .../canton/logging/NamedLogging.scala | 45 + .../canton/logging/NamedLoggingContext.scala | 85 + .../canton/logging/TracedLogger.scala | 31 + .../canton/logging/TracedLoggerOps.scala | 78 + .../digitalasset/canton/logging/package.scala | 14 + 
.../metrics/OnDemandMetricsReader.scala | 61 + .../telemetry/ConfiguredOpenTelemetry.scala | 23 + .../telemetry/OpenTelemetryFactory.scala | 135 + .../canton/tracing/NoTracing.scala | 9 + .../canton/tracing/TelemetryTracing.scala | 15 + .../canton/tracing/TraceContext.scala | 151 + .../canton/tracing/TraceContextGrpc.scala | 66 + .../digitalasset/canton/tracing/Traced.scala | 60 + .../canton/tracing/TracerProvider.scala | 97 + .../canton/tracing/TracingConfig.scala | 55 + .../canton/tracing/W3CTraceContext.scala | 115 + .../main/scala/com/daml/error/BaseError.scala | 154 + .../error/ContextualizedErrorLogger.scala | 34 + .../main/scala/com/daml/error/DamlError.scala | 36 + .../scala/com/daml/error/ErrorCategory.scala | 393 ++ .../scala/com/daml/error/ErrorClass.scala | 30 + .../main/scala/com/daml/error/ErrorCode.scala | 289 ++ .../scala/com/daml/error/ErrorGroup.scala | 12 + .../scala/com/daml/error/ErrorResource.scala | 103 + .../scala/com/daml/error/GrpcStatuses.scala | 27 + .../main/scala/com/daml/error/NoLogging.scala | 20 + .../com/daml/error/samples/Example.scala | 124 + .../error/utils/DeserializedCantonError.scala | 207 ++ .../com/daml/error/utils/ErrorDetails.scala | 118 + .../error/ContextualizedErrorLoggerSpec.scala | 19 + .../scala/com/daml/error/ErrorCodeSpec.scala | 333 ++ .../scala/com/daml/error/ErrorGroupSpec.scala | 36 + .../com/daml/error/ErrorsAssertions.scala | 127 + .../com/daml/error/GrpcStatusesSpec.scala | 57 + .../error/SecuritySensitiveMessageSpec.scala | 26 + .../error/samples/SampleClientSideSpec.scala | 15 + .../com/daml/error/utils/BenignError.scala | 23 + .../utils/DeserializedCantonErrorSpec.scala | 173 + .../daml/error/utils/ErrorDetailsSpec.scala | 71 + .../com/daml/error/utils/SevereError.scala | 31 + ci/build-canton-3x.sh | 11 +- ci/build.yml | 1 - ci/cron/daily-compat.yml | 51 +- 3398 files changed, 488903 insertions(+), 34 deletions(-) create mode 100644 canton-3x/community/LICENSE-open-source-bundle.txt create mode 100644 
canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/GrpcCtlRunner.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/AdminCommand.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/DomainAdminCommands.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/DomainTimeCommands.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/EnterpriseMediatorAdministrationCommands.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/EnterpriseSequencerAdminCommands.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/LedgerApiCommands.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/LedgerApiTypeWrappers.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/LedgerApiV2Commands.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/ParticipantAdminCommands.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/PruningSchedulerCommands.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/SequencerAdminCommands.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/StatusAdminCommands.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/TopologyAdminCommands.scala create mode 100644 
canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/TopologyAdminCommandsX.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/VaultAdminCommands.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/CommunityCantonStatus.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/ConsoleApiDataObjects.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/DomainParameters.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/LedgerApiObjectMeta.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/Metering.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/PartyDetails.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/PruningSchedule.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/TemplateId.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/Topology.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/UserManagement.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/crypto/CryptoKeys.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/crypto/Encryption.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/crypto/Hash.scala create mode 100644 
canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/crypto/Signing.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/topologyx/TopologyX.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/config/CantonCommunityConfig.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/config/CantonConfig.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/config/CommunityConfigValidations.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/config/ConfigErrors.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/AdminCommandRunner.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/AmmoniteCacheLock.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/AmmoniteConsoleConfig.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/BootstrapScriptException.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/CommunityCantonHealthAdministration.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/CommunityHealthDumpGenerator.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleCommandResult.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleEnvironment.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleEnvironmentBinding.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleErrorHandler.scala create mode 100644 
canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleGrpcAdminCommandRunner.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleMacros.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleOutput.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/FeatureFlag.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/HealthDumpGenerator.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/Help.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/Helpful.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/InstanceReference.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/InstancesExtensions.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/ParticipantReferencesExtensions.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ConsoleCommandGroup.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/DomainAdministration.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/GrpcByteChunksToFileObserver.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/HealthAdministration.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/LedgerApiAdministration.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/MediatorAdministrationGroup.scala create mode 100644 
canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ParticipantAdministration.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ParticipantRepairAdministration.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/PartiesAdministration.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/PartiesAdministrationX.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/PruningSchedulerAdministration.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/SequencerAdministrationGroup.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/TopologyAdministration.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/TopologyAdministrationX.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/TrafficControlAdministrationGroup.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/TrafficControlSequencerAdministrationGroup.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/VaultAdministration.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/package.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/package.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/environment/CommunityEnvironment.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/environment/Environment.scala create mode 100644 
canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/environment/Errors.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/environment/Nodes.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/health/HealthCheck.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/health/HealthCheckResult.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/health/HealthServer.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/metrics/MetricsFactory.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/metrics/MetricsSnapshot.scala create mode 100644 canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/metrics/Reporters.scala create mode 120000 canton-3x/community/app/src/main/resources/LICENSE-open-source-bundle.txt create mode 100644 canton-3x/community/app/src/main/resources/application.conf create mode 100644 canton-3x/community/app/src/main/resources/logback.xml create mode 100644 canton-3x/community/app/src/main/resources/repl/banner.txt create mode 100644 canton-3x/community/app/src/main/scala/com/digitalasset/canton/CantonAppDriver.scala create mode 100644 canton-3x/community/app/src/main/scala/com/digitalasset/canton/CantonCommunityApp.scala create mode 100644 canton-3x/community/app/src/main/scala/com/digitalasset/canton/Runner.scala create mode 100644 canton-3x/community/app/src/main/scala/com/digitalasset/canton/cli/Cli.scala create mode 100644 canton-3x/community/app/src/main/scala/com/digitalasset/canton/cli/Command.scala create mode 100644 canton-3x/community/app/src/main/scala/com/digitalasset/canton/config/Generate.scala create mode 100644 canton-3x/community/app/src/main/scala/com/digitalasset/canton/console/BindingsBridge.scala create mode 100644 
canton-3x/community/app/src/main/scala/com/digitalasset/canton/console/HeadlessConsole.scala create mode 100644 canton-3x/community/app/src/main/scala/com/digitalasset/canton/console/InteractiveConsole.scala create mode 100755 canton-3x/community/app/src/pack/bin/canton create mode 100644 canton-3x/community/app/src/pack/bin/canton.bat create mode 100644 canton-3x/community/app/src/pack/examples/01-simple-topology/README.md create mode 100644 canton-3x/community/app/src/pack/examples/01-simple-topology/simple-ping.canton create mode 100644 canton-3x/community/app/src/pack/examples/01-simple-topology/simple-topology.conf create mode 100644 canton-3x/community/app/src/pack/examples/02-global-domain/README.md create mode 100644 canton-3x/community/app/src/pack/examples/02-global-domain/global-domain-participant.canton create mode 100644 canton-3x/community/app/src/pack/examples/02-global-domain/global-domain-participant.conf create mode 100644 canton-3x/community/app/src/pack/examples/03-advanced-configuration/README.md create mode 100644 canton-3x/community/app/src/pack/examples/03-advanced-configuration/api/jwt/certificate.conf create mode 100644 canton-3x/community/app/src/pack/examples/03-advanced-configuration/api/jwt/jwks.conf create mode 100644 canton-3x/community/app/src/pack/examples/03-advanced-configuration/api/jwt/leeway-parameters.conf create mode 100644 canton-3x/community/app/src/pack/examples/03-advanced-configuration/api/jwt/unsafe-hmac256.conf create mode 100644 canton-3x/community/app/src/pack/examples/03-advanced-configuration/api/large-in-memory-fan-out.conf create mode 100644 canton-3x/community/app/src/pack/examples/03-advanced-configuration/api/large-ledger-api-cache.conf create mode 100644 canton-3x/community/app/src/pack/examples/03-advanced-configuration/api/public-admin.conf create mode 100644 canton-3x/community/app/src/pack/examples/03-advanced-configuration/api/public.conf create mode 100644 
canton-3x/community/app/src/pack/examples/03-advanced-configuration/api/wildcard.conf create mode 100644 canton-3x/community/app/src/pack/examples/03-advanced-configuration/nodes/domain1.conf create mode 100644 canton-3x/community/app/src/pack/examples/03-advanced-configuration/nodes/participant1.conf create mode 100644 canton-3x/community/app/src/pack/examples/03-advanced-configuration/nodes/participant2.conf create mode 100644 canton-3x/community/app/src/pack/examples/03-advanced-configuration/nodes/participant3.conf create mode 100644 canton-3x/community/app/src/pack/examples/03-advanced-configuration/nodes/participant4.conf create mode 100644 canton-3x/community/app/src/pack/examples/03-advanced-configuration/parameters/nonuck.conf create mode 100644 canton-3x/community/app/src/pack/examples/03-advanced-configuration/participant-init.canton create mode 100644 canton-3x/community/app/src/pack/examples/03-advanced-configuration/remote/domain1.conf create mode 100644 canton-3x/community/app/src/pack/examples/03-advanced-configuration/remote/participant1.conf create mode 100644 canton-3x/community/app/src/pack/examples/03-advanced-configuration/storage/dbinit.py create mode 100644 canton-3x/community/app/src/pack/examples/03-advanced-configuration/storage/h2.conf create mode 100644 canton-3x/community/app/src/pack/examples/03-advanced-configuration/storage/memory.conf create mode 100644 canton-3x/community/app/src/pack/examples/03-advanced-configuration/storage/postgres.conf create mode 100644 canton-3x/community/app/src/pack/examples/04-create-daml-app/canton.conf create mode 100644 canton-3x/community/app/src/pack/examples/04-create-daml-app/init.canton create mode 100644 canton-3x/community/app/src/pack/examples/05-composability/README.md create mode 100644 canton-3x/community/app/src/pack/examples/05-composability/composability-auto-transfer.canton create mode 100644 canton-3x/community/app/src/pack/examples/05-composability/composability.conf create mode 
100644 canton-3x/community/app/src/pack/examples/05-composability/composability1.canton create mode 100644 canton-3x/community/app/src/pack/examples/05-composability/composability2.canton create mode 100644 canton-3x/community/app/src/pack/examples/06-messaging/README.md create mode 100644 canton-3x/community/app/src/pack/examples/06-messaging/canton.conf create mode 100644 canton-3x/community/app/src/pack/examples/06-messaging/contact/.gitignore create mode 100644 canton-3x/community/app/src/pack/examples/06-messaging/contact/daml.yaml create mode 100644 canton-3x/community/app/src/pack/examples/06-messaging/contact/daml/Contact.daml create mode 100644 canton-3x/community/app/src/pack/examples/06-messaging/contact/daml/Contact.solution create mode 100644 canton-3x/community/app/src/pack/examples/06-messaging/contact/frontend-config.js create mode 100644 canton-3x/community/app/src/pack/examples/06-messaging/init.canton create mode 100644 canton-3x/community/app/src/pack/examples/06-messaging/message/.gitignore create mode 100644 canton-3x/community/app/src/pack/examples/06-messaging/message/daml.yaml create mode 100644 canton-3x/community/app/src/pack/examples/06-messaging/message/daml/Message.daml create mode 100644 canton-3x/community/app/src/pack/examples/06-messaging/message/frontend-config.js create mode 100644 canton-3x/community/app/src/pack/examples/07-repair/README.md create mode 100644 canton-3x/community/app/src/pack/examples/07-repair/domain-export-ledger.conf create mode 100644 canton-3x/community/app/src/pack/examples/07-repair/domain-import-ledger.conf create mode 100644 canton-3x/community/app/src/pack/examples/07-repair/domain-repair-init.canton create mode 100644 canton-3x/community/app/src/pack/examples/07-repair/domain-repair-lost.conf create mode 100644 canton-3x/community/app/src/pack/examples/07-repair/domain-repair-new.conf create mode 100644 canton-3x/community/app/src/pack/examples/07-repair/enable-preview-commands.conf create mode 100644 
canton-3x/community/app/src/pack/examples/07-repair/import-ledger-init.canton create mode 100644 canton-3x/community/app/src/pack/examples/07-repair/participant1.conf create mode 100644 canton-3x/community/app/src/pack/examples/07-repair/participant2.conf create mode 100644 canton-3x/community/app/src/pack/examples/07-repair/participant3.conf create mode 100644 canton-3x/community/app/src/pack/examples/07-repair/participant4.conf create mode 100644 canton-3x/community/app/src/pack/lib/canton.ico create mode 100644 canton-3x/community/app/src/test/resources/advancedConfDef.env create mode 100644 canton-3x/community/app/src/test/resources/config-snippets/disable-ammonite-cache.conf create mode 100644 canton-3x/community/app/src/test/resources/deprecated-configs/backwards-compatible.conf create mode 100644 canton-3x/community/app/src/test/resources/deprecated-configs/new-config-fields-take-precedence.conf create mode 100644 canton-3x/community/app/src/test/resources/documentation-snippets/auth-token-config.conf create mode 100644 canton-3x/community/app/src/test/resources/documentation-snippets/caching-configs.conf create mode 100644 canton-3x/community/app/src/test/resources/documentation-snippets/command-service-max-commands-in-flight.conf create mode 100644 canton-3x/community/app/src/test/resources/documentation-snippets/console-timeouts.conf create mode 100644 canton-3x/community/app/src/test/resources/documentation-snippets/dev-version-support.conf create mode 100644 canton-3x/community/app/src/test/resources/documentation-snippets/ledger-api-target-audience.conf create mode 100644 canton-3x/community/app/src/test/resources/documentation-snippets/ledger-api-target-scope.conf create mode 100644 canton-3x/community/app/src/test/resources/documentation-snippets/logging-event-details.conf create mode 100644 canton-3x/community/app/src/test/resources/documentation-snippets/migrate-and-start.conf create mode 100644 
canton-3x/community/app/src/test/resources/documentation-snippets/no-fail-fast.conf create mode 100644 canton-3x/community/app/src/test/resources/documentation-snippets/non-standard-config.conf create mode 100644 canton-3x/community/app/src/test/resources/documentation-snippets/non-uck-mode.conf create mode 100644 canton-3x/community/app/src/test/resources/documentation-snippets/postgres-ssl.conf create mode 100644 canton-3x/community/app/src/test/resources/documentation-snippets/startup-parallelism.conf create mode 100644 canton-3x/community/app/src/test/resources/documentation-snippets/storage-queue-size.conf create mode 100644 canton-3x/community/app/src/test/resources/dummy.crt create mode 120000 canton-3x/community/app/src/test/resources/examples create mode 100644 canton-3x/community/app/src/test/resources/invalid-configs/bort.conf create mode 100644 canton-3x/community/app/src/test/resources/invalid-configs/duplicate-storage.conf create mode 100644 canton-3x/community/app/src/test/resources/invalid-configs/include-missing-file.conf create mode 100644 canton-3x/community/app/src/test/resources/invalid-configs/invalid-node-names.conf create mode 100644 canton-3x/community/app/src/test/resources/invalid-configs/missing-bracket.conf create mode 100644 canton-3x/community/app/src/test/resources/invalid-configs/negative-port.conf create mode 100644 canton-3x/community/app/src/test/resources/invalid-configs/no-manual-start.conf create mode 100644 canton-3x/community/app/src/test/resources/invalid-configs/require-missing-file.conf create mode 100644 canton-3x/community/app/src/test/resources/invalid-configs/storage-url-with-password.conf create mode 100644 canton-3x/community/app/src/test/resources/invalid-configs/undefined-env-var.conf create mode 100644 canton-3x/community/app/src/test/resources/invalid-configs/unknown-key-in-nested-config.conf create mode 100644 canton-3x/community/app/src/test/resources/scripts/bootstrap-with-error-dynamic.canton create mode 
100644 canton-3x/community/app/src/test/resources/scripts/bootstrap-with-error.canton create mode 100644 canton-3x/community/app/src/test/resources/scripts/bootstrap.canton create mode 100644 canton-3x/community/app/src/test/resources/scripts/participant1.canton create mode 100644 canton-3x/community/app/src/test/resources/scripts/run.canton create mode 100644 canton-3x/community/app/src/test/resources/scripts/startup.canton create mode 100644 canton-3x/community/app/src/test/scala/com/digitalasset/canton/ConfigStubs.scala create mode 100644 canton-3x/community/app/src/test/scala/com/digitalasset/canton/admin/api/client/GrpcCtlRunnerTest.scala create mode 100644 canton-3x/community/app/src/test/scala/com/digitalasset/canton/admin/api/client/data/MeteringTest.scala create mode 100644 canton-3x/community/app/src/test/scala/com/digitalasset/canton/cli/CliTest.scala create mode 100644 canton-3x/community/app/src/test/scala/com/digitalasset/canton/config/CantonCommunityConfigTest.scala create mode 100644 canton-3x/community/app/src/test/scala/com/digitalasset/canton/console/AmmoniteCacheLockTest.scala create mode 100644 canton-3x/community/app/src/test/scala/com/digitalasset/canton/console/ConsoleCommandResultTest.scala create mode 100644 canton-3x/community/app/src/test/scala/com/digitalasset/canton/console/ConsoleMacrosTest.scala create mode 100644 canton-3x/community/app/src/test/scala/com/digitalasset/canton/console/ConsoleTest.scala create mode 100644 canton-3x/community/app/src/test/scala/com/digitalasset/canton/console/HelpTest.scala create mode 100644 canton-3x/community/app/src/test/scala/com/digitalasset/canton/environment/CommunityEnvironmentTest.scala create mode 100644 canton-3x/community/app/src/test/scala/com/digitalasset/canton/environment/NodesTest.scala create mode 100644 canton-3x/community/app/src/test/scala/com/digitalasset/canton/health/HealthCheckTest.scala create mode 100644 
canton-3x/community/app/src/test/scala/com/digitalasset/canton/health/HealthServerTest.scala create mode 100644 canton-3x/community/app/src/test/scala/com/digitalasset/canton/integration/tests/EnterpriseFeatureInCommunityIntegrationTest.scala create mode 100644 canton-3x/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ExampleIntegrationTest.scala create mode 100644 canton-3x/community/app/src/test/scala/com/digitalasset/canton/integration/tests/release/CliIntegrationTest.scala create mode 100644 canton-3x/community/app/src/test/scala/com/digitalasset/canton/metrics/MetricsFactoryTest.scala create mode 100644 canton-3x/community/app/src/test/scala/com/digitalasset/canton/metrics/MetricsFactoryValues.scala create mode 100644 canton-3x/community/base/src/main/protobuf/buf.yaml create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/crypto/admin/v0/vault_service.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/crypto/v0/crypto.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/domain/api/v0/sequencer_authentication_service.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/domain/api/v0/sequencer_connect_service.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/domain/api/v0/sequencer_connection.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/domain/api/v0/sequencer_service.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/domain/api/v0/service_agreement.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/domain/api/v1/sequencer_connection.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/health/admin/v0/status_service.proto create mode 100644 
canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/acs_commitments.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/causality.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/common.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/mediator.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/mediator_response.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/participant_transaction.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/participant_transfer.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/sequencing.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/storage.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/synchronization.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/topology.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/traffic_control_parameters.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/versioned-google-rpc-status.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/common.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/mediator.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/mediator_response.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/merkle.proto 
create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/participant_transaction.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/participant_transfer.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/sequencing.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/signed_content.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/synchronization.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/topology.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v2/domain_params.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v2/mediator.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v2/mediator_response.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v2/participant_transaction.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v2/participant_transfer.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v2/topology.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v3/mediator.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v3/participant_transaction.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v4/synchronization.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/pruning/admin/v0/pruning.proto create mode 100644 
canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/scalapb/package.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/time/admin/v0/domain_time_service.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/time/admin/v0/time_tracker_config.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/time/v0/time_proof.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v0/initialization_service.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v0/topology_aggregation_service.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v0/topology_ext.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v0/topology_manager_read_service.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v0/topology_manager_write_service.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v1/initialization_service.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v1/topology_manager_read_service.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v1/topology_manager_write_service.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/traffic/v0/member_traffic_status.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/v0/trace_context.proto create mode 100644 canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/version/untyped_versioned_message.proto create mode 100644 canton-3x/community/base/src/main/protobuf/google/rpc/package.proto create 
mode 100644 canton-3x/community/base/src/main/resources/rewrite-appender.xml create mode 100644 canton-3x/community/base/src/main/resources/rewrite-async-appender.xml create mode 100644 canton-3x/community/base/src/main/scala/com/daml/nonempty/NonEmptyUtil.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/ProtoDeserializationError.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/Tags.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/common/domain/ServiceAgreement.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/concurrent/DirectExecutionContext.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/concurrent/ExecutionContextMonitor.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/concurrent/ExecutorServiceExtensions.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/concurrent/FutureSupervisor.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/concurrent/HasFutureSupervision.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/concurrent/IdlenessExecutorService.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/concurrent/SupervisedPromise.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/concurrent/Threading.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/ApiLoggingConfig.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/CacheConfig.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/CantonRequireTypes.scala create mode 100644 
canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/DomainTimeTrackerConfig.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/LoggingConfig.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/ProcessingTimeouts.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/QueryCostMonitoringConfig.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/RefinedNonNegativeDuration.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/SequencerConnectionConfig.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/ServerConfig.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/StorageConfig.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/TestingConfigInternal.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/TimeProofRequestConfig.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/CryptoApi.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/CryptoKeys.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/Encryption.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/Hash.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/HashBuilder.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/HashPurpose.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/Hkdf.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/Hmac.scala create mode 
100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/JavaKeyConverter.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/Nonce.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/Random.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/Salt.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/Signing.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/SyncCryptoApiProvider.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/X509CertificatePem.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/store/CryptoPrivateStore.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/store/CryptoPrivateStoreExtended.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/store/CryptoPublicStore.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/store/db/DbCryptoPrivateStore.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/store/db/DbCryptoPublicStore.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/store/memory/InMemoryCryptoPrivateStore.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/store/memory/InMemoryCryptoPublicStore.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/ActionDescription.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/CantonTimestamp.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/CantonTimestampSecond.scala create mode 100644 
canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/CommonMetadata.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/Counter.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/FullInformeeTree.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/FullTransactionViewTree.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/GenTransactionTree.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/GenTransferViewTree.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/Informee.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/InformeeTree.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/KeyResolution.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/LightTransactionViewTree.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/MerkleSeq.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/MerkleTree.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/ParticipantMetadata.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/ParticipantTransactionView.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/PeanoQueue.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/PeanoTreeQueue.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/ProcessedDisclosedContract.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/RepairContract.scala create mode 100644 
canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/SubmitterMetadata.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/Timestamp.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/TransactionSubviews.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/TransactionView.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/TransactionViewTree.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/TransferInViewTree.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/TransferOutViewTree.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/TransferSubmitterMetadata.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/ViewCommonData.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/ViewParticipantData.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/ViewPosition.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/ViewTree.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/ViewType.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/Witnesses.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/environment/CantonNodeParameters.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/error/CantonError.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/error/CantonErrorGroups.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/error/DecodedRpcStatus.scala create mode 100644 
canton-3x/community/base/src/main/scala/com/digitalasset/canton/error/MediatorError.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/error/TransactionError.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/external/BackgroundRunner.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/AtomicHealthElement.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/CloseableHealthElement.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/ComponentHealthState.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/ComponentStatus.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/CompositeHealthElement.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/HealthComponent.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/HealthElement.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/HealthListener.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/HealthService.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/MutableHealthComponent.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/ServiceHealthStatusManager.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/ToComponentHealthState.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/lifecycle/ClosingException.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/lifecycle/FlagCloseable.scala create mode 100644 
canton-3x/community/base/src/main/scala/com/digitalasset/canton/lifecycle/FlagCloseableAsync.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/lifecycle/FutureUnlessShutdown.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/lifecycle/Lifecycle.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/lifecycle/OnShutdownRunner.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/lifecycle/PerformUnlessClosing.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/lifecycle/PromiseUnlessShutdown.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/lifecycle/ShutdownFailedException.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/lifecycle/UnlessShutdown.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/lifecycle/package.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/logging/LastErrorsAppender.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/logging/RewritingAppender.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/logging/pretty/CantonPrettyPrinter.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/logging/pretty/Pretty.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/logging/pretty/PrettyInstances.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/logging/pretty/PrettyPrinting.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/logging/pretty/PrettyUtil.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/metrics/DbStorageMetrics.scala create mode 100644 
canton-3x/community/base/src/main/scala/com/digitalasset/canton/metrics/Gauges.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/metrics/MetricHandle.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/metrics/MetricsFactoryType.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/metrics/SequencerClientMetrics.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/networking/Endpoint.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/ApiRequestLogger.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/CantonCommunityServerInterceptors.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/CantonGrpcUtil.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/CantonServerBuilder.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/ClientChannelBuilder.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/GrpcError.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/package.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/AgreementText.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/CantonContractIdVersion.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/ConfirmationPolicy.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/ContractMetadata.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/CreatedContract.scala create mode 100644 
canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/DomainParameters.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/DomainParametersLookup.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/DriverContractMetadata.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/GlobalKeySerialization.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/HasSerializableContract.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/InputContract.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/LfHashSyntax.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/Phase37Processor.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/RefIdentifierSyntax.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/ResolvedKey.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/RollbackContext.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/SerializableContract.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/SerializableContractWithWitnesses.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/SerializableDeduplicationPeriod.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/SerializableRawContractInstance.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/Tags.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/TransferDomainId.scala create 
mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/Unicum.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/AcsCommitment.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/CausalityMessage.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/DomainTopologyTransactionMessage.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/EncryptedViewMessage.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/EnvelopeContent.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/HasDomainId.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/HasRequestId.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/InformeeMessage.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/LocalVerdict.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/MalformedMediatorRequestResult.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/MediatorRequest.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/MediatorResponse.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/MediatorResult.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/ProtocolMessage.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/RegisterTopologyTransactionRequest.scala create mode 
100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/RegisterTopologyTransactionResponse.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/RootHashMessage.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/RootHashMessageRecipients.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/SignedProtocolMessage.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/SignedProtocolMessageContent.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/TransactionResultMessage.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/TransferInMediatorMessage.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/TransferOutMediatorMessage.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/TransferResult.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/TypedSignedProtocolMessageContent.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/Verdict.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/package.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/package.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/pruning/PruningPhase.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/resource/DatabaseStorageError.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/resource/DbMigrations.scala create 
mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/resource/DbStorageSingle.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/resource/DbVersionCheck.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/resource/Storage.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/resource/TransactionalStoreUpdate.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/ApplicationHandler.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/AsyncResult.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/EnvelopeBox.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/HandlerResult.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencedEventMonotonicityChecker.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerAggregator.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerAggregatorPekko.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerClientRecorder.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnection.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnections.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/TrafficControlParameters.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/authentication/AuthenticationToken.scala create mode 100644 
canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/authentication/AuthenticationTokenProvider.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/authentication/MemberAuthentication.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/authentication/grpc/AuthenticationTokenManager.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/authentication/grpc/Constant.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/authentication/grpc/SequencerClientAuthentication.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/DelayedSequencerClient.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/PeriodicAcknowledgements.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/ReplayConfig.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/RequestSigner.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/ResilientSequencerSubscriberPekko.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/ResilientSequencerSubscription.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SendAsyncClientError.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SendCallback.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SendResult.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SendTracker.scala create mode 100644 
canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SendType.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencedEventValidator.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClient.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientConfig.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientFactory.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientSend.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientSubscriptionError.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientSubscriptionException.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientTransportFactory.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerSubscription.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerSubscriptionPekko.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerTransportState.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SubscriptionErrorRetryPolicy.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SubscriptionErrorRetryPolicyPekko.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SubscriptionRetryDelayRule.scala create mode 100644 
canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/grpc/GrpcSequencerChannelBuilder.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/package.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/GrpcSequencerClientAuth.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/GrpcSequencerClientTransport.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/GrpcSequencerClientTransportPekko.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/GrpcSequencerSubscription.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/GrpcSubscriptionErrorRetryPolicy.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/GrpcSubscriptionErrorRetryPolicyPekko.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/SequencerClientTransport.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/SequencerClientTransportPekko.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/replay/ReplayingEventsSequencerClientTransport.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/replay/ReplayingSendsSequencerClientTransport.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/handlers/CleanSequencerCounterTracker.scala create mode 100644 
canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/handlers/CounterCapture.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/handlers/HasReceivedEvent.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/handlers/StoreSequencedEvent.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/handlers/ThrottlingApplicationEventHandler.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/handshake/HandshakeRequestError.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/handshake/SequencerHandshake.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/handshake/SupportsHandshake.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/package.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/AcknowledgeRequest.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/AggregationId.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/AggregationRule.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/Batch.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/ClosedEnvelope.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/Envelope.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/HandshakeRequest.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/HandshakeResponse.scala create mode 
100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/MessageId.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/OpenEnvelope.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/Recipient.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/Recipients.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/RecipientsTree.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/SendAsyncError.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/SequencedEvent.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/SequencedEventTrafficState.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/SequencerDeliverError.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/SignedContent.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/SubmissionRequest.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/SubscriptionRequest.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/SubscriptionResponse.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/TopologyStateForInitRequest.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/TopologyStateForInitResponse.scala create mode 100644 
canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/TrafficState.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/serialization/DeterministicEncoding.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/serialization/HasCryptographicEvidence.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/serialization/ProtoConverter.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/CursorPreheadStore.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/IndexedStringStore.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/PrunableByTime.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/SendTrackerStore.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/SequencedEventStore.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/SequencerCounterTrackerStore.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/SessionKeyStore.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/db/DbCursorPreheadStore.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/db/DbExceptions.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/db/DbIndexedStringStore.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/db/DbPrunableByTime.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/db/DbSequencedEventStore.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/db/DbSequencerCounterTrackerStore.scala create mode 
100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/db/RequiredTypesCodec.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/db/SequencerClientDiscriminator.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/memory/InMemoryCursorPreheadStore.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/memory/InMemoryIndexedStringStore.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/memory/InMemoryPrunableByTime.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/memory/InMemorySendTrackerStore.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/memory/InMemorySequencedEventStore.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/memory/InMemorySequencerCounterTrackerStore.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/time/Clock.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/time/DomainTimeTracker.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/time/PeriodicAction.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/time/RefinedDurations.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/time/TimeAwaiter.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/time/TimeProof.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/time/TimeProofRequestSubmitter.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/DomainOutboxQueue.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/Identifier.scala 
create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/KeyCollection.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/MediatorRef.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/Member.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManager.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManagerError.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManagerStatus.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManagerX.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyStateProcessorX.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/client/CachingDomainTopologyClient.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/client/DomainTrafficStateClient.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/client/IdentityProvidingServiceClient.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/client/StoreBasedDomainTopologyClient.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/client/StoreBasedDomainTopologyClientX.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/AuthorizationChainX.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/AuthorizationGraph.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/AuthorizationGraphX.scala create mode 100644 
canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/DomainTopologyTransactionMessageValidator.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/IncomingTopologyTransactionAuthorizationValidator.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/IncomingTopologyTransactionAuthorizationValidatorX.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/SnapshotAuthorizationValidator.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TerminateProcessing.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTimes.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTimestampPlusEpsilonTracker.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessor.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessorCommon.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessorX.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTransactionSubscriber.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TransactionAuthorizationValidator.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TransactionAuthorizationValidatorX.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyStateForInititalizationService.scala create mode 100644 
canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyStore.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyStoreX.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyTransactionCollection.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyTransactionCollectionX.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/store/db/DbTopologyStore.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/store/db/DbTopologyStoreX.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/store/memory/InMemoryTopologyStore.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/store/memory/InMemoryTopologyStoreX.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/ParticipantAttributes.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/SignedTopologyTransaction.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/SignedTopologyTransactionX.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMapping.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMappingX.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMappingXChecks.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyTransaction.scala create mode 100644 
canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyTransactionX.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/UniquePath.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/tracing/SerializableTraceContext.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/tracing/Spanning.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/traffic/EventCostCalculator.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/BinaryFileUtil.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/ByteStringUtil.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/ChainUtil.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/Checked.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/CheckedT.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/DelayUtil.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/EitherTUtil.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/EitherUtil.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/ErrorUtil.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/FutureInstances.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/FutureUtil.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/HasFlushFuture.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/HexString.scala create mode 100644 
canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/LazyValWithContext.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/LengthLimitedByteString.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/LfTransactionUtil.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/LoggerUtil.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/MapsUtil.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/MessageRecorder.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/MonadUtil.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/NoCopy.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/OptionUtil.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/OrderedBucketMergeHub.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/PekkoUtil.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/PriorityBlockingQueueUtil.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/ResourceUtil.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/ShowUtil.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/SimpleExecutionQueue.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/SingleUseCell.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/StackTraceUtil.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/Thereafter.scala create mode 100644 
canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/TrieMapUtil.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/TryUtil.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/package.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/retry/Jitter.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/retry/LICENSE create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/retry/Policy.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/retry/RetryEither.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/retry/RetryUtil.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/retry/Success.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/retry/package.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/version/HasProtocolVersionedWrapper.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/version/HasVersionedToByteString.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/version/HasVersionedWrapper.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/version/ProtocolVersion.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/version/VersionedMessage.scala create mode 100644 canton-3x/community/base/src/main/scala/com/digitalasset/canton/version/version.scala create mode 100644 canton-3x/community/base/src/main/scala/scala/concurrent/BatchingExecutorCanton.scala create mode 100644 canton-3x/community/base/src/main/scala/slick/util/AsyncExecutorWithMetrics.scala create mode 100644 
canton-3x/community/base/src/main/scala/slick/util/AsyncExecutorWithShutdown.scala create mode 100644 canton-3x/community/base/src/main/scala/slick/util/LICENSE.txt create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ActiveContracts.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ArchivedEvent.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Bool.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Checkpoint.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Command.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CommandsSubmission.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CompletionEndResponse.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CompletionStreamRequest.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CompletionStreamResponse.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Contract.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ContractFilter.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ContractId.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ContractMetadata.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CreateAndExerciseCommand.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CreateCommand.java create mode 100644 
canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CreateUserRequest.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CreateUserResponse.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CreatedEvent.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DamlCollectors.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DamlEnum.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DamlGenMap.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DamlList.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DamlMap.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DamlOptional.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DamlRecord.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DamlTextMap.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Date.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Decimal.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DeleteUserRequest.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DeleteUserResponse.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DisclosedContract.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Event.java create mode 100644 
canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/EventUtils.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ExerciseByKeyCommand.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ExerciseCommand.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ExercisedEvent.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Filter.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/FiltersByParty.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetActiveContractsRequest.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetActiveContractsResponse.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetEventsByContractIdResponse.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetEventsByContractKeyResponse.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetFlatTransactionResponse.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetLedgerEndResponse.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetPackageResponse.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetPackageStatusResponse.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetTransactionResponse.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetTransactionTreesResponse.java create mode 100644 
canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetTransactionsRequest.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetTransactionsResponse.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetUserRequest.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetUserResponse.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GrantUserRightsRequest.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GrantUserRightsResponse.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Identifier.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/InclusiveFilter.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Int64.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/LedgerOffset.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ListUserRightsRequest.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ListUserRightsResponse.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ListUsersRequest.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ListUsersResponse.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/NoFilter.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Numeric.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Party.java create mode 100644 
canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Record.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/RevokeUserRightsRequest.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/RevokeUserRightsResponse.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/SubmitAndWaitRequest.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/SubmitCommandsRequest.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/SubmitRequest.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Template.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Text.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Timestamp.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Transaction.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/TransactionFilter.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/TransactionTree.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/TreeEvent.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Unit.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/UnsupportedEventTypeException.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/UpdateSubmission.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/User.java create mode 100644 
canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Value.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Variant.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/WorkflowEvent.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/codegen/ByKey.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/codegen/Choice.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/codegen/Contract.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/codegen/ContractCompanion.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/codegen/ContractDecoder.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/codegen/ContractId.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/codegen/ContractTypeCompanion.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/codegen/ContractWithInterfaceView.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/codegen/ContractWithKey.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/codegen/CreateAnd.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/codegen/Created.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/codegen/DamlEnum.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/codegen/DamlRecord.java create mode 100644 
canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/codegen/DefinedDataType.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/codegen/Exercised.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/codegen/Exercises.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/codegen/HasCommands.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/codegen/InterfaceCompanion.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/codegen/PrimitiveValueDecoders.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/codegen/Update.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/codegen/ValueDecoder.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/codegen/Variant.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/codegen/json/JsonLfDecoder.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/codegen/json/JsonLfDecoders.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/codegen/json/JsonLfEncoder.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/codegen/json/JsonLfEncoders.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/codegen/json/JsonLfReader.java create mode 100644 canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/codegen/json/JsonLfWriter.java create mode 100644 canton-3x/community/bindings-java/src/test/java/com/daml/ledger/javaapi/data/codegen/json/JsonLfDecodersTest.java create mode 100644 
canton-3x/community/bindings-java/src/test/java/com/daml/ledger/javaapi/data/codegen/json/JsonLfEncodersTest.java create mode 100644 canton-3x/community/bindings-java/src/test/java/com/daml/ledger/javaapi/data/codegen/json/TestHelpers.java create mode 100644 canton-3x/community/bindings-java/src/test/resources/logback-test.xml create mode 100644 canton-3x/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/CommandSpec.scala create mode 100644 canton-3x/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/DamlRecordSpec.scala create mode 100644 canton-3x/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/EventSpec.scala create mode 100644 canton-3x/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/Generators.scala create mode 100644 canton-3x/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/GetActiveContractsRequestSpec.scala create mode 100644 canton-3x/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/SubmitCommandsRequestSpec.scala create mode 100644 canton-3x/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/TimestampSpec.scala create mode 100644 canton-3x/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/TransactionFilterSpec.scala create mode 100644 canton-3x/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/ValueSpec.scala create mode 100644 canton-3x/community/common/.gitignore create mode 100644 canton-3x/community/common/src/main/daml/CantonExamples/CantonExamples.daml create mode 100644 canton-3x/community/common/src/main/daml/CantonExamples/ContractKeys.daml create mode 100644 canton-3x/community/common/src/main/daml/CantonExamples/Divulgence.daml create mode 100644 canton-3x/community/common/src/main/daml/CantonExamples/Iou.daml create mode 100644 canton-3x/community/common/src/main/daml/CantonExamples/LockIou.daml create mode 100644 canton-3x/community/common/src/main/daml/CantonExamples/Paint.daml 
create mode 100644 canton-3x/community/common/src/main/daml/CantonExamples/SafePaint.daml create mode 100644 canton-3x/community/common/src/main/daml/CantonExamples/Swap.daml create mode 100644 canton-3x/community/common/src/main/daml/CantonExamples/daml.yaml create mode 100644 canton-3x/community/common/src/main/resources/db/migration/canton/h2/dev/V999__dev.sha256 create mode 100644 canton-3x/community/common/src/main/resources/db/migration/canton/h2/dev/V999__dev.sql create mode 100644 canton-3x/community/common/src/main/resources/db/migration/canton/h2/dev/reference/V998__blocks.sql create mode 100644 canton-3x/community/common/src/main/resources/db/migration/canton/h2/stable/V1_1__initial.sha256 create mode 100644 canton-3x/community/common/src/main/resources/db/migration/canton/h2/stable/V1_1__initial.sql create mode 100644 canton-3x/community/common/src/main/resources/db/migration/canton/h2/stable/V2__changes_for_2.3.sha256 create mode 100644 canton-3x/community/common/src/main/resources/db/migration/canton/h2/stable/V2__changes_for_2.3.sql create mode 100644 canton-3x/community/common/src/main/resources/db/migration/canton/h2/stable/V3__changes_for_2.4.sha256 create mode 100644 canton-3x/community/common/src/main/resources/db/migration/canton/h2/stable/V3__changes_for_2.4.sql create mode 100644 canton-3x/community/common/src/main/resources/db/migration/canton/h2/stable/V4__changes_for_2.5.sha256 create mode 100644 canton-3x/community/common/src/main/resources/db/migration/canton/h2/stable/V4__changes_for_2.5.sql create mode 100644 canton-3x/community/common/src/main/resources/db/migration/canton/h2/stable/V5__changes_for_2.6.sha256 create mode 100644 canton-3x/community/common/src/main/resources/db/migration/canton/h2/stable/V5__changes_for_2.6.sql create mode 100644 canton-3x/community/common/src/main/resources/db/migration/canton/h2/stable/V6__changes_for_2.7.sha256 create mode 100644 
canton-3x/community/common/src/main/resources/db/migration/canton/h2/stable/V6__changes_for_2.7.sql create mode 100644 canton-3x/community/common/src/main/resources/db/migration/canton/h2/stable/V7__changes_for_2.8.sql create mode 100644 canton-3x/community/common/src/main/resources/db/migration/canton/h2/stable/V8__changes_for_3.0.sql create mode 100644 canton-3x/community/common/src/main/resources/db/migration/canton/postgres/dev/V999__dev.sha256 create mode 100644 canton-3x/community/common/src/main/resources/db/migration/canton/postgres/dev/V999__dev.sql create mode 100644 canton-3x/community/common/src/main/resources/db/migration/canton/postgres/dev/reference/V998__blocks.sql create mode 100644 canton-3x/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_1__initial.sha256 create mode 100644 canton-3x/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_1__initial.sql create mode 100644 canton-3x/community/common/src/main/resources/db/migration/canton/postgres/stable/V2__changes_for_2.3.sha256 create mode 100644 canton-3x/community/common/src/main/resources/db/migration/canton/postgres/stable/V2__changes_for_2.3.sql create mode 100644 canton-3x/community/common/src/main/resources/db/migration/canton/postgres/stable/V3__changes_for_2.4.sha256 create mode 100644 canton-3x/community/common/src/main/resources/db/migration/canton/postgres/stable/V3__changes_for_2.4.sql create mode 100644 canton-3x/community/common/src/main/resources/db/migration/canton/postgres/stable/V4__changes_for_2.5.sha256 create mode 100644 canton-3x/community/common/src/main/resources/db/migration/canton/postgres/stable/V4__changes_for_2.5.sql create mode 100644 canton-3x/community/common/src/main/resources/db/migration/canton/postgres/stable/V5__changes_for_2.6.sha256 create mode 100644 canton-3x/community/common/src/main/resources/db/migration/canton/postgres/stable/V5__changes_for_2.6.sql create mode 100644 
canton-3x/community/common/src/main/resources/db/migration/canton/postgres/stable/V6__changes_for_2.7.sha256 create mode 100644 canton-3x/community/common/src/main/resources/db/migration/canton/postgres/stable/V6__changes_for_2.7.sql create mode 100644 canton-3x/community/common/src/main/resources/db/migration/canton/postgres/stable/V7__changes_for_2.8.sql create mode 100644 canton-3x/community/common/src/main/resources/db/migration/canton/postgres/stable/V8__changes_for_3.0.sql create mode 100755 canton-3x/community/common/src/main/resources/db/migration/canton/recompute-sha256sums.sh create mode 100644 canton-3x/community/common/src/main/scala/com/daml/lf/CantonOnly.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/admin/grpc/GrpcPruningScheduler.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/common/domain/SequencerBasedRegisterTopologyTransactionHandle.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/common/domain/SequencerConnectClient.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/common/domain/grpc/GrpcSequencerConnectClient.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/common/domain/grpc/SequencerConnectClientInterceptor.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/common/domain/grpc/SequencerInfoLoader.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/config/BatchAggregatorConfig.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/config/CantonConfigUtil.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/config/ConfigDefaults.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/config/CryptoConfig.scala create mode 100644 
canton-3x/community/common/src/main/scala/com/digitalasset/canton/config/CryptoProvider.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/config/DbCacheConfig.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/config/DeprecatedConfigUtils.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/config/InitConfig.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/config/LocalNodeConfig.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/config/MonitoringConfig.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/config/ProtocolConfig.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/config/TopologyXConfig.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/crypto/Blake2xb.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/crypto/CryptoFactory.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/crypto/CryptoHandshakeValidator.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/crypto/LtHash16.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/crypto/admin/grpc/GrpcVaultService.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/crypto/deterministic/encryption/SP800HashDRBGSecureRandom.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/crypto/provider/CryptoKeyConverter.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/crypto/provider/jce/JceJavaConverter.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/crypto/provider/jce/JcePrivateCrypto.scala 
create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/crypto/provider/jce/JcePureCrypto.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/crypto/provider/jce/JceSecurityProvider.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/crypto/provider/tink/TinkJavaConverter.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/crypto/provider/tink/TinkKeyFormat.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/crypto/provider/tink/TinkPrivateCrypto.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/crypto/provider/tink/TinkPureCrypto.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/data/ConcurrentHMap.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/data/TaskScheduler.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/data/TransactionViewDecomposition.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/data/TransactionViewDecompositionFactory.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/environment/BootstrapStage.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/environment/CantonNode.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/environment/CantonNodeBootstrap.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/environment/CantonNodeBootstrapCommon.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/environment/CantonNodeBootstrapX.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/environment/DomainTopologyInitializationCallback.scala 
create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/environment/NodeFactoryArguments.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/health/GrpcHealthReporter.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/health/GrpcHealthServer.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/health/admin/data/NodeStatus.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/health/admin/grpc/GrpcStatusService.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/lifecycle/StartAndCloseable.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/logging/CantonFilterEvaluator.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/logging/CantonJsonEncoder.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/logging/ThrottleFilterEvaluator.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/metrics/MetricDoc.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/metrics/MetricsHelper.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/metrics/package.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/networking/grpc/ErrorLoggingStreamObserver.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/networking/grpc/ForwardingStreamObserver.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/networking/grpc/GrpcDynamicService.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/networking/grpc/RecordingStreamObserver.scala create mode 100644 
canton-3x/community/common/src/main/scala/com/digitalasset/canton/networking/grpc/StaticGrpcServices.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/protocol/ContractSalt.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/protocol/PackageInfoService.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/protocol/StoredParties.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/protocol/TransactionMetadata.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/protocol/UnicumGenerator.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/protocol/WellFormedTransaction.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/protocol/WithContractHash.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/protocol/WithTransactionId.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/protocol/messages/ConfirmationRequest.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/resource/IdempotentInsert.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/resource/StorageDebug.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/scheduler/Schedule.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/scheduler/Scheduler.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/scheduler/Schedulers.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/sequencing/DelayLogger.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/sequencing/client/GrpcSequencerAuthenticationSupport.scala 
create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/sequencing/handlers/DiscardIgnoredEvents.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/sequencing/handlers/EnvelopeOpener.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/sequencing/handlers/StripSignature.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/sequencing/protocol/VerifyActive.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/sequencing/protocol/WithRecipients.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/store/PruningSchedulerStore.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/store/db/DbBulkUpdateProcessor.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/store/db/DbPruningSchedulerStore.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/store/db/DbSendTrackerStore_Unused.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/store/memory/InMemoryPruningSchedulerStore.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/time/GrpcDomainTimeService.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/time/HasUptime.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/time/TestingTimeService.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/topology/DomainOutboxDispatchHelper.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/topology/DomainOutboxStatus.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/topology/NodeId.scala create mode 100644 
canton-3x/community/common/src/main/scala/com/digitalasset/canton/topology/PartyToParticipantComputations.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/topology/QueueBasedDomainOutboxX.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/topology/StoreBasedDomainOutbox.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/topology/admin/grpc/GrpcIdentityInitializationServiceX.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/topology/admin/grpc/GrpcInitializationService.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/topology/admin/grpc/GrpcTopologyAggregationService.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/topology/admin/grpc/GrpcTopologyManagerReadService.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/topology/admin/grpc/GrpcTopologyManagerReadServiceX.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/topology/admin/grpc/GrpcTopologyManagerWriteService.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/topology/admin/grpc/GrpcTopologyManagerWriteServiceX.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/topology/store/DomainTopologyStore.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/topology/store/InitializationStore.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/tracing/BatchTracing.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/traffic/MemberTrafficStatus.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/traffic/TopUpEvent.scala create mode 100644 
canton-3x/community/common/src/main/scala/com/digitalasset/canton/traffic/TopUpQueue.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/util/BatchAggregator.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/util/DamlPackageLoader.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/util/HasReadWriteLock.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/util/IdUtil.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/util/IterableUtil.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/util/OptionUtils.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/util/PathUtils.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/util/RangeUtil.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/util/RateLimiter.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/util/SeqUtil.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/util/SetsUtil.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/util/SnapshottableList.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/util/TextFileUtil.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/util/UByte.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/version/CantonVersion.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/version/DamlLfVersionToProtocolVersions.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/version/ProtocolVersionCompatibility.scala create mode 100644 
canton-3x/community/common/src/main/scala/com/digitalasset/canton/version/ReleaseVersionToProtocolVersions.scala create mode 100644 canton-3x/community/common/src/main/scala/com/digitalasset/canton/version/ReleaseVersions.scala create mode 100644 canton-3x/community/common/src/test/java/com/digitalasset/canton/annotations/UnstableTest.java create mode 100644 canton-3x/community/common/src/test/java/com/digitalasset/canton/sequencing/protocol/RecipientTest.scala create mode 100644 canton-3x/community/common/src/test/java/com/digitalasset/canton/sequencing/protocol/RecipientsTest.scala create mode 100644 canton-3x/community/common/src/test/java/com/digitalasset/canton/sequencing/protocol/RecipientsTreeTest.scala create mode 100644 canton-3x/community/common/src/test/protobuf/com/digitalasset/canton/scalapb/package.proto create mode 100644 canton-3x/community/common/src/test/protobuf/com/digitalasset/canton/test/hello.proto create mode 100644 canton-3x/community/common/src/test/protobuf/com/digitalasset/canton/test/parsing-attack.proto create mode 100644 canton-3x/community/common/src/test/protobuf/com/digitalasset/canton/test/sequencing.proto create mode 100644 canton-3x/community/common/src/test/protobuf/com/digitalasset/canton/test/versioned-messages.proto create mode 100644 canton-3x/community/common/src/test/resources/blake2xb-golden-tests.txt create mode 100644 canton-3x/community/common/src/test/resources/logback-test.xml create mode 100644 canton-3x/community/common/src/test/resources/tls/participant.pem create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/CheckedTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/ComparesLfTransactions.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/DefaultDamlValues.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/Generators.scala create mode 100644 
canton-3x/community/common/src/test/scala/com/digitalasset/canton/GeneratorsLf.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/HasActorSystem.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/NeedsNewLfContractIds.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/SequentialTestByKey.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/UnstableTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/concurrent/DirectExecutionContextTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/concurrent/ExecutionContextMonitorTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/concurrent/IdlenessExecutorServiceTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/concurrent/ThreadingTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/config/DbConfigTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/config/DeprecatedConfigUtilsTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/config/GeneratorsConfig.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/config/LengthLimitedStringTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/config/LengthLimitedStringWrapperTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/config/RefinedNonNegativeDurationTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/crypto/Blake2xbTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/crypto/CryptoPureApiCantonCompatibilityTest.scala 
create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/crypto/EncodableString.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/crypto/GeneratorsCrypto.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/crypto/HashBuilderTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/crypto/HashPurposeTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/crypto/HashTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/crypto/HkdfTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/crypto/HmacTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/crypto/JavaPublicKeyConverterTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/crypto/LtHash16Test.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/crypto/SaltTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/crypto/TestSalt.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/crypto/deterministic/encryption/SP800HashDRBGSecureRandomTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/crypto/provider/jce/JceCryptoTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/crypto/provider/symbolic/SymbolicCryptoTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/crypto/provider/tink/TinkCryptoTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/crypto/store/CryptoPrivateStoreExtendedTest.scala create mode 100644 
canton-3x/community/common/src/test/scala/com/digitalasset/canton/crypto/store/CryptoPrivateStoreTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/crypto/store/CryptoPublicStoreTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/crypto/store/db/DbCryptoPrivateStoreTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/crypto/store/db/DbCryptoPublicStoreTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/crypto/store/memory/CryptoPrivateStoreTestInMemory.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/crypto/store/memory/CryptoPublicStoreTestInMemory.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/data/ActionDescriptionTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/data/CantonTimestampSecondTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/data/CantonTimestampTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/data/ConcurrentHMapTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/data/GenTransactionTreeTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/data/GeneratorsData.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/data/GeneratorsTransferData.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/data/MerkleSeqTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/data/MerkleTreeTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/data/PeanoQueueTest.scala create mode 100644 
canton-3x/community/common/src/test/scala/com/digitalasset/canton/data/TaskSchedulerTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/data/TransactionViewDecompositionTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/data/TransactionViewTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/data/ViewPositionTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/data/package.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/environment/BootstrapStageTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/error/CantonErrorTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/error/ErrorLoggingContextSpec.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/error/GeneratorsError.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/examples/IouSyntax.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/health/ComponentStatusTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/integration/tests/benchmarks/LtHash16Benchmark.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/ledger/api/GeneratorsApi.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/ledger/offset/GeneratorsOffset.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/lifecycle/FutureUnlessShutdownTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/lifecycle/LifecycleTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/lifecycle/PromiseUnlessShutdownTest.scala 
create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/lifecycle/StartAndCloseableTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/logging/NamedEventCapturingLogger.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/logging/NamedLoggingTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/logging/pretty/PrettyPrintingImplicitResolutionTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/logging/pretty/PrettyPrintingTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/logging/pretty/PrettyTestInstances.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/metrics/LoadGaugeTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/metrics/MetricDocTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/networking/grpc/ApiRequestLoggerTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/networking/grpc/CantonGrpcUtilTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/networking/grpc/CommunityClientChannelBuilderTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/protobuf/ProtobufParsingAttackTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/protobuf/UntypedVersionedMessageTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/protocol/CantonContractIdVersionTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/protocol/ConfirmationPolicyTest.scala create mode 100644 
canton-3x/community/common/src/test/scala/com/digitalasset/canton/protocol/ContractMetadataTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/protocol/ExampleTransaction.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/protocol/ExampleTransactionFactory.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/protocol/ExampleTransactionFactoryTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/protocol/GeneratorsProtocol.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/protocol/SerializableContractTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/protocol/SerializableRawContractInstanceTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/protocol/VerdictTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/protocol/WellFormedTransactionMergeTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/protocol/WellFormedTransactionTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/protocol/messages/AcsCommitmentTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/protocol/messages/GeneratorsMessages.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/protocol/messages/MediatorResponseTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/protocol/messages/SignedProtocolMessageContentTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/protocol/messages/TopologyTransactionTest.scala create mode 100644 
canton-3x/community/common/src/test/scala/com/digitalasset/canton/resource/DbStorageSingleTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/scheduler/CronTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/sequencing/AsyncResultTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/sequencing/DelayLoggerTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/sequencing/GeneratorsSequencing.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/sequencing/SequencedEventMonotonicityCheckerTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/sequencing/SequencerAggregatorPekkoTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/sequencing/SequencerConnectionTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/sequencing/SequencerTestUtils.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/sequencing/authentication/grpc/AuthenticationTokenManagerTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/sequencing/authentication/grpc/SequencerClientAuthenticationTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/PeriodicAcknowledgementsTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/ResilientSequencerSubscriberPekkoTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/ResilientSequencerSubscriptionTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/SendTrackerTest.scala create mode 100644 
canton-3x/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/SequencedEventTestFixture.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/SequencedEventValidatorTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/SequencerAggregatorTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/SequencerClientTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/TestSequencerClientSend.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/transports/GrpcSequencerSubscriptionTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/transports/GrpcSubscriptionErrorRetryPolicyTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/sequencing/handlers/CounterCaptureTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/sequencing/protocol/GeneratorsProtocol.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/sequencing/protocol/SequencedEventTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/sequencing/protocol/SubmissionRequestTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/serialization/DeterministicEncodingTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/serialization/HasCryptographicEvidenceTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/serialization/ProtoConverterTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/store/CursorPreheadStoreTest.scala create mode 
100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/store/PrunableByTimeTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/store/PruningSchedulerStoreTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/store/SendTrackerStoreTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/store/SequencedEventStoreTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/store/SequencerCounterTrackerStoreTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/store/db/DatabaseDeadlockTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/store/db/DatabaseLimitNbParamTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/store/db/DbIndexedStringsStoreTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/store/db/DbPruningSchedulerStoreTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/store/db/DbSendTrackerTrackerStoreTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/store/db/DbSequencedEventStoreTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/store/db/DbSequencerCounterTrackerStoreTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/store/db/DbStorageIdempotency.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/store/db/DbTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/store/memory/PruningSchedulerStoreTestInMemory.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/store/memory/SendTrackerTrackerStoreTestInMemory.scala create 
mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/store/memory/SequencedEventStoreTestInMemory.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/store/memory/SequencerCounterTrackerStoreTestInMemory.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/time/ClockTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/time/DomainTimeTrackerTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/time/GeneratorsTime.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/time/PeriodicActionTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/time/RefinedDurationsTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/time/TimeProofRequestSubmitterTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/time/TimeProofTestUtil.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/topology/GeneratorsTopology.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/topology/IdentifierTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/topology/PartyToParticipantComputationsTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/topology/TestingIdentityFactoryTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/topology/TopologyTransactionTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/topology/client/CachingDomainTopologyClientTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/topology/client/IdentityProvidingServiceClientTest.scala create mode 
100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/topology/client/StoreBasedDomainTopologyClientTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/topology/client/StoreBasedDomainTopologySnapshotXTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/topology/processing/AuthorizationGraphTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/topology/processing/AuthorizationGraphXTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/topology/processing/DomainTopologyTransactionMessageValidatorTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/topology/processing/IncomingTopologyTransactionAuthorizationValidatorTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/topology/processing/IncomingTopologyTransactionAuthorizationValidatorTestX.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/topology/processing/SnapshotAuthorizationValidatorTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/topology/processing/TopologyTimestampPlusEpsilonTrackerTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessorTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/topology/processing/TopologyTransactionTestFactory.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/topology/processing/TopologyTransactionTestFactoryX.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/topology/processing/UnionspaceAuthorizationGraphXTest.scala create mode 100644 
canton-3x/community/common/src/test/scala/com/digitalasset/canton/topology/store/DownloadTopologyStateForInitializationServiceTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/topology/store/InitializationStoreTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/topology/store/TopologyStoreTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/topology/store/TopologyStoreXTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/topology/store/TopologyStoreXTestBase.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/topology/store/TopologyStoreXTestData.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/topology/store/TopologyTransactionCollectionTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/topology/store/db/DbDownloadTopologyStateForInitializationServiceTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/topology/store/db/DbTopologyStoreTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/topology/store/db/DbTopologyStoreXHelper.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/topology/store/db/DbTopologyStoreXTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/topology/store/memory/InMemoryDownloadTopologyStateForInitializationServiceTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/topology/store/memory/InMemoryTopologyStoreXTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/topology/store/memory/TopologyStoreTestInMemory.scala create mode 100644 
canton-3x/community/common/src/test/scala/com/digitalasset/canton/topology/transaction/GeneratorsTransaction.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/topology/transaction/TopologyChangeOpTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/topology/transaction/ValidatingTopologyMappingXChecksTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/tracing/GrpcTelemetryContextPropagationTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/tracing/SerializableTraceContextTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/tracing/SpanningTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/traffic/TopUpQueueTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/util/BatchAggregatorTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/util/ByteStringUtilTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/util/CheckedTTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/util/CheckedTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/util/DamlPackageLoaderTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/util/DelayUtilTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/util/EitherUtilTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/util/HexStringTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/util/IterableUtilTest.scala create mode 100644 
canton-3x/community/common/src/test/scala/com/digitalasset/canton/util/LazyValWithContextTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/util/LengthLimitedByteStringTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/util/LfGenerator.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/util/LfTransactionBuilder.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/util/LoggerUtilTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/util/MapsUtilTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/util/MessageRecorderTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/util/MonadUtilTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/util/OrderedBucketMergeHubTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/util/PathUtilsTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/util/PekkoUtilTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/util/RangeUtilTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/util/RateLimiterTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/util/ResourceUtilTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/util/SeqUtilTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/util/SimpleExecutionQueueTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/util/SingleUseCellTest.scala create mode 100644 
canton-3x/community/common/src/test/scala/com/digitalasset/canton/util/ThereafterTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/util/TraverseTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/util/TrieMapUtilTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/util/retry/JitterSpec.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/util/retry/PolicyTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/util/retry/SuccessSpec.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/version/CantonVersionTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/version/DamlLfVersionToProtocolVersionsTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/version/HasProtocolVersionedWrapperTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/version/HasTestCloseContext.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/version/ProtocolVersionCompatibilityTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/version/ProtocolVersionTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/version/SerializationDeserializationTest.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/canton/version/TestProtocolVersions.scala create mode 100644 canton-3x/community/common/src/test/scala/com/digitalasset/platform/daml/lf/testing/SampleParties.scala create mode 100644 canton-3x/community/demo/src/main/daml/ai-analysis/AIAnalysis.daml create mode 100644 canton-3x/community/demo/src/main/daml/ai-analysis/daml.yaml create mode 100644 
canton-3x/community/demo/src/main/daml/bank/Bank.daml create mode 100644 canton-3x/community/demo/src/main/daml/bank/daml.yaml create mode 100644 canton-3x/community/demo/src/main/daml/doctor/Doctor.daml create mode 100644 canton-3x/community/demo/src/main/daml/doctor/daml.yaml create mode 100644 canton-3x/community/demo/src/main/daml/health-insurance/HealthInsurance.daml create mode 100644 canton-3x/community/demo/src/main/daml/health-insurance/daml.yaml create mode 100644 canton-3x/community/demo/src/main/daml/medical-records/MedicalRecord.daml create mode 100644 canton-3x/community/demo/src/main/daml/medical-records/daml.yaml create mode 100644 canton-3x/community/demo/src/main/scala/com/digitalasset/canton/demo/DemoUI.scala create mode 100644 canton-3x/community/demo/src/main/scala/com/digitalasset/canton/demo/ReferenceDemoScript.scala create mode 100644 canton-3x/community/demo/src/main/scala/com/digitalasset/canton/demo/Runner.scala create mode 100644 canton-3x/community/demo/src/pack/demo/README.md create mode 100644 canton-3x/community/demo/src/pack/demo/config/banking.conf create mode 100644 canton-3x/community/demo/src/pack/demo/config/medical.conf create mode 100644 canton-3x/community/demo/src/pack/demo/config/participant1.conf create mode 100644 canton-3x/community/demo/src/pack/demo/config/participant2.conf create mode 100644 canton-3x/community/demo/src/pack/demo/config/participant3.conf create mode 100644 canton-3x/community/demo/src/pack/demo/config/participant4.conf create mode 100644 canton-3x/community/demo/src/pack/demo/config/participant5.conf create mode 100644 canton-3x/community/demo/src/pack/demo/config/participant6.conf create mode 100644 canton-3x/community/demo/src/pack/demo/demo-native.sc create mode 100644 canton-3x/community/demo/src/pack/demo/demo.conf create mode 100644 canton-3x/community/demo/src/pack/demo/demo.sc create mode 100644 canton-3x/community/demo/src/pack/demo/images/canton-logo-small.png create mode 100644 
canton-3x/community/demo/src/pack/demo/images/canton-logo.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/canton.ico create mode 100755 canton-3x/community/demo/src/pack/demo/images/create-slides.sh create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo0.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo1.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo10.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo11.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo12.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo13.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo14.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo15.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo16.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo17.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo18.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo19.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo2.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo20.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo21.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo22.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo23.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo24.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo25.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo26.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo27.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo28.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo29.png create mode 100644 
canton-3x/community/demo/src/pack/demo/images/demo3.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo30.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo31.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo32.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo33.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo34.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo35.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo36.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo37.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo38.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo39.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo4.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo40.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo41.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo42.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo43.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo44.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo45.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo46.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo47.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo48.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo49.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo5.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo50.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo51.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo52.png create mode 100644 
canton-3x/community/demo/src/pack/demo/images/demo53.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo54.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo55.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo56.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo57.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo6.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo7.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo8.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/demo9.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/left.png create mode 100644 canton-3x/community/demo/src/pack/demo/images/right.png create mode 100755 canton-3x/community/demo/src/pack/start-demo-win.cmd create mode 100755 canton-3x/community/demo/src/pack/start-demo.command create mode 100644 canton-3x/community/demo/src/test/scala/com/digitalasset/canton/integration/tests/DemoExampleIntegrationTest.scala create mode 100644 canton-3x/community/domain/src/main/protobuf/buf.yaml create mode 100644 canton-3x/community/domain/src/main/protobuf/com/digitalasset/canton/domain/admin/v0/domain_initialization_service.proto create mode 100644 canton-3x/community/domain/src/main/protobuf/com/digitalasset/canton/domain/admin/v0/domain_service.proto create mode 100644 canton-3x/community/domain/src/main/protobuf/com/digitalasset/canton/domain/admin/v0/enterprise_mediator_administration_service.proto create mode 100644 canton-3x/community/domain/src/main/protobuf/com/digitalasset/canton/domain/admin/v0/enterprise_sequencer_administration_service.proto create mode 100644 canton-3x/community/domain/src/main/protobuf/com/digitalasset/canton/domain/admin/v0/enterprise_sequencer_connection_service.proto create mode 100644 
canton-3x/community/domain/src/main/protobuf/com/digitalasset/canton/domain/admin/v0/mediator_initialization_service.proto create mode 100644 canton-3x/community/domain/src/main/protobuf/com/digitalasset/canton/domain/admin/v0/sequencer_administration_service.proto create mode 100644 canton-3x/community/domain/src/main/protobuf/com/digitalasset/canton/domain/admin/v0/sequencer_initialization_service.proto create mode 100644 canton-3x/community/domain/src/main/protobuf/com/digitalasset/canton/domain/admin/v0/sequencer_initialization_snapshot.proto create mode 100644 canton-3x/community/domain/src/main/protobuf/com/digitalasset/canton/domain/admin/v0/sequencer_version_service.proto create mode 100644 canton-3x/community/domain/src/main/protobuf/com/digitalasset/canton/domain/admin/v1/sequencer_initialization_snapshot.proto create mode 100644 canton-3x/community/domain/src/main/protobuf/com/digitalasset/canton/domain/admin/v2/mediator_initialization_service.proto create mode 100644 canton-3x/community/domain/src/main/protobuf/com/digitalasset/canton/domain/admin/v2/sequencer_initialization_service.proto create mode 100644 canton-3x/community/domain/src/main/protobuf/com/digitalasset/canton/domain/scalapb/package.proto create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/DomainNodeBootstrap.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/DomainNodeParameters.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/admin/grpc/GrpcDomainService.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/config/DomainConfig.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/config/DomainInitConfig.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/config/DomainParametersConfig.scala create mode 100644 
canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/config/store/BaseNodeSettingsStore.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/config/store/DomainNodeSettingsStore.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/config/store/StoredDomainNodeSettings.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/initialization/DomainNodeSequencerClientFactory.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/initialization/DomainTopologyManagerIdentityInitialization.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/initialization/EmbeddedMediatorInitialization.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/initialization/TopologyManagementInitialization.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/manager/DomainManagerRuntime.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/manager/DomainManagerRuntimeFactory.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/ConfirmationResponseProcessor.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/FinalizedResponse.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/Mediator.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/MediatorEvent.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/MediatorEventDeduplicator.scala create mode 100644 
canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/MediatorEventsProcessor.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/MediatorRuntimeFactory.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/MediatorStateInspection.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/MediatorVerdict.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/ResponseAggregation.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/ResponseAggregator.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/VerdictMessageId.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/VerdictSender.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/admin/gprc/InitializeMediatorRequest.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/admin/gprc/InitializeMediatorResponse.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/store/FinalizedResponseStore.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/store/MediatorDeduplicationStore.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/store/MediatorState.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/metrics/DomainMetrics.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/SequencerRuntime.scala create mode 100644 
canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/SequencerRuntimeFactory.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/admin/client/SequencerAdminClient.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/admin/grpc/InitializeSequencerRequest.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/admin/grpc/InitializeSequencerResponse.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/authentication/AuthenticationTokenCache.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/authentication/MemberAuthenticationService.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/authentication/MemberAuthenticationStore.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/authentication/grpc/AsyncForwardingListener.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/authentication/grpc/IdentityContextHelper.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/authentication/grpc/SequencerAuthenticationServerInterceptor.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/authentication/grpc/SequencerConnectServerInterceptor.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/integrations/state/EphemeralState.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/BaseSequencer.scala create mode 100644 
canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/DatabaseSequencer.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/DirectSequencerClientTransport.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/EventSignaller.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/FetchLatestEventsFlow.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/InFlightAggregation.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/InFlightAggregationUpdate.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/InFlightAggregations.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/LedgerIdentity.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/LocalSequencerStateEventSignaller.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/PartitionedTimestampGenerator.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/PollingEventSignaller.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/Sequencer.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerConfig.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerFactory.scala create mode 100644 
canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerPruningStatus.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerReader.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerSnapshot.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerValidations.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerWriter.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerWriterConfig.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerWriterSource.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SignatureVerifier.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/WriterStartupError.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/errors/CreateSubscriptionError.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/errors/RegisterMemberError.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/errors/SequencerError.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/errors/SequencerWriteError.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/package.scala create mode 100644 
canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/DbSequencerStore.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/InMemorySequencerStore.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/SequencerMemberCache.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/SequencerStore.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/SequencerWriterStore.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/VersionedStatus.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/traffic/MemberTrafficSnapshot.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/traffic/SequencerMemberRateLimiterResult.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/traffic/SequencerRateLimitManager.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/traffic/SequencerTrafficStatus.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/service/CloseNotification.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/service/DirectSequencerSubscription.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/service/DirectSequencerSubscriptionFactory.scala create mode 100644 
canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/service/GrpcHandshakeService.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/service/GrpcManagedSubscription.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerAdministrationService.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerAuthenticationService.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerConnectService.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerService.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerTopologyBootstrapService.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerVersionService.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/service/SubscriptionPool.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/server/DynamicDomainGrpcServer.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/service/HandshakeValidator.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/service/ServiceAgreementManager.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/service/store/ServiceAgreementAcceptanceStore.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/service/store/db/DbServiceAgreementAcceptanceStore.scala create mode 100644 
canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/service/store/memory/InMemoryServiceAgreementAcceptanceStore.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/topology/DomainTopologyDispatcher.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/topology/DomainTopologyManager.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/topology/DomainTopologyManagerEventHandler.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/topology/DomainTopologyManagerRequestService.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/topology/client/DomainInitializationObserver.scala create mode 100644 canton-3x/community/domain/src/main/scala/com/digitalasset/canton/domain/topology/store/RegisterTopologyTransactionResponseStore.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/config/store/DomainNodeSettingsStoreTest.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/mediator/ConfirmationResponseProcessorTestV5.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/mediator/DefaultVerdictSenderTest.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/mediator/MediatorEventDeduplicatorTest.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/mediator/MediatorEventStageProcessorTest.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/mediator/MediatorStateTest.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/mediator/MediatorTest.scala create mode 100644 
canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/mediator/ResponseAggregationTestV5.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/mediator/TestVerdictSender.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/mediator/store/FinalizedResponseStoreTest.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/mediator/store/MediatorDeduplicationStoreTest.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/metrics/DomainTestMetrics.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/authentication/AuthenticationTokenCacheTest.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/authentication/MemberAuthenticationServiceTest.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/authentication/MemberAuthenticationStoreTest.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/authentication/PassiveSequencerMemberAuthenticationStore.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/authentication/grpc/SequencerAuthenticationServerInterceptorTest.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/BaseSequencerTest.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/BftDomainSequencerApiTest.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/DatabaseSequencerApiTest.scala create mode 100644 
canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/DomainSequencingTestUtils.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/FetchLatestEventsFlowTest.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/LocalSequencerStateEventSignallerTest.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/PartitionedTimestampGeneratorTest.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerApiTest.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerPruningStatusTest.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerReaderTest.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerTest.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerWriterSourceTest.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerWriterTest.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/TestDatabaseSequencerConfig.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/DbSequencerStoreTest.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/MultiTenantedSequencerStoreTest.scala create mode 100644 
canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/NonBftDomainSequencerApiTest.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/SequencerMemberCacheTest.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/SequencerStoreTest.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/SequencerStoreTestInMemory.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/service/GrpcManagedSubscriptionTest.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerIntegrationTest.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerServiceTest.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/service/SubscriptionPoolTest.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/service/ServiceAgreementManagerTest.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/service/store/ServiceAgreementAcceptanceStoreTest.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/service/store/db/DbServiceAgreementAcceptanceStoreTest.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/service/store/memory/ServiceAgreementAcceptanceStoreTestInMemory.scala create mode 100644 canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/topology/DomainTopologyDispatcherTest.scala create mode 100644 
canton-3x/community/domain/src/test/scala/com/digitalasset/canton/domain/topology/store/RegisterTopologyTransactionResponseStoreTest.scala create mode 100644 canton-3x/community/integration-testing/src/main/scala/com/digitalasset/canton/console/BufferedProcessLogger.scala create mode 100644 canton-3x/community/integration-testing/src/main/scala/com/digitalasset/canton/console/ConsoleEnvironmentTestHelpers.scala create mode 100644 canton-3x/community/integration-testing/src/main/scala/com/digitalasset/canton/console/TestConsoleOutput.scala create mode 100644 canton-3x/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/BaseEnvironmentDefinition.scala create mode 100644 canton-3x/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/BaseIntegrationTest.scala create mode 100644 canton-3x/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/CommonTestAliases.scala create mode 100644 canton-3x/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/CommunityConfigTransforms.scala create mode 100644 canton-3x/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/CommunityEnvironmentDefinition.scala create mode 100644 canton-3x/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/CommunityTests.scala create mode 100644 canton-3x/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/ConcurrentEnvironmentLimiter.scala create mode 100644 canton-3x/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/EnvironmentSetup.scala create mode 100644 canton-3x/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/EnvironmentSetupPlugin.scala create mode 100644 canton-3x/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/HasCycleUtils.scala create mode 100644 
canton-3x/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/HasEnvironmentDefinition.scala create mode 100644 canton-3x/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/IntegrationTestUtilities.scala create mode 100644 canton-3x/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/NetworkBootstrap.scala create mode 100644 canton-3x/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/TestEnvironment.scala create mode 100644 canton-3x/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/package.scala create mode 100644 canton-3x/community/integration-testing/src/main/scala/com/digitalasset/canton/metrics/ScopedInMemoryMetricsFactory.scala create mode 100644 canton-3x/community/integration-testing/src/main/scala/com/digitalasset/canton/version/TestProtocolVersions.scala create mode 100644 canton-3x/community/ledger-api/README.md create mode 100644 canton-3x/community/ledger-api/VERSION create mode 100644 canton-3x/community/ledger-api/docs/metering-report-schema.json create mode 100755 canton-3x/community/ledger-api/docs/post-process.sh create mode 100644 canton-3x/community/ledger-api/docs/rst_mmd.tmpl create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/scalapb/package.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v1/active_contracts_service.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v1/admin/config_management_service.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v1/admin/identity_provider_config_service.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v1/admin/metering_report_service.proto create mode 100644 
canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v1/admin/object_meta.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v1/admin/package_management_service.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v1/admin/participant_pruning_service.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v1/admin/party_management_service.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v1/admin/user_management_service.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v1/command_completion_service.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v1/command_service.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v1/command_submission_service.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v1/commands.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v1/completion.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v1/contract_metadata.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v1/event.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v1/event_query_service.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v1/experimental_features.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v1/ledger_configuration_service.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v1/ledger_identity_service.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v1/ledger_offset.proto 
create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v1/package_service.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v1/testing/time_service.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v1/trace_context.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v1/transaction.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v1/transaction_filter.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v1/transaction_service.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v1/value.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v1/version_service.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/README.md create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/command_completion_service.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/command_service.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/command_submission_service.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/commands.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/completion.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/event_query_service.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/package_service.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/participant_offset.proto create mode 100644 
canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/reassignment.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/reassignment_command.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/state_service.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/testing/time_service.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/transaction.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/transaction_filter.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/update_service.proto create mode 100644 canton-3x/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/version_service.proto create mode 100644 canton-3x/community/ledger-service/http-json-perf/README.md create mode 100644 canton-3x/community/ledger-service/http-json-perf/daml/LargeAcs.daml create mode 100644 canton-3x/community/ledger-service/http-json-perf/release/json-api-perf-logback.xml create mode 100644 canton-3x/community/ledger-service/http-json-perf/src/main/resources/gatling.conf create mode 100644 canton-3x/community/ledger-service/http-json-perf/src/main/scala/com/daml/http/perf/Config.scala create mode 100644 canton-3x/community/ledger-service/http-json-perf/src/main/scala/com/daml/http/perf/Main.scala create mode 100644 canton-3x/community/ledger-service/http-json-perf/src/main/scala/com/daml/http/perf/OracleRunner.scala create mode 100644 canton-3x/community/ledger-service/http-json-perf/src/main/scala/com/daml/http/perf/PostgresRunner.scala create mode 100644 canton-3x/community/ledger-service/http-json-perf/src/main/scala/com/daml/http/perf/QueryStoreBracket.scala create mode 100644 
canton-3x/community/ledger-service/http-json-perf/src/main/scala/com/daml/http/perf/scenario/AsyncQueryConstantAcs.scala create mode 100644 canton-3x/community/ledger-service/http-json-perf/src/main/scala/com/daml/http/perf/scenario/Checks.scala create mode 100644 canton-3x/community/ledger-service/http-json-perf/src/main/scala/com/daml/http/perf/scenario/CreateAndExerciseCommand.scala create mode 100644 canton-3x/community/ledger-service/http-json-perf/src/main/scala/com/daml/http/perf/scenario/CreateCommand.scala create mode 100644 canton-3x/community/ledger-service/http-json-perf/src/main/scala/com/daml/http/perf/scenario/ExerciseCommand.scala create mode 100644 canton-3x/community/ledger-service/http-json-perf/src/main/scala/com/daml/http/perf/scenario/HasArchiveRequest.scala create mode 100644 canton-3x/community/ledger-service/http-json-perf/src/main/scala/com/daml/http/perf/scenario/HasCreateRequest.scala create mode 100644 canton-3x/community/ledger-service/http-json-perf/src/main/scala/com/daml/http/perf/scenario/HasQueryRequest.scala create mode 100644 canton-3x/community/ledger-service/http-json-perf/src/main/scala/com/daml/http/perf/scenario/HasRandomAmount.scala create mode 100644 canton-3x/community/ledger-service/http-json-perf/src/main/scala/com/daml/http/perf/scenario/MultiUserQueryScenario.scala create mode 100644 canton-3x/community/ledger-service/http-json-perf/src/main/scala/com/daml/http/perf/scenario/SimulationConfig.scala create mode 100644 canton-3x/community/ledger-service/http-json-perf/src/main/scala/com/daml/http/perf/scenario/SyncQueryConstantAcs.scala create mode 100644 canton-3x/community/ledger-service/http-json-perf/src/main/scala/com/daml/http/perf/scenario/SyncQueryMegaAcs.scala create mode 100644 canton-3x/community/ledger-service/http-json-perf/src/main/scala/com/daml/http/perf/scenario/SyncQueryNewAcs.scala create mode 100644 
canton-3x/community/ledger-service/http-json-perf/src/main/scala/com/daml/http/perf/scenario/SyncQueryVariableAcs.scala create mode 100644 canton-3x/community/ledger-service/http-json/src/failure/scala/http/FailureTestsCustomToken.scala create mode 100644 canton-3x/community/ledger-service/http-json/src/failure/scala/http/FailureTestsUserToken.scala create mode 100644 canton-3x/community/ledger-service/http-json/src/failurelib/scala/http/FailureTests.scala create mode 100644 canton-3x/community/ledger-service/http-json/src/failurelib/scala/http/HttpTestFixture.scala create mode 100644 canton-3x/community/ledger-service/http-json/src/failurelib/scala/http/ToxicSandboxFixture.scala create mode 100644 canton-3x/community/ledger-service/http-json/src/itlib/resources/it/iouCreateCommand.json create mode 100644 canton-3x/community/ledger/ledger-README.md create mode 100644 canton-3x/community/ledger/ledger-api-auth-README.md create mode 100644 canton-3x/community/ledger/ledger-api-core/JCS.md create mode 100644 canton-3x/community/ledger/ledger-api-core/rootdoc.txt create mode 100644 canton-3x/community/ledger/ledger-api-core/src/.gitattributes create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/protobuf/daml/platform/index.proto create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/protobuf/daml/platform/page_tokens.proto create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/NO_AUTO_COPYRIGHT create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/h2database/V1__Append_only_schema.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/h2database/V1__Append_only_schema.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V10__non_optional_ledger_end.sha256 create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V10__non_optional_ledger_end.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V11__drop_participant_side_command_deduplication.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V11__drop_participant_side_command_deduplication.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V12__participant_metering.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V12__participant_metering.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V13__remove_events_view.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V13__remove_events_view.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V14__modifiable_users_and_parties.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V14__modifiable_users_and_parties.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V15__creates_driver_metadata.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V15__creates_driver_metadata.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V16__identity_provider_config.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V16__identity_provider_config.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V17__etq_completions.sha256 create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V17__etq_completions.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V18__etq_tables_and_indexes.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V18__etq_tables_and_indexes.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V19__identity_provider_id_users_parties.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V19__identity_provider_id_users_parties.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V1__Append_only_schema.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V1__Append_only_schema.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V20__etq_data_migration.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V20__etq_data_migration.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V21__etq_drop_tx_id_indexes.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V21__etq_drop_tx_id_indexes.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V22__audience_idp_config.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V22__audience_idp_config.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V23__add_domain_id.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V23__add_domain_id.sql create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V24__add_reassignment_events.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V24__add_reassignment_events.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V25__add_indexes_for_incomplete_reassignment_queries.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V25__add_indexes_for_incomplete_reassignment_queries.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V26__add_key_maintainers.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V26__add_key_maintainers.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V27__add_trace_context.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V27__add_trace_context.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V28__drop_ledger_id.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V28__drop_ledger_id.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V29__nullable_transfer_submitter.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V29__nullable_transfer_submitter.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V2__Drop_json_indexes.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V2__Drop_json_indexes.sql create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V3__Add_string_interning.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V3__Add_string_interning.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V4__add_ledger_end_string_interning_columnt_to_parameters.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V4__add_ledger_end_string_interning_columnt_to_parameters.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V5__activate_party_interning.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V5__activate_party_interning.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V6__activate_template_id_interning.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V6__activate_template_id_interning.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V7_1__drop_event_id_indexes.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V7_1__drop_event_id_indexes.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V7__add_filter_table.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V7__add_filter_table.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V8__add_user_managment.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V8__add_user_managment.sql create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V9__add_transaction_metering.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/oracle/V9__add_transaction_metering.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V100_0__Append_only_schema.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V100_0__Append_only_schema.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V100_1__Append_only_cleanup.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V100_1__Append_only_cleanup.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V100_2__Append_only_indices.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V100_2__Append_only_indices.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V100_3__Append_only_vacuum.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V100_3__Append_only_vacuum.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V101__drop_configuration_from_parameters_table.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V101__drop_configuration_from_parameters_table.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V102__add_initialization_indices.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V102__add_initialization_indices.sql create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V103__remove_duplicate_index.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V103__remove_duplicate_index.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V104__rename_packages_size_column.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V104__rename_packages_size_column.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V105__drop_unique_index_constraints.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V105__drop_unique_index_constraints.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V106__add_rejection_status_proto_column.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V106__add_rejection_status_proto_column.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V107__parameters_table_cleanup.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V107__parameters_table_cleanup.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V108__drop_parties.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V108__drop_parties.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V109__Add_all_divulgence_pruning_offset.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V109__Add_all_divulgence_pruning_offset.sql create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V10_0__Extract_Event_Data.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V10_0__Extract_Event_Data.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V10_2__Extract_Event_Data.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V10_2__Extract_Event_Data.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V110__add_deduplication_info_to_completions.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V110__add_deduplication_info_to_completions.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V111__timestamp_to_bigint.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V111__timestamp_to_bigint.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V112__add_string_interning.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V112__add_string_interning.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V113__enable_string_interning.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V113__enable_string_interning.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V114__activate_string_interning_for_parties_and_templates.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V114__activate_string_interning_for_parties_and_templates.sql create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V116__add_filter_table.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V116__add_filter_table.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V117_1__drop_event_id_indexes.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V117_1__drop_event_id_indexes.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V117__vacuum_analyze.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V117__vacuum_analyze.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V118__add_user_managment.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V118__add_user_managment.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V119__add_transaction_metering.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V119__add_transaction_metering.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V11__Disclosures_index.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V11__Disclosures_index.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V120__non_optional_ledger_end.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V120__non_optional_ledger_end.sql create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V121__drop_participant_side_command_deduplication.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V121__drop_participant_side_command_deduplication.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V122__participant_metering.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V122__participant_metering.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V123__remove_events_view.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V123__remove_events_view.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V124__modifiable_users_and_parties.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V124__modifiable_users_and_parties.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V125__creates_driver_metadata.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V125__creates_driver_metadata.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V126__identity_provider_config.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V126__identity_provider_config.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V127__etq_completions.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V127__etq_completions.sql create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V128__etq_tables_and_indexes.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V128__etq_tables_and_indexes.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V129__identity_provider_id_users_parties.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V129__identity_provider_id_users_parties.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V12__Add_configuration.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V12__Add_configuration.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V130__etq_data_migration.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V130__etq_data_migration.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V131__etq_drop_tx_id_indexes.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V131__etq_drop_tx_id_indexes.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V132__audience_idp_config.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V132__audience_idp_config.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V133__add_domain_id.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V133__add_domain_id.sql create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V134__add_reassignment_events.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V134__add_reassignment_events.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V135__add_indexes_for_incomplete_reassignment_queries.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V135__add_indexes_for_incomplete_reassignment_queries.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V136__add_key_maintainers.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V136__add_key_maintainers.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V137__add_trace_context.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V137__add_trace_context.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V138__drop_ledger_id.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V138__drop_ledger_id.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V139__nullable_transfer_submitter.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V139__nullable_transfer_submitter.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V13__Party_entries.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V13__Party_entries.sql create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V14__Package_entries.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V14__Package_entries.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V15__Loosen_transaction_check.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V15__Loosen_transaction_check.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V16__Create_command_completions_table.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V16__Create_command_completions_table.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V17__Command_deduplication.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V17__Command_deduplication.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V18__Backfill_completions.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V18__Backfill_completions.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V19__Fix_Completions.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V19__Fix_Completions.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V1__Init.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V1__Init.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V20__Events_new_schema.sha256 create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V20__Events_new_schema.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V21__Stable_offsets.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V21__Stable_offsets.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V22__Remove_maximum_record_time.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V22__Remove_maximum_record_time.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V23__Delete_checkpoints.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V23__Delete_checkpoints.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V24__Stable_offsets_archival.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V24__Stable_offsets_archival.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V26_0__Contracts_new_schema.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V26_0__Contracts_new_schema.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V26_2__Contract_create_arg_not_null.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V26_2__Contract_create_arg_not_null.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V27__Events_table_fixes.sha256 create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V27__Events_table_fixes.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V2_0__Contract_divulgence.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V2_0__Contract_divulgence.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V30__Drop_old_schema.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V30__Drop_old_schema.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V31__Event_witnesses_single_table.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V31__Event_witnesses_single_table.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V32_0__Drop_archived_contracts.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V32_0__Drop_archived_contracts.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V33__Add_witnesses_to_participant_events.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V33__Add_witnesses_to_participant_events.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V34__Parties_is_local.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V34__Parties_is_local.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V35__event_sequential_id.sha256 create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V35__event_sequential_id.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V36__drop_participant_id.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V36__drop_participant_id.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V37__add_participant_id_to_parameters.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V37__add_participant_id_to_parameters.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V39__Participant-pruning.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V39__Participant-pruning.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V40__multiple_submitters.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V40__multiple_submitters.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V41__hash_indices.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V41__hash_indices.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V42__Convert_hash_indices.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V42__Convert_hash_indices.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V43__explicit_compression.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V43__explicit_compression.sql 
create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V44__offset_as_text.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V44__offset_as_text.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V45__fix_large_size_index_issues.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V45__fix_large_size_index_issues.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V4_0__Add_parties.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V4_0__Add_parties.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V5__Add_packages.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V5__Add_packages.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V6__External_Ledger_Offset.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V6__External_Ledger_Offset.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V7__Command_deduplication.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V7__Command_deduplication.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V8__Contract_Divulgence.sha256 create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V8__Contract_Divulgence.sql create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V9__Contract_Divulgence.sha256 create mode 
100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/postgres/V9__Contract_Divulgence.sql create mode 100755 canton-3x/community/ledger/ledger-api-core/src/main/resources/db/migration/recompute-sha256sums.sh create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/metering-keys/README.md create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/resources/metering-keys/community.json create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/error/generator/ErrorCategoryDocItem.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/error/generator/ErrorCategoryInventoryDocsGenerator.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/error/generator/ErrorCodeDocItem.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/error/generator/ErrorCodeDocumentationGenerator.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/error/generator/ErrorCodeInventoryDocsGenerator.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/error/generator/ErrorGroupDocItem.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/error/generator/app/ErrorCategoryInventoryDocsGenApp.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/error/generator/app/ErrorCodeInventoryDocsGenApp.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/error/generator/app/Main.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/ProxyCloseable.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/SubmissionIdGenerator.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/TraceIdentifiers.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/ValidationLogger.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/AuthService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/AuthServiceJWT.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/AuthServiceJWTPayload.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/AuthServiceWildcard.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/AuthorizationError.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/Authorizer.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/CachedJwtVerifierLoader.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/Claims.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/IdentityProviderAwareAuthServiceImpl.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/IdentityProviderConfigLoader.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/JwtVerifierLoader.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/OngoingAuthorizationObserver.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/UserRightsChangeAsyncChecker.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/client/LedgerCallCredentials.java create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/interceptor/AsyncForwardingListener.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/interceptor/AuthorizationInterceptor.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/interceptor/IdentityProviderAwareAuthService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/ActiveContractsServiceAuthorization.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/CommandCompletionServiceAuthorization.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/CommandCompletionServiceV2Authorization.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/CommandServiceAuthorization.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/CommandServiceV2Authorization.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/CommandSubmissionServiceAuthorization.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/CommandSubmissionServiceV2Authorization.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/ConfigManagementServiceAuthorization.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/EventQueryServiceAuthorization.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/EventQueryServiceV2Authorization.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/IdentityProviderConfigServiceAuthorization.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/LedgerConfigurationServiceAuthorization.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/LedgerIdentityServiceAuthorization.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/MeteringReportServiceAuthorization.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/PackageManagementServiceAuthorization.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/PackageServiceAuthorization.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/PackageServiceV2Authorization.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/ParticipantPruningServiceAuthorization.scala 
create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/PartyManagementServiceAuthorization.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/StateServiceAuthorization.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/TimeServiceAuthorization.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/TimeServiceV2Authorization.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/TransactionServiceAuthorization.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/UpdateServiceAuthorization.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/UserManagementServiceAuthorization.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/domain/domain.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/grpc/DropRepeated.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/grpc/GrpcActiveContractsService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/grpc/GrpcApiService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/grpc/GrpcHealthService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/grpc/GrpcLedgerConfigurationService.scala create mode 
100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/grpc/GrpcPackageService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/grpc/Logging.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/grpc/StreamingServiceLifecycleManagement.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/health/HealthChecks.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/health/HealthStatus.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/health/ReportsHealth.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/messages/command/completion/CompletionEndRequest.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/messages/command/completion/CompletionStreamRequest.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/messages/command/submission/SubmitReassignmentRequest.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/messages/command/submission/SubmitRequest.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/messages/event/GetEventsByContractIdRequest.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/messages/event/GetEventsByContractKeyRequest.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/messages/transaction/GetLedgerEndRequest.scala create mode 
100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/messages/transaction/GetTransactionByEventIdRequest.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/messages/transaction/GetTransactionByIdRequest.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/messages/transaction/GetTransactionTreesRequest.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/messages/transaction/GetTransactionsRequest.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/services/CommandCompletionService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/services/CommandService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/services/CommandSubmissionService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/services/EventQueryService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/services/TransactionService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/CommandsValidator.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/CompletionServiceRequestValidator.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/DeduplicationPeriodValidator.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/EventQueryServiceRequestValidator.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/FieldValidator.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/LedgerOffsetValidator.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/PartyNameChecker.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/PartyValidator.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/ResourceAnnotationValidator.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/SubmitAndWaitRequestValidator.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/SubmitRequestValidator.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/TransactionFilterValidator.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/TransactionServiceRequestValidator.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/ValidateDisclosedContracts.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/ValidationErrors.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/ValueValidator.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/client/GrpcChannel.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/client/LedgerClient.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/client/configuration/CommandClientConfiguration.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/client/configuration/LedgerClientChannelConfiguration.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/client/configuration/LedgerClientConfiguration.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/client/configuration/LedgerIdRequirement.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/client/services/EventQueryServiceClient.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/client/services/acs/ActiveContractSetClient.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/client/services/acs/withoutledgerid/ActiveContractSetClient.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/client/services/admin/IdentityProviderConfigClient.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/client/services/admin/MeteringReportClient.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/client/services/admin/PackageManagementClient.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/client/services/admin/ParticipantPruningManagementClient.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/client/services/admin/PartyManagementClient.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/client/services/admin/UserManagementClient.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/client/services/commands/CommandClient.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/client/services/commands/CommandCompletionSource.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/client/services/commands/CommandSubmission.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/client/services/commands/CommandSubmissionFlow.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/client/services/commands/CommandUpdaterFlow.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/client/services/commands/CompletionStreamElement.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/client/services/commands/SynchronousCommandClient.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/client/services/commands/withoutledgerid/CommandClient.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/client/services/identity/LedgerIdentityClient.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/client/services/pkg/PackageClient.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/client/services/pkg/withoutledgerid/PackageClient.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/client/services/testing/time/StaticTime.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/client/services/transactions/TransactionClient.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/client/services/transactions/TransactionSource.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/client/services/transactions/withoutledgerid/TransactionClient.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/client/services/version/VersionClient.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/client/services/version/withoutledgerid/VersionClient.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/client/withoutledgerid/LedgerClient.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/index/v2/ContractStore.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/index/v2/IdentityProvider.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/index/v2/IndexActiveContractsService.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/index/v2/IndexCompletionsService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/index/v2/IndexConfigManagementService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/index/v2/IndexConfigurationService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/index/v2/IndexEventQueryService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/index/v2/IndexPackagesService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/index/v2/IndexParticipantPruningService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/index/v2/IndexPartyManagementService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/index/v2/IndexService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/index/v2/IndexTransactionsService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/index/v2/IndexerPartyDetails.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/index/v2/LedgerEndService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/index/v2/MaximumLedgerTimeService.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/index/v2/MeteringStore.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/index/v2/PartyEntry.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/index/v2/package.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/v2/CompletionInfo.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/v2/DivulgedContract.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/v2/PruningResult.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/v2/ReadService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/v2/Reassignment.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/v2/ReassignmentCommand.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/v2/SubmitterInfo.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/v2/TransactionMeta.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/v2/Update.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/v2/WriteConfigService.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/v2/WritePackagesService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/v2/WriteParticipantPruningService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/v2/WritePartyService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/v2/WriteService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/v2/metrics/TimedReadService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/v2/metrics/TimedWriteService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/v2/package.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/runner/common/OptConfigValue.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/runner/common/PureConfigReaderWriter.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/ApiOffset.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/DispatcherState.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/InMemoryState.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/LedgerApiServer.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/TemplatePartiesFilter.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/ActiveStreamMetricsInterceptor.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/ApiException.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/ApiService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/ApiServiceOwner.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/ApiServices.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/ExecutionSequencerFactoryOwner.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/GrpcServer.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/LedgerApiService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/LedgerFeatures.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/SeedService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/TimeServiceBackend.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/TimedIndexService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/TruncatedStatusInterceptor.scala create 
mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/configuration/LedgerConfigurationInitializer.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/configuration/LedgerConfigurationSubscription.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/configuration/LedgerConfigurationSubscriptionFromIndex.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/configuration/RateLimitingConfig.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/error/ErrorInterceptor.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/execution/AuthorityResolver.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/execution/CommandExecutionResult.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/execution/CommandExecutor.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/execution/DynamicDomainParameterGetter.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/execution/LedgerTimeAwareCommandExecutor.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/execution/ResolveMaximumLedgerTime.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/execution/StoreBackedCommandExecutor.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/execution/TimedCommandExecutor.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/meteringreport/HmacSha256.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/meteringreport/Jcs.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/meteringreport/JcsSigner.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/meteringreport/MeteringReport.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/meteringreport/MeteringReportGenerator.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/meteringreport/MeteringReportKey.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/ratelimiting/LimitResult.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/ratelimiting/MemoryCheck.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/ratelimiting/RateLimitingInterceptor.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/ratelimiting/StreamCheck.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/ratelimiting/ThreadpoolCheck.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiActiveContractsService.scala 
create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiCommandCompletionService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiCommandCompletionServiceV2.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiCommandService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiCommandServiceV2.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiCommandSubmissionService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiCommandSubmissionServiceV2.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiConversions.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiEventQueryService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiEventQueryServiceV2.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiLedgerConfigurationService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiLedgerIdentityService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiPackageService.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiPackageServiceV2.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiStateService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiTimeService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiTimeServiceV2.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiTransactionService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiUpdateService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiVersionService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiVersionServiceV2.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/RejectionGenerators.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/StreamMetrics.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiConfigManagementService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiIdentityProviderConfigService.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiMeteringReportService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiPackageManagementService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiParticipantPruningService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiPartyManagementService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiUserManagementService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/IdentityProviderExists.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/PartyRecordsExist.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/SynchronousResponse.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/Utils.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/package.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/CommandCompletionServiceImpl.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/CommandServiceImpl.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/CommandSubmissionServiceImpl.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/logging/package.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/package.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/tracking/CancellableTimeoutSupport.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/tracking/CompletionResponse.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/tracking/SubmissionTracker.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/transaction/EventQueryServiceImpl.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/transaction/TransactionServiceImpl.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/update/FieldNames.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/update/IdentityProviderConfigUpdateMapper.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/update/PartyRecordUpdateMapper.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/update/UpdateMapperBase.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/update/UpdatePath.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/update/UpdatePathError.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/update/UpdatePathsTrie.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/update/UpdateRequestsPaths.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/update/UserUpdateMapper.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/update/update.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/config/CommandServiceConfig.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/config/IndexServiceConfig.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/config/InvalidConfigException.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/config/Readers.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/config/ServerRole.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/config/UserManagementServiceConfig.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/ContractStoreBasedMaximumLedgerTimeService.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/InMemoryStateUpdater.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/IndexServiceImpl.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/IndexServiceOwner.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/IndexerConfig.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/IndexerServiceOwner.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/IndexerStartupMode.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/JdbcIndexer.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/MeteringAggregator.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/PackageMetadataViewConfig.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/RecoveringIndexer.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/UpdatePackageMetadataView.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/ha/HaCoordinator.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/ha/KillSwitchCaptor.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/ha/PollingChecker.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/ha/PreemptableSequence.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/package.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/parallel/AsyncSupport.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/parallel/BatchN.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/parallel/BatchingParallelIngestionPipe.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/parallel/InitializeParallelIngestion.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/parallel/ParallelIndexerFactory.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/parallel/ParallelIndexerSubscription.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/localstore/CachedIdentityProviderConfigStore.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/localstore/CachedUserManagementStore.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/localstore/IdentityProviderManagementConfig.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/localstore/InMemoryIdentityProviderConfigStore.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/localstore/InMemoryPartyRecordStore.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/localstore/InMemoryUserManagementStore.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/localstore/Ops.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/localstore/PersistentIdentityProviderConfigStore.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/localstore/PersistentPartyRecordStore.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/localstore/PersistentUserManagementStore.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/localstore/api/IdentityProviderConfigStore.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/localstore/api/IdentityProviderConfigUpdate.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/localstore/api/PartyRecord.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/localstore/api/PartyRecordStore.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/localstore/api/UserManagementStore.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/localstore/utils/LocalAnnotationsUtils.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/package.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/packages/DeduplicatingPackageLoader.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/CompletionFromTransaction.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/DbSupport.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/DbType.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/EventSequentialId.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/FlywayMigrations.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/ScalaPbStreamingOptimizations.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/Conversions.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/DbDto.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/DbDtoToStringsForInterning.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/StorageBackend.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/StorageBackendFactory.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDto.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/UpdateToMeteringDbDto.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/VerifiedDataSource.scala 
create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/CommonStorageBackendFactory.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/CompletionStorageBackendTemplate.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/ComposableQuery.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/ConfigurationStorageBackendTemplate.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/ContractStorageBackendTemplate.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/DataSourceStorageBackendImpl.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/EventReaderQueries.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/EventStorageBackendTemplate.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/Field.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/IngestionStorageBackendTemplate.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/InitHookDataSourceProxy.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/IntegrityStorageBackendImpl.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/MeteringParameterStorageBackendImpl.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/MeteringStorageBackendImpl.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/PackageStorageBackendTemplate.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/ParameterStorageBackendImpl.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/PartyStorageBackendTemplate.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/QueryStrategy.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/Schema.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/SimpleSqlAsVectorOf.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/StringInterningStorageBackendImpl.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/Table.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/TransactionPointwiseQueries.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/TransactionStreamingQueries.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/h2/H2ContractStorageBackend.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/h2/H2DBLockStorageBackend.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/h2/H2DataSourceStorageBackend.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/h2/H2EventStorageBackend.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/h2/H2Field.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/h2/H2FunctionAliases.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/h2/H2QueryStrategy.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/h2/H2ResetStorageBackend.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/h2/H2Schema.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/h2/H2StorageBackendFactory.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/h2/H2Table.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/localstore/IdentityProviderStorageBackend.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/localstore/IdentityProviderStorageBackendImpl.scala 
create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/localstore/ParticipantMetadataBackend.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/localstore/PartyRecordStorageBackend.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/localstore/PartyRecordStorageBackendImpl.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/localstore/ResourceVersionOps.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/localstore/UserManagementStorageBackend.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/localstore/UserManagementStorageBackendImpl.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/oracle/OracleDBLockStorageBackend.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/oracle/OracleDataSourceStorageBackend.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/oracle/OracleEventStorageBackend.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/oracle/OracleField.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/oracle/OracleQueryStrategy.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/oracle/OracleResetStorageBackend.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/oracle/OracleSchema.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/oracle/OracleStorageBackendFactory.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/oracle/OracleTable.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PGField.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PGSchema.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PGTable.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PostgresDBLockStorageBackend.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PostgresDataSourceStorageBackend.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PostgresEventStorageBackend.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PostgresQueryStrategy.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PostgresResetStorageBackend.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PostgresStorageBackendFactory.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/ContractKeyStateCache.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/ContractStateCaches.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/ContractsStateCache.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/InMemoryFanoutBuffer.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/LedgerEndCache.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/MutableCacheBackedContractStore.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/StateCache.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/package.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/BufferedCommandCompletionsReader.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/BufferedStreamsReader.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/BufferedTransactionByIdReader.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/CommandCompletionsReader.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/DatabaseSelfServiceError.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/DbDispatcher.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/EventProjectionProperties.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/HikariJdbcConnectionProvider.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/JdbcConnectionProvider.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDao.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/LedgerDao.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/PaginatingAsyncStream.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/PersistenceResponse.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/QueryRange.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/SequentialWriteDao.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ACSReader.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/BufferedTransactionsReader.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/CompressionMetrics.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/CompressionStrategy.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ContractLoader.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ContractStateEvent.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ContractsReader.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/EventIdsUtils.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/EventsRange.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/EventsReader.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/EventsTable.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/FilterUtils.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/IdPageSizing.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/LfValueTranslation.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/QueryNonPruned.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/Raw.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ReassignmentStreamReader.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TransactionConversion.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TransactionLogUpdatesConversions.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TransactionPointwiseReader.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TransactionsFlatStreamReader.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TransactionsReader.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TransactionsTreeStreamReader.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/Utils.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/entries/ConfigurationEntry.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/entries/PackageLedgerEntry.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/entries/PartyLedgerEntry.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/interfaces/LedgerDaoContractsReader.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/interfaces/TransactionLogUpdate.scala create 
mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/interning/RawStringInterning.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/interning/StringInterning.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/interning/StringInterningView.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/packagemeta/PackageMetadata.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/packagemeta/PackageMetadataView.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/serialization/Compression.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/serialization/ValueSerializer.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/utils/ConcurrencyLimiter.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/utils/Telemetry.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/util/Ctx.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/util/pekkostreams/ExtractMaterializedValue.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/util/pekkostreams/ImmutableIterable.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/util/pekkostreams/MaxInFlight.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/db/migration/EmptyJavaMigration.scala 
create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/db/migration/oracle/package.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/db/migration/postgres/V10_1__Populate_Event_Data.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/db/migration/postgres/V25__Backfill_Participant_Events.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/db/migration/postgres/V26_1__Fill_create_argument.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/db/migration/postgres/V28__Fix_key_hashes.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/db/migration/postgres/V29__Fix_participant_events.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/db/migration/postgres/V2_1__Rebuild_Acs.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/db/migration/postgres/V32_1__Fix_key_hashes.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/db/migration/postgres/V38__Update_value_versions.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/db/migration/postgres/V3__Recompute_Key_Hash.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/main/scala/db/migration/postgres/V4_1__Collect_Parties.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/resources/OracleLog.properties create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/resources/config/test.conf create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/resources/config/test2.conf create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/resources/config/testp.conf create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/resources/test-metering-key.json create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/error/generator/ErrorCodeDocumentationGeneratorSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/DomainMocks.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/IdentityProviderIdSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/TraceIdentifiersTest.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/ValueConversionRoundTripTest.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/auth/AuthServiceJWTCodecSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/auth/AuthorizationInterceptorSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/auth/AuthorizerSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/auth/OngoingAuthorizationObserverSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/grpc/DropRepeatedSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/grpc/GrpcHealthServiceSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/validation/CompletionServiceRequestValidatorTest.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/validation/DeduplicationPeriodValidatorSpec.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/validation/EventQueryServiceRequestValidatorTest.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/validation/IdentifierValidatorTest.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/validation/ResourceAnnotationValidationsSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/validation/SubmitRequestValidatorTest.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/validation/TransactionServiceRequestValidatorTest.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/validation/ValidateDisclosedContractsTest.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/validation/ValidatorTestUtils.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/client/configuration/LedgerIdRequirementTest.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/client/services/commands/CommandSubmissionFlowTest.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/client/services/commands/CommandUpdaterFlowTest.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/runner/common/ArbitraryConfig.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/runner/common/PureConfigReaderWriterSpec.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/DispatcherStateSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/InMemoryStateSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/IndexComponentTest.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/GrpcServerSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/SeedingSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/SimpleTimeServiceBackendSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/configuration/LedgerConfigurationSubscriptionFromIndexSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/error/ErrorInterceptorSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/execution/LedgerTimeAwareCommandExecutorSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/execution/ResolveMaximumLedgerTimeSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/execution/StoreBackedCommandExecutorSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/execution/TestDynamicDomainParameterGetter.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/meteringreport/HmacSha256Spec.scala create mode 
100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/meteringreport/JcsSignerSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/meteringreport/JcsSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/meteringreport/MeteringReportGeneratorSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/meteringreport/MeteringReportKeySpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/meteringreport/MeteringReportSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/ratelimiting/LimitResultSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/ratelimiting/MemoryCheckSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/ratelimiting/RateLimitingInterceptorSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/ApiCommandServiceSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/ApiCommandSubmissionServiceSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiConfigManagementServiceSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiMeteringReportServiceSpec.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiPackageManagementServiceSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiPartyManagementServiceSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiUserManagementServiceSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/command/CommandServiceImplSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/command/CommandSubmissionServiceImplSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/tracking/CancellableTimeoutSupportSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/tracking/SubmissionTrackerSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/tls/OcspResponderFixture.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/tls/TlsCertificateRevocationCheckingSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/tls/TlsFixture.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/tls/TlsSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/update/IdentityProviderConfigUpdateMapperSpec.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/update/PartyRecordUpdateMapperSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/update/UpdatePathSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/update/UpdatePathsTrieSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/update/UserUpdateMapperSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/validation/ErrorFactoriesSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/index/ContractStoreBasedMaximumLedgerTimeServiceSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/index/InMemoryStateUpdaterSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/index/IndexServiceImplSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/MeteringAggregatorSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/RecoveringIndexerIntegrationSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/RecoveringIndexerSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/ha/EndlessReadService.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/ha/EndlessReadServiceSpec.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/ha/HaCoordinatorSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/ha/IndexerStabilitySpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/ha/IndexerStabilitySpecOracle.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/ha/IndexerStabilitySpecPostgres.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/ha/IndexerStabilityTestFixture.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/ha/TestDBLockStorageBackend.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/ha/TestDBLockStorageBackendSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/parallel/BatchNSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/parallel/BatchingParallelIngestionPipeSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/parallel/ParallelIndexerFactorySpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/parallel/ParallelIndexerSubscriptionSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/localstore/CachedIdentityProviderConfigStoreSpec.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/localstore/CachedUserManagementStoreSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/localstore/ConcurrentChangeControlTests.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/localstore/DbDispatcherLeftOpsSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/localstore/IdentityProviderConfigStoreSpecBase.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/localstore/IdentityProviderConfigStoreTests.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/localstore/InMemoryIdentityProviderConfigStoreSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/localstore/InMemoryPartyRecordStoreSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/localstore/InMemoryUserManagementStoreSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/localstore/PartyRecordStoreSpecBase.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/localstore/PartyRecordStoreTests.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/localstore/PersistentIdentityProviderConfigStoreH2Spec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/localstore/PersistentIdentityProviderConfigStoreSpecOracle.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/localstore/PersistentIdentityProviderConfigStoreSpecPostgres.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/localstore/PersistentIdentityProviderConfigStoreTests.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/localstore/PersistentPartyRecordStoreH2Spec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/localstore/PersistentPartyRecordStoreSpecOracle.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/localstore/PersistentPartyRecordStoreSpecPostgres.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/localstore/PersistentPartyRecordStoreTests.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/localstore/PersistentStoreSpecBase.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/localstore/PersistentUserStoreH2Spec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/localstore/PersistentUserStoreSpecOracle.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/localstore/PersistentUserStoreSpecPostgres.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/localstore/PersistentUserStoreTests.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/localstore/UserStoreSpecBase.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/localstore/UserStoreTests.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/multidomain/MultiDomainIndexComponentTest.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/packages/DeduplicatingPackageLoaderSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/CompletionFromTransactionSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/FlywayMigrationsSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/DbDtoEq.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/DbDtoEqSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/DbDtoToStringsForInterningSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/PruningDtoQueries.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendProvider.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendSpecH2.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendSpecOracle.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendSpecPostgres.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendSuite.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestValues.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsCompletions.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsContracts.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsDBLock.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsEvents.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsIDPConfig.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsIngestion.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsInitialization.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsInitializeIngestion.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsIntegrity.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsMeteringParameters.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsMigrationPruning.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsParticipantMetadata.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsPartyRecord.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsPruning.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsReadMetering.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsReassignmentEvents.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsReset.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsStringInterning.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsTimestamps.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsTransactionStreamsEvents.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsUserManagement.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsWriteMetering.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDtoSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/UpdateToMeteringDbDtoSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/common/ComposableQuerySpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/h2/H2DataSourceStorageBackendSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/cache/ContractStateCachesSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/cache/InMemoryFanoutBufferSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/cache/MutableCacheBackedContractStoreRaceTests.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/cache/MutableCacheBackedContractStoreSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/cache/StateCacheSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/BufferedStreamsReaderSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/BufferedTransactionByIdReaderSpec.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/EventProjectionPropertiesSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoActiveContractsSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoBackend.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoBackendH2Database.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoBackendOracle.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoBackendPostgresql.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoCompletionsSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoConfigurationSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoContractsSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoDivulgenceSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoEventsSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoExceptionSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoH2DatabaseSpec.scala create mode 
100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoPackagesSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoPartiesSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoSpecOracle.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoSpecPostgres.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoSuite.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoTransactionTreesSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoTransactionsSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoTransactionsWriterSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/SequentialWriteDaoSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/events/ACSReaderSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/events/GroupContiguousSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/events/PekkoStreamParallelBatchedLoaderSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/events/TransactionConversionSpec.scala 
create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/events/UtilsSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/entries/LedgerEntry.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/interning/MockStringInterning.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/interning/RawStringInterningSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/interning/StringInterningDomainSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/interning/StringInterningViewSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/migration/DbConnectionAndDataSourceAroundEach.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/migration/DbDataTypes.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/migration/MigrationTestSupport.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/migration/oracle/OracleAroundEachForMigrations.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/migration/postgres/MigrationFrom111To116TestPostgres.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/migration/postgres/PostgresAroundEachForMigrations.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/migration/postgres/RemovalOfJavaMigrationsPostgres.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/packagemeta/PackageMetadataSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/packagemeta/PackageMetadataViewSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/testing/oracle/OracleAround.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/testing/oracle/OracleAroundAll.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/testing/oracle/OracleAroundEach.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/testing/oracle/OracleAroundSuite.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/testing/postgresql/PostgresAround.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/testing/postgresql/PostgresAroundAll.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/testing/postgresql/PostgresAroundEach.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/testing/postgresql/PostgresAroundSuite.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/testing/postgresql/PostgresDatabase.scala create mode 100644 
canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/testing/postgresql/PostgresResource.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/testing/postgresql/PostgresServer.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/utils/ConcurrencyLimiterSpec.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/util/ConcurrentBufferedProcessLogger.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/util/api/TimestampConversionTest.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/util/pekkostreams/ExtractSingleMaterializedValueTest.scala create mode 100644 canton-3x/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/util/pekkostreams/MaxInFlightTest.scala create mode 100644 canton-3x/community/ledger/ledger-api-errors-README.md create mode 100644 canton-3x/community/ledger/ledger-api-it/src/test/scala/com/digitalasset/canton/ledger/sandbox/MetricsOwner.scala create mode 100644 canton-3x/community/ledger/ledger-api-string-interning-benchmark/src/main/scala/com/digitalasset/canton/platform/store/interning/BenchmarkState.scala create mode 100644 canton-3x/community/ledger/ledger-api-string-interning-benchmark/src/main/scala/com/digitalasset/canton/platform/store/interning/InitializationTimeBenchmark.scala create mode 100644 canton-3x/community/ledger/ledger-api-string-interning-benchmark/src/main/scala/com/digitalasset/canton/platform/store/interning/UpdateTimeBenchmark.scala create mode 100644 canton-3x/community/ledger/ledger-api-tools/indexer-benchmark-README.md create mode 100644 canton-3x/community/ledger/ledger-api-tools/src/main/resources/indexer-benchmark-logback.xml create mode 100644 
canton-3x/community/ledger/ledger-api-tools/src/main/scala/com/digitalasset/canton/ledger/api/auth/Main.scala create mode 100644 canton-3x/community/ledger/ledger-api-tools/src/main/scala/com/digitalasset/canton/ledger/indexerbenchmark/Config.scala create mode 100644 canton-3x/community/ledger/ledger-api-tools/src/main/scala/com/digitalasset/canton/ledger/indexerbenchmark/IndexerBenchmark.scala create mode 100644 canton-3x/community/ledger/ledger-api-tools/src/main/scala/com/digitalasset/canton/ledger/indexerbenchmark/IndexerBenchmarkResult.scala create mode 100644 canton-3x/community/ledger/ledger-api-tools/src/main/scala/com/digitalasset/canton/ledger/metering/Main.scala create mode 100644 canton-3x/community/ledger/ledger-common/errors-README.md create mode 100644 canton-3x/community/ledger/ledger-common/src/main/protobuf/com/daml/ledger/configuration/ledger_configuration.proto create mode 100644 canton-3x/community/ledger/ledger-common/src/main/protobuf/ledger_configuration.rst create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/caching/Cache.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/caching/CaffeineCache.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/caching/DropwizardStatsCounter.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/caching/MappedCache.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/caching/NoCache.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/caching/SizedCache.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/DeduplicationPeriod.scala create mode 100644 
canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/domain/package.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/refinements/ApiTypes.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/tls/DecryptionParameters.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/tls/OcspProperties.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/tls/ProtocolDisabler.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/tls/SecretsUrl.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/tls/TlsConfiguration.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/tls/TlsInfo.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/tls/TlsVersion.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/util/DurationConversion.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/util/LedgerEffectiveTimeTolerance.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/util/TimeProvider.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/util/TimestampConversion.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/util/ToleranceWindow.scala create mode 100644 
canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/configuration/Configuration.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/configuration/LedgerTimeModel.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/configuration/package.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/CommonErrors.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/IndexErrors.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/LedgerApiErrors.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/PackageServiceErrors.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/ParticipantErrorGroup.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/groups/AdminServiceErrors.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/groups/AuthorizationChecksErrors.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/groups/CommandExecutionErrors.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/groups/ConsistencyErrors.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/groups/IdentityProviderConfigServiceErrors.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/groups/PartyManagementServiceErrors.scala create mode 100644 
canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/groups/RequestValidationErrors.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/groups/UserManagementServiceErrors.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/groups/WriteServiceRejectionErrors.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/offset/Offset.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/participant/state/v2/ChangeId.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/participant/state/v2/SubmissionResult.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/metrics/CommandMetrics.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/metrics/DatabaseMetricsFactory.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/metrics/ExecutionMetrics.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/metrics/IdentityProviderConfigStoreMetrics.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/metrics/IndexDBMetrics.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/metrics/IndexMetrics.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/metrics/IndexedUpdatesMetrics.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/metrics/IndexerMetrics.scala create mode 100644 
canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/metrics/LAPIMetrics.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/metrics/Metrics.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/metrics/ParallelIndexerMetrics.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/metrics/PartyRecordStoreMetrics.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/metrics/PruningMetrics.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/metrics/ServicesMetrics.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/metrics/UserManagementMetrics.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/platform/api/v1/event/EventOps.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/platform/common/MismatchException.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/platform/common/ParticipantIdNotFoundException.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/platform/participant/util/LfEngineToApi.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/platform/pekkostreams/dispatcher/Dispatcher.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/platform/pekkostreams/dispatcher/DispatcherImpl.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/platform/pekkostreams/dispatcher/SignalDispatcher.scala create mode 100644 
canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/platform/pekkostreams/dispatcher/SubSource.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/platform/services/time/TimeProviderType.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/test/resources/application.conf create mode 100755 canton-3x/community/ledger/ledger-common/src/test/resources/gen-test-certificates.sh create mode 100644 canton-3x/community/ledger/ledger-common/src/test/resources/openssl-alternative-template.cnf create mode 100644 canton-3x/community/ledger/ledger-common/src/test/resources/openssl-template.cnf create mode 100755 canton-3x/community/ledger/ledger-common/src/test/resources/test-certificates/ca.crt create mode 100755 canton-3x/community/ledger/ledger-common/src/test/resources/test-certificates/ca.key create mode 100755 canton-3x/community/ledger/ledger-common/src/test/resources/test-certificates/ca_alternative.crt create mode 100755 canton-3x/community/ledger/ledger-common/src/test/resources/test-certificates/ca_alternative.key create mode 100755 canton-3x/community/ledger/ledger-common/src/test/resources/test-certificates/ca_alternative.pem create mode 100755 canton-3x/community/ledger/ledger-common/src/test/resources/test-certificates/client-revoked.crt create mode 100755 canton-3x/community/ledger/ledger-common/src/test/resources/test-certificates/client-revoked.csr create mode 100755 canton-3x/community/ledger/ledger-common/src/test/resources/test-certificates/client-revoked.key create mode 100755 canton-3x/community/ledger/ledger-common/src/test/resources/test-certificates/client-revoked.pem create mode 100755 canton-3x/community/ledger/ledger-common/src/test/resources/test-certificates/client.crt create mode 100755 canton-3x/community/ledger/ledger-common/src/test/resources/test-certificates/client.csr create mode 100755 
canton-3x/community/ledger/ledger-common/src/test/resources/test-certificates/client.key create mode 100755 canton-3x/community/ledger/ledger-common/src/test/resources/test-certificates/client.pem create mode 100644 canton-3x/community/ledger/ledger-common/src/test/resources/test-certificates/index.txt create mode 100755 canton-3x/community/ledger/ledger-common/src/test/resources/test-certificates/ocsp.crt create mode 100755 canton-3x/community/ledger/ledger-common/src/test/resources/test-certificates/ocsp.csr create mode 100755 canton-3x/community/ledger/ledger-common/src/test/resources/test-certificates/ocsp.key.pem create mode 100755 canton-3x/community/ledger/ledger-common/src/test/resources/test-certificates/server.crt create mode 100755 canton-3x/community/ledger/ledger-common/src/test/resources/test-certificates/server.csr create mode 100755 canton-3x/community/ledger/ledger-common/src/test/resources/test-certificates/server.key create mode 100755 canton-3x/community/ledger/ledger-common/src/test/resources/test-certificates/server.pem create mode 100755 canton-3x/community/ledger/ledger-common/src/test/resources/test-certificates/server.pem.enc create mode 100755 canton-3x/community/ledger/ledger-common/src/test/resources/test-models/benchtool-tests-1.15.dar create mode 100755 canton-3x/community/ledger/ledger-common/src/test/resources/test-models/daml-lf/encoder/test-1.11.dar create mode 100755 canton-3x/community/ledger/ledger-common/src/test/resources/test-models/daml-lf/encoder/test-1.12.dar create mode 100755 canton-3x/community/ledger/ledger-common/src/test/resources/test-models/daml-lf/encoder/test-1.13.dar create mode 100755 canton-3x/community/ledger/ledger-common/src/test/resources/test-models/daml-lf/encoder/test-1.14.dar create mode 100755 canton-3x/community/ledger/ledger-common/src/test/resources/test-models/daml-lf/encoder/test-1.15.dar create mode 100755 
canton-3x/community/ledger/ledger-common/src/test/resources/test-models/daml-lf/encoder/test-1.6.dar create mode 100755 canton-3x/community/ledger/ledger-common/src/test/resources/test-models/daml-lf/encoder/test-1.7.dar create mode 100755 canton-3x/community/ledger/ledger-common/src/test/resources/test-models/daml-lf/encoder/test-1.8.dar create mode 100755 canton-3x/community/ledger/ledger-common/src/test/resources/test-models/daml-lf/encoder/test-1.dev.dar create mode 100644 canton-3x/community/ledger/ledger-common/src/test/resources/test-models/model-tests-1.15.dar create mode 100644 canton-3x/community/ledger/ledger-common/src/test/resources/test-models/semantic-tests-1.15.dar create mode 100644 canton-3x/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/caching/ConcurrentCacheBehaviorSpecBase.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/caching/ConcurrentCacheCachingSpecBase.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/caching/ConcurrentCacheEvictionSpecBase.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/caching/ConcurrentCacheSpecBase.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/caching/MapBackedCacheForTesting.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/caching/MapBackedCacheForTestingSpec.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/caching/MappedCacheSpec.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/caching/NoCacheSpec.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/caching/SizedCacheSpec.scala create mode 100644 
canton-3x/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/ledger/api/DeduplicationPeriodSpec.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/ledger/api/grpc/GrpcClientResource.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/ledger/api/tls/DecryptionParametersTest.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/ledger/api/tls/ProtocolDisablerTest.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/ledger/api/tls/SecretsUrlTest.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/ledger/api/tls/TlsConfigurationTest.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/ledger/api/util/TimestampConversionSpec.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/ledger/configuration/ConfigurationSpec.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/ledger/configuration/LedgerTimeModelSpec.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/ledger/error/testpackage/DeprecatedError.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/ledger/error/testpackage/SeriousError.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/ledger/error/testpackage/subpackage/MildErrorsParent.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/platform/api/checks/ledgerid/GetLedgerIdentityIT.scala create mode 100644 
canton-3x/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/platform/participant/util/ValueConversions.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/platform/pekkostreams/FutureTimeouts.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/platform/pekkostreams/dispatcher/DispatcherSpec.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/platform/pekkostreams/dispatcher/DispatcherTest.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/platform/pekkostreams/dispatcher/SignalDispatcherTest.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/testing/utils/TestModels.scala create mode 100644 canton-3x/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/testing/utils/TestResourceUtils.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/README.md create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/resources/application.conf create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/cliopts/GlobalLogLevel.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/cliopts/Http.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/cliopts/Metrics.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/cliopts/package.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/daml/lf/value/json/ApiCodecCompressed.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/daml/lf/value/json/ApiValueImplicits.scala create mode 100644 
canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/daml/lf/value/json/JsonVariant.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/daml/lf/value/json/NavigatorModelAliases.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/AcsTxStreams.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/domain.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/util/BeginBookmark.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/util/ClientUtil.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/util/ContractStreamStep.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/util/GraphExtensions.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/util/IdentifierConverters.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/util/InsertDeleteStep.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/util/PekkoStreamsUtils.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/CommandService.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/ContractsService.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/Endpoints.scala create mode 100644 
canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/EndpointsCompanion.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/ErrorMessages.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/HealthService.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/HttpApiConfig.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/HttpApiServer.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/HttpService.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/JsonApiConfig.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/LedgerClientJwt.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/MeteringReportService.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/PackageManagementService.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/PackageService.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/PartiesService.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/StartSettings.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/StaticContentEndpoints.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/WebSocketService.scala create mode 100644 
canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/WebsocketEndpoints.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/admin/GetPackageResponse.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/domain.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/endpoints/ContractList.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/endpoints/CreateAndExercise.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/endpoints/MeteringReportEndpoint.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/endpoints/PackagesAndDars.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/endpoints/Parties.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/endpoints/RouteSetup.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/endpoints/UserManagement.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/ApiValueToJsValueConverter.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/DomainJsonDecoder.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/DomainJsonEncoder.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/ExtraFormats.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/HttpCodec.scala 
create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/JsValueToApiValueConverter.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/JsonError.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/JsonProtocol.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/ResponseFormats.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/SprayJson.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/metrics/HttpApiMetrics.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/query/ValuePredicate.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/ApiValueToLfValueConverter.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/ClientUtil.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/Collections.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/Commands.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/ErrorOps.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/FlowUtil.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/FutureUtil.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/GrpcHttpErrorCodes.scala create mode 100644 
canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/IdentifierConverters.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/JwtParties.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/LedgerOffsetUtil.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/Logging.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/NewBoolean.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/ProtobufByteStrings.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/Transactions.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/package.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/ledger/service/Grpc.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/ledger/service/LedgerReader.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/ledger/service/MetadataReader.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/ledger/service/TemplateIds.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/pureconfigutils/SharedConfigReaders.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/test/daml/JsonEncodingTest.daml create mode 100644 canton-3x/community/ledger/ledger-json-api/src/test/daml/daml.yaml create mode 100644 
canton-3x/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/daml/lf/value/json/ApiCodecCompressedSpec.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/fetchcontracts/AcsTxStreamsTest.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/fetchcontracts/util/ContractStreamStepTest.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/fetchcontracts/util/InsertDeleteStepTest.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/CommandServiceTest.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/DomainSpec.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/Generators.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/GeneratorsTest.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/PackageServiceTest.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/endpoints/MeteringReportEndpointTest.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/endpoints/RouteSetupTest.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/json/JsonProtocolTest.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/json/ResponseFormatsTest.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/util/ApiValueToLfValueConverterTest.scala create mode 100644 
canton-3x/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/util/FlowUtilTest.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/util/JwtPartiesTest.scala create mode 100644 canton-3x/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/util/LedgerOffsetUtilTest.scala create mode 100644 canton-3x/community/ledger/metrics/collectd/README.md create mode 100644 canton-3x/community/ledger/metrics/collectd/collectd.conf create mode 100644 canton-3x/community/ledger/metrics/collection/README.md create mode 100644 canton-3x/community/ledger/metrics/collection/docker-compose.yml create mode 100644 canton-3x/community/ledger/metrics/collection/grafana/dashboards/collectd.json create mode 100644 canton-3x/community/ledger/metrics/collection/grafana/dashboards/graphite-carbon-metrics.json create mode 100644 canton-3x/community/ledger/metrics/collection/grafana/dashboards/ledger-submissions.json create mode 100644 canton-3x/community/ledger/metrics/collection/grafana/dashboards/transaction-stream.json create mode 100644 canton-3x/community/ledger/metrics/collection/grafana/provisioning/dashboards/dashboards.yaml create mode 100644 canton-3x/community/ledger/metrics/collection/grafana/provisioning/datasources/graphite.yaml create mode 100644 canton-3x/community/ledger/metrics/collection/graphite/.gitignore create mode 100644 canton-3x/community/ledger/metrics/collection/graphite/default_conf/storage-aggregation.conf create mode 100644 canton-3x/community/ledger/metrics/collection/graphite/default_conf/storage-schemas.conf create mode 100755 canton-3x/community/ledger/metrics/collection/graphite/run_once/init-config create mode 100755 canton-3x/community/ledger/metrics/collection/reset-all.sh create mode 100755 canton-3x/community/ledger/metrics/collection/summary.js create mode 100755 
canton-3x/community/ledger/participant-state/src/test/lib/daml/simple_package_list/Simple.daml create mode 100755 canton-3x/community/ledger/participant-state/src/test/lib/daml/simple_package_optional/Simple.daml create mode 100755 canton-3x/community/ledger/participant-state/src/test/lib/daml/simple_package_party/Simple.daml create mode 100755 canton-3x/community/ledger/participant-state/src/test/lib/daml/simple_package_text_map/Simple.daml create mode 100755 canton-3x/community/ledger/participant-state/src/test/lib/daml/simple_package_tuple/Simple.daml create mode 100755 canton-3x/community/ledger/participant-state/src/test/lib/daml/simple_package_variant/Simple.daml create mode 100644 canton-3x/community/lib/Blake2b/src/main/java/org/bouncycastle/crypto/digests/canton/Blake2bDigest.java create mode 100644 canton-3x/community/lib/README.md create mode 100644 canton-3x/community/lib/daml-copy-common-0/.gitkeep create mode 100644 canton-3x/community/lib/daml-copy-common-1/.gitkeep create mode 100644 canton-3x/community/lib/daml-copy-common-2/.gitkeep create mode 100644 canton-3x/community/lib/daml-copy-common-3/.gitkeep create mode 100644 canton-3x/community/lib/daml-copy-common-4/.gitkeep create mode 100644 canton-3x/community/lib/daml-copy-common-5/.gitkeep create mode 100644 canton-3x/community/lib/daml-copy-common/.gitkeep create mode 100644 canton-3x/community/lib/daml-copy-common/src/main/scala/com/daml/bazeltools/BazelRunfiles.scala create mode 120000 canton-3x/community/lib/daml-copy-protobuf-java/protobuf-daml-symlinks/archive/com create mode 120000 canton-3x/community/lib/daml-copy-protobuf-java/protobuf-daml-symlinks/transaction/com create mode 100644 canton-3x/community/lib/daml-copy-testing-0/.gitkeep create mode 120000 canton-3x/community/lib/daml-copy-testing-0/ledger-resources-test-symlink/scala/com create mode 120000 canton-3x/community/lib/daml-copy-testing-0/observability-metrics-test-symlink/scala/com create mode 120000 
canton-3x/community/lib/daml-copy-testing-0/observability-tracing-test-symlink/scala/com create mode 120000 canton-3x/community/lib/daml-copy-testing-0/protobuf-daml-symlinks/ledger-api-sample-service/hello.proto create mode 120000 canton-3x/community/lib/daml-copy-testing-0/rs-grpc-bridge-test-symlink/java/com create mode 100644 canton-3x/community/lib/daml-copy-testing-1/.gitkeep create mode 100644 canton-3x/community/lib/daml-copy-testing/.gitkeep create mode 120000 canton-3x/community/lib/daml-copy-testing/rs-grpc-pekko-test-symlink/scala/com create mode 120000 canton-3x/community/lib/daml-copy-testing/sample-service-test-symlink/scala/com create mode 100644 canton-3x/community/lib/pekko/LICENSE create mode 100644 canton-3x/community/lib/pekko/src/main/scala/org/apache/pekko/stream/scaladsl/BroadcastHub.scala create mode 100644 canton-3x/community/lib/pekko/src/test/scala/org/apache/pekko/stream/scaladsl/BroadcastHubSpec.scala create mode 100644 canton-3x/community/lib/pekko/src/test/scala/org/apache/pekko/stream/testkit/StreamSpec.scala create mode 100644 canton-3x/community/lib/pekko/src/test/scala/org/apache/pekko/stream/testkit/Utils.scala create mode 100644 canton-3x/community/lib/pekko/src/test/scala/org/apache/pekko/testkit/Coroner.scala create mode 100644 canton-3x/community/lib/pekko/src/test/scala/org/apache/pekko/testkit/PekkoSpec.scala create mode 100644 canton-3x/community/lib/slick/LICENSE.txt create mode 100644 canton-3x/community/lib/slick/src/main/scala/slick/jdbc/canton/StaticQuery.scala create mode 100644 canton-3x/community/lib/wartremover/src/main/scala/com/digitalasset/canton/DiscardedFuture.scala create mode 100644 canton-3x/community/lib/wartremover/src/main/scala/com/digitalasset/canton/FutureLikeTester.scala create mode 100644 canton-3x/community/lib/wartremover/src/main/scala/com/digitalasset/canton/FutureTraverse.scala create mode 100644 
canton-3x/community/lib/wartremover/src/main/scala/com/digitalasset/canton/GlobalExecutionContext.scala create mode 100644 canton-3x/community/lib/wartremover/src/main/scala/com/digitalasset/canton/NonUnitForEach.scala create mode 100644 canton-3x/community/lib/wartremover/src/main/scala/com/digitalasset/canton/RequireBlocking.scala create mode 100644 canton-3x/community/lib/wartremover/src/main/scala/com/digitalasset/canton/SlickString.scala create mode 100644 canton-3x/community/lib/wartremover/src/main/scala/com/digitalasset/canton/TryFailed.scala create mode 100644 canton-3x/community/lib/wartremover/src/test/scala/com/digitalasset/canton/DiscardedFutureTest.scala create mode 100644 canton-3x/community/lib/wartremover/src/test/scala/com/digitalasset/canton/FutureTraverseTest.scala create mode 100644 canton-3x/community/lib/wartremover/src/test/scala/com/digitalasset/canton/GlobalExecutionContextTest.scala create mode 100644 canton-3x/community/lib/wartremover/src/test/scala/com/digitalasset/canton/NonUnitForEachTest.scala create mode 100644 canton-3x/community/lib/wartremover/src/test/scala/com/digitalasset/canton/RequireBlockingTest.scala create mode 100644 canton-3x/community/lib/wartremover/src/test/scala/com/digitalasset/canton/SlickStringTest.scala create mode 100644 canton-3x/community/lib/wartremover/src/test/scala/com/digitalasset/canton/TryFailedTest.scala create mode 100644 canton-3x/community/participant/src/main/daml/AdminWorkflows.daml create mode 100644 canton-3x/community/participant/src/main/daml/PingPong.daml create mode 100644 canton-3x/community/participant/src/main/daml/daml.yaml create mode 100644 canton-3x/community/participant/src/main/daml/ping-pong-vacuum/PingPongVacuum.daml create mode 100644 canton-3x/community/participant/src/main/daml/ping-pong-vacuum/daml.yaml create mode 100644 canton-3x/community/participant/src/main/protobuf/buf.yaml create mode 100644 
canton-3x/community/participant/src/main/protobuf/com/daml/ledger/api/v1/package.proto create mode 100644 canton-3x/community/participant/src/main/protobuf/com/digitalasset/canton/participant/admin/v0/domain_connectivity.proto create mode 100644 canton-3x/community/participant/src/main/protobuf/com/digitalasset/canton/participant/admin/v0/enterprise_participant_replication_service.proto create mode 100644 canton-3x/community/participant/src/main/protobuf/com/digitalasset/canton/participant/admin/v0/inspection_service.proto create mode 100644 canton-3x/community/participant/src/main/protobuf/com/digitalasset/canton/participant/admin/v0/package_service.proto create mode 100644 canton-3x/community/participant/src/main/protobuf/com/digitalasset/canton/participant/admin/v0/participant_repair_service.proto create mode 100644 canton-3x/community/participant/src/main/protobuf/com/digitalasset/canton/participant/admin/v0/party_name_management.proto create mode 100644 canton-3x/community/participant/src/main/protobuf/com/digitalasset/canton/participant/admin/v0/ping_pong_service.proto create mode 100644 canton-3x/community/participant/src/main/protobuf/com/digitalasset/canton/participant/admin/v0/pruning_service.proto create mode 100644 canton-3x/community/participant/src/main/protobuf/com/digitalasset/canton/participant/admin/v0/resource_management_service.proto create mode 100644 canton-3x/community/participant/src/main/protobuf/com/digitalasset/canton/participant/admin/v0/traffic_control_service.proto create mode 100644 canton-3x/community/participant/src/main/protobuf/com/digitalasset/canton/participant/admin/v0/transfer_service.proto create mode 100644 canton-3x/community/participant/src/main/protobuf/com/digitalasset/canton/participant/admin/v1/participant_repair_service.proto create mode 100644 canton-3x/community/participant/src/main/protobuf/com/digitalasset/canton/participant/protocol/v0/ledger_sync_event.proto create mode 100644 
canton-3x/community/participant/src/main/protobuf/com/digitalasset/canton/participant/protocol/v0/submission_tracking.proto create mode 100644 canton-3x/community/participant/src/main/protobuf/com/digitalasset/canton/participant/scalapb/package.proto create mode 100644 canton-3x/community/participant/src/main/resources/dar/AdminWorkflows.dar create mode 120000 canton-3x/community/participant/src/main/resources/ledger-api/VERSION create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/GlobalOffset.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/LocalOffset.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNode.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNodeCommon.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNodeParameters.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNodeX.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/Pruning.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/AdminWorkflowConfig.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/AdminWorkflowService.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/AdminWorkflowServices.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/CantonPackageServiceError.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/DomainConnectivityService.scala create mode 
100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/PackageDependencyResolver.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/PackageOps.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/PackageService.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/PingService.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/ResourceLimits.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/ResourceManagementService.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/TransferService.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/data/ActiveContract.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/data/SerializableContractWithDomainId.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcDomainConnectivityService.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcInspectionService.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcPackageService.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcParticipantRepairService.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcPartyNameManagementService.scala create mode 100644 
canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcPingService.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcPruningService.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcResourceManagementService.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcTrafficControlService.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcTransferService.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/inspection/AcsInspection.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/inspection/Error.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/inspection/SyncStateInspection.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/inspection/TimestampValidation.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/MigrateContracts.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/RepairContext.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/RepairRequest.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/RepairService.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/RepairServiceError.scala create mode 100644 
canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/package.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/workflows/PackageID.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/config/AuthServiceConfig.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/config/LocalParticipantConfig.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/config/ParticipantInitConfig.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/domain/AgreementClient.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/domain/AgreementService.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/domain/DomainAliasManager.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/domain/DomainConnectionConfig.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/domain/DomainRegistry.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/domain/DomainRegistryHelpers.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/domain/DomainServiceClient.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/domain/ParticipantInitializeTopology.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/domain/grpc/GrpcDomainRegistry.scala create mode 100644 
canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/event/AcsChangeListener.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/event/RecordOrderPublisher.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/event/RecordTime.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/CantonAdminTokenAuthService.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/CantonExternalClockBackend.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/CantonLedgerApiServerWrapper.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/CantonTimeServiceBackend.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/JwtTokenUtilities.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/LedgerApiJdbcUrl.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/LedgerApiStorage.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/StartableStoppableLedgerApiDependentServices.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/StartableStoppableLedgerApiServer.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/client/CommandSubmitterWithRetry.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/client/JavaDecodeUtil.scala create mode 100644 
canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/client/LedgerConnection.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/client/ValueRemapper.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/metrics/ParticipantMetrics.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/metrics/PruningMetrics.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/metrics/TransactionProcessingMetrics.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/package.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/AbstractMessageProcessor.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/AuthenticationValidator.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/AuthorizationValidator.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/BadRootHashMessagesRequestProcessor.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/CanSubmitTransfer.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/DefaultMessageDispatcher.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/MessageDispatcher.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/MessageProcessingStartingPoint.scala create mode 100644 
canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/ParticipantTopologyTerminateProcessingX.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/Phase37Synchronizer.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/ProcessingSteps.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/ProtocolProcessor.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/RepairProcessor.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/RequestCounterAllocator.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/RequestJournal.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/SerializableContractAuthenticatorImpl.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/SubmissionTracker.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TopologyTransactionsToEventsX.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TransactionProcessingSteps.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TransactionProcessor.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/conflictdetection/ActivenessResult.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/conflictdetection/ActivenessSet.scala create mode 100644 
canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/conflictdetection/CommitSet.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/conflictdetection/ConflictDetector.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/conflictdetection/IllegalConflictDetectionStateException.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/conflictdetection/LockableState.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/conflictdetection/LockableStates.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/conflictdetection/LockableStatus.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/conflictdetection/NaiveRequestTracker.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/conflictdetection/RequestTracker.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/ChangeId.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/CommandDeduplicator.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/ConfirmationRequestFactory.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/ContractEnrichmentFactory.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/DomainsFilter.scala create mode 100644 
canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/EncryptedViewMessageFactory.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/InFlightSubmission.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/InFlightSubmissionTracker.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/SeedGenerator.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/SerializableSubmissionId.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/SubmissionTrackingData.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/TransactionTreeFactory.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/TransactionTreeFactoryImpl.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/TransactionTreeFactoryImplV3.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/UsableDomain.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/WatermarkTracker.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/routing/AdmissibleDomains.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/routing/ContractsDomainData.scala create mode 100644 
canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/routing/ContractsTransfer.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/routing/DomainRank.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/routing/DomainRouter.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/routing/DomainSelector.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/routing/DomainStateProvider.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/routing/TransactionData.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/AdminPartiesAndParticipants.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/AutomaticTransferIn.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/IncompleteTransferData.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/PartyParticipantPermissions.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/RecentTimeProofProvider.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferCoordination.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferData.scala create mode 100644 
canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInProcessingSteps.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInProcessor.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInValidation.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferKnownAndVetted.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutProcessingSteps.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutProcessor.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutProcessorError.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutRequest.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutRequestValidated.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutValidation.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutValidationNonTransferringParticipant.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutValidationTransferringParticipant.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutValidationUtil.scala create mode 100644 
canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferProcessingSteps.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/ConfirmationResponseFactory.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/ContractConsistencyChecker.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/ExtractUsedAndCreated.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/ExtractUsedContractsFromRootViews.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/InternalConsistencyChecker.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/ModelConformanceChecker.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/PendingTransaction.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/RecipientsValidator.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/TimeValidator.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/TransactionValidationResult.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/UsedAndCreated.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/ViewValidationResult.scala create mode 100644 
canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessor.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/NoOpPruningProcessor.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/PruneObserver.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/SortedReconciliationIntervals.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/SortedReconciliationIntervalsProvider.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/SortedReconciliationIntervalsProviderFactory.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/scheduler/ParticipantPruningScheduler.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/scheduler/ParticipantSchedulersParameters.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/AcsCommitmentStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ActiveContractStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/CommandDeduplicationStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ConflictDetectionStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ContractKeyJournal.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ContractLookup.scala create mode 100644 
canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ContractStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/DamlLfSerializers.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/DamlPackageStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/DomainConnectionConfigStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/DomainParameterStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ExtendedContractLookup.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/HasPrunable.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/InFlightSubmissionStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/MultiDomainEventLog.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ParticipantEventLog.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ParticipantNodeEphemeralState.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ParticipantNodePersistentState.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ParticipantPruningSchedulerStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ParticipantPruningStore.scala create mode 100644 
canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ParticipantSettingsStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/RegisteredDomainsStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/RequestJournalStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/SerializableLedgerSyncEvent.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ServiceAgreementStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/SingleDimensionEventLog.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/SubmissionTrackerStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/SyncDomainEphemeralState.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/SyncDomainEphemeralStateFactory.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/SyncDomainPersistentState.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/TransferStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/data/ActiveContractsData.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbAcsCommitmentStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbActiveContractStore.scala create mode 100644 
canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbCommandDeduplicationStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbContractKeyJournal.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbContractStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbDamlPackageStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbDomainConnectionConfigStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbDomainParameterStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbInFlightSubmissionStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbMultiDomainEventLog.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbParticipantEventLog.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbParticipantPruningSchedulerStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbParticipantPruningStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbParticipantSettingsStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbRegisteredDomainsStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbRequestJournalStore.scala create mode 100644 
canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbServiceAgreementStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbSingleDimensionEventLog.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbSubmissionTrackerStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbSyncDomainPersistentState.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbTransferStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/ParticipantStorageImplicits.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryAcsCommitmentStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryActiveContractStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryCommandDeduplicationStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryContractKeyJournal.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryContractStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryDamlPackageStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryDomainConnectionConfigStore.scala create mode 100644 
canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryDomainParameterStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryInFlightSubmissionStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryMultiDomainEventLog.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryOffsetsLookup.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryParticipantEventLog.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryParticipantPruningSchedulerStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryParticipantPruningStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryParticipantSettingsStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryRegisteredDomainsStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryRequestJournalStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryServiceAgreementStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemorySingleDimensionEventLog.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemorySubmissionTrackerStore.scala create mode 100644 
canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemorySyncDomainPersistentState.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryTransferStore.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/TransferCache.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/CantonAuthorityResolver.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/CantonDynamicDomainParameterGetter.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/CantonSyncService.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/CommandDeduplicationError.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/ConnectedDomainsLookup.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/EventTranslationStrategy.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/LedgerSyncEvent.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/ParticipantEventPublisher.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/PartyAllocation.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncDomain.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncDomainMigration.scala create mode 100644 
canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncDomainPersistentStateManager.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/TimelyRejectNotifier.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/TimestampedEvent.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/TransactionRoutingError.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/UpstreamOffsetConvert.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/LedgerServerPartyNotifier.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/ParticipantTopologyDispatcher.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/ParticipantTopologyManager.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/TopologyComponentFactory.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/client/MissingKeysAlerter.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/traffic/TrafficStateController.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/traffic/TrafficStateTopUpSubscription.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/util/DAMLe.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/util/LedgerApiUtil.scala create mode 100644 
canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/util/StateChange.scala create mode 100644 canton-3x/community/participant/src/main/scala/com/digitalasset/canton/participant/util/TimeOfChange.scala create mode 100644 canton-3x/community/participant/src/test/resources/daml/illformed.dar create mode 100644 canton-3x/community/participant/src/test/resources/daml/illformed.lf create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/DefaultParticipantStateValues.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/GrpcTrafficControlServiceTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/MockLedgerAcs.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/PackageOpsForTesting.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/PackageOpsTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/PackageServiceTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/PingServiceTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/ResilientTransactionsSubscriptionTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/data/GeneratorsData.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/data/SerializableContractWithDomainIdTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/inspection/AcsInspectionTest.scala create mode 100644 
canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/version/SerializationDeserializationTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/domain/grpc/ParticipantDomainTopologyServiceTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/ledger/api/LedgerApiJdbcUrlTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/ledger/api/client/CommandSubmitterWithRetryTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/metrics/ParticipantTestMetrics.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/MessageDispatcherTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/Phase37SynchronizerTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/ProtocolProcessorTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/RequestCounterAllocatorTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/RequestJournalTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/SerializableContractAuthenticatorImplTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/SubmissionTrackerTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/TestProcessingSteps.scala create mode 100644 
canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/TopologyTransactionsToEventsXTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/TransactionProcessingStepsTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/conflictdetection/ActivenessCheckTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/conflictdetection/ConflictDetectionHelpers.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/conflictdetection/ConflictDetectorTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/conflictdetection/LockableStatesTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/conflictdetection/NaiveRequestTrackerTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/conflictdetection/RequestTrackerTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/submission/CommandDeduplicatorTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/submission/ConfirmationRequestFactoryTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/submission/DomainSelectionFixture.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/submission/DomainsFilterTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/submission/NoCommandDeduplicator.scala create mode 100644 
canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/submission/SeedGeneratorTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/submission/TestSubmissionTrackingData.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/submission/TransactionTreeFactoryImplTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/submission/WatermarkTrackerTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/submission/routing/DomainSelectorTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/submission/routing/WorkflowIdExtractionTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/transfer/DAMLeTestInstance.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/transfer/IncompleteTransferDataTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/transfer/TestTransferCoordination.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/transfer/TransferDataTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInProcessingStepsTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInValidationTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutProcessingStepsTest.scala create mode 100644 
canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutValidationTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/transfer/TransferResultHelpers.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/validation/ExtractUsedAndCreatedTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/validation/InternalConsistencyCheckerTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/validation/ModelConformanceCheckerTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/validation/RecipientsValidatorTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/validation/TimeValidatorTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessorTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/pruning/SortedReconciliationIntervalsHelpers.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/pruning/SortedReconciliationIntervalsProviderTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/pruning/SortedReconciliationIntervalsTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/AcsCommitmentStoreTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/ActiveContractStoreTest.scala create mode 100644 
canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/CommandDeduplicationStoreTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/ContractKeyJournalTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/ContractStoreTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/DamlLfSerializersTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/DamlPackageStoreTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/DomainConnectionConfigStoreTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/DomainParameterStoreTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/ExtendedContractLookupTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/HookedAcs.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/InFlightSubmissionStoreTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/MultiDomainEventLogTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/ParticipantEventLogTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/ParticipantPruningSchedulerStoreTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/ParticipantPruningStoreTest.scala create mode 100644 
canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/ParticipantSettingsStoreTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/PreHookRequestJournalStore.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/PreUpdateHookCkj.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/RegisteredDomainsStoreTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/RequestJournalStoreTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/ServiceAgreementStoreTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/SingleDimensionEventLogTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/SubmissionTrackerStoreTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/SyncDomainEphemeralStateFactoryTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/ThrowOnWriteCommitmentStore.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/ThrowingAcs.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/ThrowingCkj.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/TransferStoreTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbAcsCommitmentStoreTest.scala create mode 100644 
canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbActiveContractStoreTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbCommandDeduplicationStoreTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbContractKeyJournalTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbContractStoreTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbDamlPackageStoreTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbDomainConnectionConfigStoreTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbDomainParameterStoreTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbEventLogTestResources.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbInFlightSubmissionStoreTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbMultiDomainEventLogTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbParticipantEventLogTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbParticipantPruningSchedulerStoreTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbParticipantPruningStoreTest.scala create mode 100644 
canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbParticipantSettingsStoreTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbRegisteredDomainsStoreTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbRequestJournalStoreTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbServiceAgreementStoreTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbSingleDimensionEventLogTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbSubmissionTrackerStoreTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbTransferStoreTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/AcsCommitmentStoreTestInMemory.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/ActiveContractStoreTestInMemory.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/CommandDeduplicationStoreTestInMemory.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/ContractKeyJournalTestInMemory.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/ContractStoreTestInMemory.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/DamlPackageStoreTestInMemory.scala create mode 100644 
canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/DomainConnectionConfigStoreTestInMemory.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/DomainParameterStoreTestInMemory.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/InFlightSubmissionStoreTestInMemory.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/MultiDomainEventLogTestInMemory.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/ParticipantEventLogTestInMemory.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/ParticipantPruningSchedulerStoreTestInMemory.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/ParticipantPruningStoreTestInMemory.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/ParticipantSettingsStoreTestInMemory.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/RegisteredDomainsStoreTestInMemory.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/RequestJournalStoreTestInMemory.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/ServiceAgreementStoreTestInMemory.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/SingleDimensionEventLogTestInMemory.scala create mode 100644 
canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/SubmissionTrackerStoreTestInMemory.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/TransferCacheTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/TransferStoreTestInMemory.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/sync/CantonAuthorityResolverTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/sync/CantonSyncServiceTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/sync/DefaultLedgerSyncEvent.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/sync/ParticipantEventPublisherTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/sync/TimelyRejectNotifierTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/topology/DomainOutboxTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/topology/LedgerServerPartyNotifierTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/topology/QueueBasedDomainOutboxXTest.scala create mode 100644 canton-3x/community/participant/src/test/scala/com/digitalasset/canton/participant/topology/StoreBasedDomainOutboxXTest.scala create mode 100644 canton-3x/community/sequencer-driver/src/main/scala/com/digitalasset/canton/domain/block/BlockOrdering.scala create mode 100644 canton-3x/community/sequencer-driver/src/main/scala/com/digitalasset/canton/domain/block/BlockOrderingSequencer.scala create mode 100644 
canton-3x/community/sequencer-driver/src/main/scala/com/digitalasset/canton/domain/block/SequencerDriver.scala create mode 100644 canton-3x/community/testing/src/main/java/com/digitalasset/canton/logging/BufferingLogger.java create mode 100644 canton-3x/community/testing/src/main/java/com/digitalasset/canton/logging/SuppressingLoggerDispatcher.java create mode 100644 canton-3x/community/testing/src/main/scala/com/digitalasset/canton/BaseTest.scala create mode 100644 canton-3x/community/testing/src/main/scala/com/digitalasset/canton/BigDecimalImplicits.scala create mode 100644 canton-3x/community/testing/src/main/scala/com/digitalasset/canton/CloseableTest.scala create mode 100644 canton-3x/community/testing/src/main/scala/com/digitalasset/canton/HasExecutionContext.scala create mode 100644 canton-3x/community/testing/src/main/scala/com/digitalasset/canton/HasExecutorService.scala create mode 100644 canton-3x/community/testing/src/main/scala/com/digitalasset/canton/HasTempDirectory.scala create mode 100644 canton-3x/community/testing/src/main/scala/com/digitalasset/canton/LogReporter.scala create mode 100644 canton-3x/community/testing/src/main/scala/com/digitalasset/canton/MockedNodeParameters.scala create mode 100644 canton-3x/community/testing/src/main/scala/com/digitalasset/canton/ProtocolVersionChecks.scala create mode 100644 canton-3x/community/testing/src/main/scala/com/digitalasset/canton/RepeatableTest.scala create mode 100644 canton-3x/community/testing/src/main/scala/com/digitalasset/canton/SerializationDeserializationTestHelpers.scala create mode 100644 canton-3x/community/testing/src/main/scala/com/digitalasset/canton/TestMetrics.scala create mode 100644 canton-3x/community/testing/src/main/scala/com/digitalasset/canton/UniquePortGenerator.scala create mode 100644 canton-3x/community/testing/src/main/scala/com/digitalasset/canton/crypto/CryptoTestHelper.scala create mode 100644 
canton-3x/community/testing/src/main/scala/com/digitalasset/canton/crypto/EncryptionTest.scala create mode 100644 canton-3x/community/testing/src/main/scala/com/digitalasset/canton/crypto/PrivateKeySerializationTest.scala create mode 100644 canton-3x/community/testing/src/main/scala/com/digitalasset/canton/crypto/RandomTest.scala create mode 100644 canton-3x/community/testing/src/main/scala/com/digitalasset/canton/crypto/SigningTest.scala create mode 100644 canton-3x/community/testing/src/main/scala/com/digitalasset/canton/crypto/TestHash.scala create mode 100644 canton-3x/community/testing/src/main/scala/com/digitalasset/canton/crypto/provider/symbolic/SymbolicCrypto.scala create mode 100644 canton-3x/community/testing/src/main/scala/com/digitalasset/canton/crypto/provider/symbolic/SymbolicCryptoProvider.scala create mode 100644 canton-3x/community/testing/src/main/scala/com/digitalasset/canton/crypto/provider/symbolic/SymbolicPrivateCrypto.scala create mode 100644 canton-3x/community/testing/src/main/scala/com/digitalasset/canton/crypto/provider/symbolic/SymbolicPureCrypto.scala create mode 100644 canton-3x/community/testing/src/main/scala/com/digitalasset/canton/ledger/api/IsStatusException.scala create mode 100644 canton-3x/community/testing/src/main/scala/com/digitalasset/canton/ledger/api/MockMessages.scala create mode 100644 canton-3x/community/testing/src/main/scala/com/digitalasset/canton/logging/LogEntry.scala create mode 100644 canton-3x/community/testing/src/main/scala/com/digitalasset/canton/logging/SuppressingLogger.scala create mode 100644 canton-3x/community/testing/src/main/scala/com/digitalasset/canton/logging/SuppressionRule.scala create mode 100644 canton-3x/community/testing/src/main/scala/com/digitalasset/canton/metrics/CommonMockMetrics.scala create mode 100644 canton-3x/community/testing/src/main/scala/com/digitalasset/canton/metrics/InMemoryMetricsFactory.scala create mode 100644 
canton-3x/community/testing/src/main/scala/com/digitalasset/canton/protocol/TestDomainParameters.scala create mode 100644 canton-3x/community/testing/src/main/scala/com/digitalasset/canton/store/db/DbStorageSetup.scala create mode 100644 canton-3x/community/testing/src/main/scala/com/digitalasset/canton/topology/DefaultTestIdentities.scala create mode 100644 canton-3x/community/testing/src/main/scala/com/digitalasset/canton/topology/TestingIdentityFactory.scala create mode 100644 canton-3x/community/testing/src/main/scala/com/digitalasset/canton/topology/TestingIdentityFactoryBase.scala create mode 100644 canton-3x/community/testing/src/main/scala/com/digitalasset/canton/topology/TestingIdentityFactoryX.scala create mode 100644 canton-3x/community/testing/src/main/scala/com/digitalasset/canton/tracing/TestTelemetry.scala create mode 100644 canton-3x/community/testing/src/main/scala/com/digitalasset/canton/version/GeneratorsVersion.scala create mode 100644 canton-3x/community/testing/src/test/resources/logback-test.xml create mode 100644 canton-3x/community/testing/src/test/scala/com/digitalasset/canton/UniquePortGeneratorTest.scala create mode 100644 canton-3x/community/testing/src/test/scala/com/digitalasset/canton/lifecycle/OnShutdownRunnerTest.scala create mode 100644 canton-3x/community/testing/src/test/scala/com/digitalasset/canton/logging/LogEntryTest.scala create mode 100644 canton-3x/community/testing/src/test/scala/com/digitalasset/canton/logging/SuppressingLoggerTest.scala create mode 100644 canton-3x/community/testing/src/test/scala/com/digitalasset/canton/tracing/TraceContextTest.scala create mode 100644 canton-3x/community/util-external/src/main/scala/com/digitalasset/canton/config/ConfidentialConfigWriter.scala create mode 100644 canton-3x/community/util-external/src/main/scala/com/digitalasset/canton/config/KeyStoreConfig.scala create mode 100644 canton-3x/community/util-external/src/main/scala/com/digitalasset/canton/config/RequireTypes.scala create 
mode 100644 canton-3x/community/util-external/src/main/scala/com/digitalasset/canton/error/Alarm.scala create mode 100644 canton-3x/community/util-external/src/main/scala/com/digitalasset/canton/error/EnterpriseSequencerErrorGroups.scala create mode 100644 canton-3x/community/util-external/src/main/scala/com/digitalasset/canton/error/EthereumErrors.scala create mode 100644 canton-3x/community/util-external/src/main/scala/com/digitalasset/canton/error/FabricErrors.scala create mode 100644 canton-3x/community/util-external/src/main/scala/com/digitalasset/canton/error/LogOnCreation.scala create mode 100644 canton-3x/community/util-external/src/main/scala/com/digitalasset/canton/error/SequencerBaseError.scala create mode 100644 canton-3x/community/util-external/src/main/scala/com/digitalasset/canton/time/TimeProvider.scala create mode 100644 canton-3x/community/util-external/src/main/scala/com/digitalasset/canton/util/VersionUtil.scala create mode 100644 canton-3x/community/util-external/src/main/scala/com/digitalasset/canton/version/EthereumContractVersion.scala create mode 100644 canton-3x/community/util-logging/src/main/scala/com/digitalasset/canton/logging/ErrorLoggingContext.scala create mode 100644 canton-3x/community/util-logging/src/main/scala/com/digitalasset/canton/logging/LoggingContextUtil.scala create mode 100644 canton-3x/community/util-logging/src/main/scala/com/digitalasset/canton/logging/LoggingContextWithTrace.scala create mode 100644 canton-3x/community/util-logging/src/main/scala/com/digitalasset/canton/logging/NamedLoggerFactory.scala create mode 100644 canton-3x/community/util-logging/src/main/scala/com/digitalasset/canton/logging/NamedLogging.scala create mode 100644 canton-3x/community/util-logging/src/main/scala/com/digitalasset/canton/logging/NamedLoggingContext.scala create mode 100644 canton-3x/community/util-logging/src/main/scala/com/digitalasset/canton/logging/TracedLogger.scala create mode 100644 
canton-3x/community/util-logging/src/main/scala/com/digitalasset/canton/logging/TracedLoggerOps.scala create mode 100644 canton-3x/community/util-logging/src/main/scala/com/digitalasset/canton/logging/package.scala create mode 100644 canton-3x/community/util-logging/src/main/scala/com/digitalasset/canton/metrics/OnDemandMetricsReader.scala create mode 100644 canton-3x/community/util-logging/src/main/scala/com/digitalasset/canton/telemetry/ConfiguredOpenTelemetry.scala create mode 100644 canton-3x/community/util-logging/src/main/scala/com/digitalasset/canton/telemetry/OpenTelemetryFactory.scala create mode 100644 canton-3x/community/util-logging/src/main/scala/com/digitalasset/canton/tracing/NoTracing.scala create mode 100644 canton-3x/community/util-logging/src/main/scala/com/digitalasset/canton/tracing/TelemetryTracing.scala create mode 100644 canton-3x/community/util-logging/src/main/scala/com/digitalasset/canton/tracing/TraceContext.scala create mode 100644 canton-3x/community/util-logging/src/main/scala/com/digitalasset/canton/tracing/TraceContextGrpc.scala create mode 100644 canton-3x/community/util-logging/src/main/scala/com/digitalasset/canton/tracing/Traced.scala create mode 100644 canton-3x/community/util-logging/src/main/scala/com/digitalasset/canton/tracing/TracerProvider.scala create mode 100644 canton-3x/community/util-logging/src/main/scala/com/digitalasset/canton/tracing/TracingConfig.scala create mode 100644 canton-3x/community/util-logging/src/main/scala/com/digitalasset/canton/tracing/W3CTraceContext.scala create mode 100644 canton-3x/daml-common-staging/daml-errors/src/main/scala/com/daml/error/BaseError.scala create mode 100644 canton-3x/daml-common-staging/daml-errors/src/main/scala/com/daml/error/ContextualizedErrorLogger.scala create mode 100644 canton-3x/daml-common-staging/daml-errors/src/main/scala/com/daml/error/DamlError.scala create mode 100644 canton-3x/daml-common-staging/daml-errors/src/main/scala/com/daml/error/ErrorCategory.scala 
create mode 100644 canton-3x/daml-common-staging/daml-errors/src/main/scala/com/daml/error/ErrorClass.scala create mode 100644 canton-3x/daml-common-staging/daml-errors/src/main/scala/com/daml/error/ErrorCode.scala create mode 100644 canton-3x/daml-common-staging/daml-errors/src/main/scala/com/daml/error/ErrorGroup.scala create mode 100644 canton-3x/daml-common-staging/daml-errors/src/main/scala/com/daml/error/ErrorResource.scala create mode 100644 canton-3x/daml-common-staging/daml-errors/src/main/scala/com/daml/error/GrpcStatuses.scala create mode 100644 canton-3x/daml-common-staging/daml-errors/src/main/scala/com/daml/error/NoLogging.scala create mode 100644 canton-3x/daml-common-staging/daml-errors/src/main/scala/com/daml/error/samples/Example.scala create mode 100644 canton-3x/daml-common-staging/daml-errors/src/main/scala/com/daml/error/utils/DeserializedCantonError.scala create mode 100644 canton-3x/daml-common-staging/daml-errors/src/main/scala/com/daml/error/utils/ErrorDetails.scala create mode 100644 canton-3x/daml-common-staging/daml-errors/src/test/scala/com/daml/error/ContextualizedErrorLoggerSpec.scala create mode 100644 canton-3x/daml-common-staging/daml-errors/src/test/scala/com/daml/error/ErrorCodeSpec.scala create mode 100644 canton-3x/daml-common-staging/daml-errors/src/test/scala/com/daml/error/ErrorGroupSpec.scala create mode 100644 canton-3x/daml-common-staging/daml-errors/src/test/scala/com/daml/error/ErrorsAssertions.scala create mode 100644 canton-3x/daml-common-staging/daml-errors/src/test/scala/com/daml/error/GrpcStatusesSpec.scala create mode 100644 canton-3x/daml-common-staging/daml-errors/src/test/scala/com/daml/error/SecuritySensitiveMessageSpec.scala create mode 100644 canton-3x/daml-common-staging/daml-errors/src/test/scala/com/daml/error/samples/SampleClientSideSpec.scala create mode 100644 canton-3x/daml-common-staging/daml-errors/src/test/scala/com/daml/error/utils/BenignError.scala create mode 100644 
canton-3x/daml-common-staging/daml-errors/src/test/scala/com/daml/error/utils/DeserializedCantonErrorSpec.scala create mode 100644 canton-3x/daml-common-staging/daml-errors/src/test/scala/com/daml/error/utils/ErrorDetailsSpec.scala create mode 100644 canton-3x/daml-common-staging/daml-errors/src/test/scala/com/daml/error/utils/SevereError.scala diff --git a/.bazelignore b/.bazelignore index a00f60b982..4a1ed58bde 100644 --- a/.bazelignore +++ b/.bazelignore @@ -7,4 +7,4 @@ language-support/ts/node_modules/ language-support/ts/packages/node_modules/ navigator/frontend/node_modules/ compatibility/ -canton-3x/ + diff --git a/azure-cron.yml b/azure-cron.yml index 96d721caf0..59dff0dd9f 100644 --- a/azure-cron.yml +++ b/azure-cron.yml @@ -223,7 +223,7 @@ jobs: set -euo pipefail git fetch git checkout origin/main - ci/build-canton-3x.sh HEAD + ci/build-canton-3x.sh env: GITHUB_TOKEN: $(CANTON_READONLY_TOKEN) - template: ci/tell-slack-failed.yml diff --git a/canton-3x/community/LICENSE-open-source-bundle.txt b/canton-3x/community/LICENSE-open-source-bundle.txt new file mode 100644 index 0000000000..d44bb0cc20 --- /dev/null +++ b/canton-3x/community/LICENSE-open-source-bundle.txt @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/GrpcCtlRunner.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/GrpcCtlRunner.scala new file mode 100644 index 0000000000..4babba3aa9 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/GrpcCtlRunner.scala @@ -0,0 +1,69 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.admin.api.client + +import cats.data.EitherT +import com.digitalasset.canton.admin.api.client.commands.GrpcAdminCommand +import com.digitalasset.canton.ledger.api.auth.client.LedgerCallCredentials +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.networking.grpc.CantonGrpcUtil +import com.digitalasset.canton.tracing.{TraceContext, TraceContextGrpc} +import com.digitalasset.canton.util.LoggerUtil +import io.grpc.ManagedChannel +import io.grpc.stub.AbstractStub + +import scala.concurrent.duration.Duration +import scala.concurrent.{ExecutionContext, Future} + +/** Run a command using the default workflow + */ +class GrpcCtlRunner( + maxRequestDebugLines: Int, + maxRequestDebugStringLength: Int, + val loggerFactory: NamedLoggerFactory, +) extends NamedLogging { + + /** Runs a command + * @return Either a printable error as a String or a Unit indicating all was successful + */ + def run[Req, Res, Result]( + instanceName: String, + command: GrpcAdminCommand[Req, Res, Result], + channel: ManagedChannel, + token: Option[String], + timeout: Duration, + )(implicit ec: ExecutionContext, traceContext: TraceContext): EitherT[Future, String, Result] = { + + val baseService: command.Svc = command + .createService(channel) + .withInterceptors(TraceContextGrpc.clientInterceptor) + + val service = token.fold(baseService)(LedgerCallCredentials.authenticatingStub(baseService, _)) + + for { + request <- EitherT.fromEither[Future](command.createRequest()) + response <- submitRequest(command)(instanceName, service, request, timeout) + result <- EitherT.fromEither[Future](command.handleResponse(response)) + } yield result + } + + private def submitRequest[Svc <: AbstractStub[Svc], Req, Res, Result]( + command: GrpcAdminCommand[Req, Res, Result] + )(instanceName: String, service: command.Svc, request: Req, timeout: Duration)(implicit + ec: ExecutionContext, + 
traceContext: TraceContext, + ): EitherT[Future, String, Res] = + CantonGrpcUtil + .sendGrpcRequest(service, instanceName)( + command.submitRequest(_, request), + LoggerUtil.truncateString(maxRequestDebugLines, maxRequestDebugStringLength)( + command.toString + ), + timeout, + logger, + CantonGrpcUtil.silentLogPolicy, // silent log policy, as the ConsoleEnvironment will log the result + _ => false, // no retry to optimize for low latency + ) + .leftMap(_.toString) +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/AdminCommand.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/AdminCommand.scala new file mode 100644 index 0000000000..05c78430bd --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/AdminCommand.scala @@ -0,0 +1,138 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.admin.api.client.commands

import com.digitalasset.canton.DiscardOps
import com.digitalasset.canton.admin.api.client.commands.GrpcAdminCommand.{
  DefaultBoundedTimeout,
  TimeoutType,
}
import com.digitalasset.canton.config.NonNegativeDuration
import io.grpc.stub.{AbstractStub, StreamObserver}
import io.grpc.{Context, ManagedChannel, Status, StatusException, StatusRuntimeException}

import java.util.concurrent.{ScheduledExecutorService, TimeUnit}
import scala.collection.mutable.ListBuffer
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{Future, Promise, blocking}

/** A single admin-api command: how to build the wire request and how to decode
  * the service's reply into either an error message or a typed result.
  *
  * @tparam Req    wire-level request type sent to the service
  * @tparam Res    wire-level response type returned by the service
  * @tparam Result decoded, caller-facing result type
  */
trait AdminCommand[Req, Res, Result] {

  /** Create the request from configured options
    */
  def createRequest(): Either[String, Req]

  /** Handle the response the service has provided
    */
  def handleResponse(response: Res): Either[String, Result]

  /** Determines within which time frame the request should complete
    *
    * Some requests can run for a very long time. In this case, they should be "unbounded".
    * For other requests, you will want to set a custom timeout apart from the global default bounded timeout
    */
  def timeoutType: TimeoutType = DefaultBoundedTimeout

  /** Command's full name used to identify command in logging and span reporting
    */
  def fullName: String =
    // not using getClass.getSimpleName because it ignores the hierarchy of nested classes, and it also throws unexpected exceptions
    getClass.getName.split('.').last.replace("$", ".")
}

/** cantonctl GRPC Command
  */
trait GrpcAdminCommand[Req, Res, Result] extends AdminCommand[Req, Res, Result] {

  // concrete commands pin this to the generated stub type of the service they call
  type Svc <: AbstractStub[Svc]

  /** Create the GRPC service to call
    */
  def createService(channel: ManagedChannel): Svc

  /** Submit the created request to our service
    */
  def submitRequest(service: Svc, request: Req): Future[Res]

}

object GrpcAdminCommand {
  sealed trait TimeoutType extends Product with Serializable

  /** Custom timeout triggered by the client */
  final case class CustomClientTimeout(timeout: NonNegativeDuration) extends TimeoutType

  /** The Server will ensure the operation is timed out so the client timeout is set to an infinite value */
  case object ServerEnforcedTimeout extends TimeoutType
  case object DefaultBoundedTimeout extends TimeoutType
  case object DefaultUnboundedTimeout extends TimeoutType

  /** Extractor for the gRPC [[io.grpc.Status]] carried by either of the two
    * gRPC exception types.
    */
  object GrpcErrorStatus {
    def unapply(ex: Throwable): Option[Status] = ex match {
      case e: StatusException => Some(e.getStatus)
      case re: StatusRuntimeException => Some(re.getStatus)
      case _ => None
    }
  }

  /** Collect up to `expected` results from a server-streaming call, completing
    * as soon as enough elements have arrived, the stream completes, or the
    * client-side `timeout` fires (whichever happens first).
    *
    * Implementation notes (the ordering here is deliberate):
    *   - all buffer access is guarded by `buffer.synchronized`, wrapped in
    *     `blocking` because a scala-concurrent thread may park on the monitor;
    *   - a CANCELLED status is treated as success: it is produced both by the
    *     scheduled timeout task below and by our own `context.close()`;
    *   - `trySuccess`/`tryFailure` are used because completion can race
    *     between onNext/onError/onCompleted and the timeout task.
    */
  private[digitalasset] def streamedResponse[Request, Response, Result](
      service: (Request, StreamObserver[Response]) => Unit,
      extract: Response => Seq[Result],
      request: Request,
      expected: Int,
      timeout: FiniteDuration,
      scheduler: ScheduledExecutorService,
  ): Future[Seq[Result]] = {
    val promise = Promise[Seq[Result]]()
    val buffer = ListBuffer[Result]()
    val context = Context.ROOT.withCancellation()

    // Complete the promise with whatever has been buffered so far and end the call.
    def success(): Unit = blocking(buffer.synchronized {
      context.close()
      promise.trySuccess(buffer.toList).discard[Boolean]
    })

    context.run(() =>
      service(
        request,
        new StreamObserver[Response]() {
          override def onNext(value: Response): Unit = {
            val extracted = extract(value)
            blocking(buffer.synchronized {
              // only accumulate while below the target; finish once we reach it
              if (buffer.lengthCompare(expected) < 0) {
                buffer ++= extracted
                if (buffer.lengthCompare(expected) >= 0) {
                  success()
                }
              }
            })
          }

          override def onError(t: Throwable): Unit = {
            t match {
              // cancellation is the expected way the timeout (or our own close) ends the stream
              case GrpcErrorStatus(status) if status.getCode == Status.CANCELLED.getCode =>
                success()
              case _ =>
                val _ = promise.tryFailure(t)
            }
          }

          override def onCompleted(): Unit = {
            success()
          }
        },
      )
    )
    // Enforce the client-side timeout by cancelling the streaming context.
    scheduler.schedule(
      new Runnable() {
        override def run(): Unit = {
          val _ = context.cancel(Status.CANCELLED.asException())
        }
      },
      timeout.toMillis,
      TimeUnit.MILLISECONDS,
    )
    promise.future
  }
}
diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/DomainAdminCommands.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/DomainAdminCommands.scala
new file mode 100644
index 0000000000..a1be495133
--- /dev/null
+++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/DomainAdminCommands.scala
@@ -0,0 +1,81 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.admin.api.client.commands

import cats.syntax.either.*
import cats.syntax.traverse.*
import com.digitalasset.canton.admin.api.client.data.StaticDomainParameters as StaticDomainParametersConfig
import com.digitalasset.canton.domain.admin.v0 as adminproto
import com.digitalasset.canton.domain.service.ServiceAgreementAcceptance
import com.digitalasset.canton.protocol.StaticDomainParameters as StaticDomainParametersInternal
import com.google.protobuf.empty.Empty
import io.grpc.ManagedChannel

import scala.concurrent.Future

/** Admin-api commands against the domain service. */
object DomainAdminCommands {

  /** Shared stub creation for all domain-service commands. */
  abstract class BaseDomainServiceCommand[Req, Rep, Res] extends GrpcAdminCommand[Req, Rep, Res] {
    override type Svc = adminproto.DomainServiceGrpc.DomainServiceStub
    override def createService(
        channel: ManagedChannel
    ): adminproto.DomainServiceGrpc.DomainServiceStub =
      adminproto.DomainServiceGrpc.stub(channel)
  }

  /** Lists all service agreements that have been accepted on the domain. */
  final case object ListAcceptedServiceAgreements
      extends BaseDomainServiceCommand[Empty, adminproto.ServiceAgreementAcceptances, Seq[
        ServiceAgreementAcceptance
      ]] {
    override def createRequest(): Either[String, Empty] = Right(Empty())

    override def submitRequest(
        service: adminproto.DomainServiceGrpc.DomainServiceStub,
        request: Empty,
    ): Future[adminproto.ServiceAgreementAcceptances] =
      service.listServiceAgreementAcceptances(request)

    override def handleResponse(
        response: adminproto.ServiceAgreementAcceptances
    ): Either[String, Seq[ServiceAgreementAcceptance]] =
      response.acceptances
        .traverse(ServiceAgreementAcceptance.fromProtoV0)
        .bimap(_.toString, _.toSeq)
  }

  /** Fetches the domain's static parameters and converts them to their console
    * (config) representation.
    */
  final case class GetDomainParameters()
      extends BaseDomainServiceCommand[
        adminproto.GetDomainParameters.Request,
        adminproto.GetDomainParameters.Response,
        StaticDomainParametersConfig,
      ] {
    override def createRequest(): Either[String, adminproto.GetDomainParameters.Request] = Right(
      adminproto.GetDomainParameters.Request()
    )
    override def submitRequest(
        service: adminproto.DomainServiceGrpc.DomainServiceStub,
        request: adminproto.GetDomainParameters.Request,
    ): Future[adminproto.GetDomainParameters.Response] =
      // Fix: forward the request built by createRequest instead of constructing
      // a second, unrelated Request() and ignoring the parameter.
      service.getDomainParametersVersioned(request)

    override def handleResponse(
        response: adminproto.GetDomainParameters.Response
    ): Either[String, StaticDomainParametersConfig] = {
      import adminproto.GetDomainParameters.Response.Parameters

      response.parameters match {
        case Parameters.Empty => Left("Field parameters was not found in the response")
        case Parameters.ParametersV1(parametersV1) =>
          (for {
            staticDomainParametersInternal <- StaticDomainParametersInternal.fromProtoV1(
              parametersV1
            )
            staticDomainParametersConfig = StaticDomainParametersConfig(
              staticDomainParametersInternal
            )
          } yield staticDomainParametersConfig).leftMap(_.toString)
      }
    }
  }
}
diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/DomainTimeCommands.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/DomainTimeCommands.scala
new file mode 100644
index 0000000000..0c57725eb3
--- /dev/null
+++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/DomainTimeCommands.scala
@@ -0,0 +1,75 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.admin.api.client.commands

import cats.syntax.either.*
import com.digitalasset.canton.admin.api.client.commands.GrpcAdminCommand.{
  CustomClientTimeout,
  TimeoutType,
}
import com.digitalasset.canton.config.NonNegativeDuration
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.domain.api.v0
import com.digitalasset.canton.domain.api.v0.DomainTimeServiceGrpc.DomainTimeServiceStub
import com.digitalasset.canton.time.{
  AwaitTimeRequest,
  FetchTimeRequest,
  FetchTimeResponse,
  NonNegativeFiniteDuration,
}
import com.digitalasset.canton.topology.DomainId
import com.google.protobuf.empty.Empty
import io.grpc.ManagedChannel

import scala.concurrent.Future

/** Console commands for querying and awaiting domain time. */
object DomainTimeCommands {

  /** Shared stub creation for the domain time service. */
  abstract class BaseDomainTimeCommand[Req, Rep, Res] extends GrpcAdminCommand[Req, Rep, Res] {
    override type Svc = DomainTimeServiceStub
    override def createService(channel: ManagedChannel): DomainTimeServiceStub =
      v0.DomainTimeServiceGrpc.stub(channel)
  }

  /** Fetch a domain timestamp no staler than `freshnessBound`. */
  final case class FetchTime(
      domainIdO: Option[DomainId],
      freshnessBound: NonNegativeFiniteDuration,
      timeout: NonNegativeDuration,
  ) extends BaseDomainTimeCommand[FetchTimeRequest, v0.FetchTimeResponse, FetchTimeResponse] {

    // the caller decides how long to wait for the domain to answer
    override def timeoutType: TimeoutType = CustomClientTimeout(timeout)

    override def createRequest(): Either[String, FetchTimeRequest] = {
      val req = FetchTimeRequest(domainIdO, freshnessBound)
      Right(req)
    }

    override def submitRequest(
        service: DomainTimeServiceStub,
        request: FetchTimeRequest,
    ): Future[v0.FetchTimeResponse] = {
      val protoRequest = request.toProtoV0
      service.fetchTime(protoRequest)
    }

    override def handleResponse(response: v0.FetchTimeResponse): Either[String, FetchTimeResponse] =
      FetchTimeResponse.fromProto(response).leftMap(_.toString)
  }

  /** Block until the domain time has advanced to at least `time`. */
  final case class AwaitTime(
      domainIdO: Option[DomainId],
      time: CantonTimestamp,
      timeout: NonNegativeDuration,
  ) extends BaseDomainTimeCommand[AwaitTimeRequest, Empty, Unit] {

    // the caller decides how long to wait for the target time to be reached
    override def timeoutType: TimeoutType = CustomClientTimeout(timeout)

    override def createRequest(): Either[String, AwaitTimeRequest] = {
      val req = AwaitTimeRequest(domainIdO, time)
      Right(req)
    }

    override def submitRequest(
        service: DomainTimeServiceStub,
        request: AwaitTimeRequest,
    ): Future[Empty] = {
      val protoRequest = request.toProtoV0
      service.awaitTime(protoRequest)
    }

    // the server's Empty reply carries no information
    override def handleResponse(response: Empty): Either[String, Unit] = Right(())
  }
}
diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/EnterpriseMediatorAdministrationCommands.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/EnterpriseMediatorAdministrationCommands.scala
new file mode 100644
index 0000000000..54075c59c2
--- /dev/null
+++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/EnterpriseMediatorAdministrationCommands.scala
@@ -0,0 +1,175 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.admin.api.client.commands

import cats.syntax.either.*
import cats.syntax.option.*
import com.digitalasset.canton.admin.api.client.commands.GrpcAdminCommand.{
  DefaultUnboundedTimeout,
  TimeoutType,
}
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.crypto.{Fingerprint, PublicKey}
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.domain.admin.v0.EnterpriseMediatorAdministrationServiceGrpc
import com.digitalasset.canton.domain.admin.{v0, v2}
import com.digitalasset.canton.domain.mediator.admin.gprc.{
  InitializeMediatorRequest,
  InitializeMediatorRequestX,
  InitializeMediatorResponse,
  InitializeMediatorResponseX,
}
import com.digitalasset.canton.protocol.StaticDomainParameters
import com.digitalasset.canton.pruning.admin.v0.LocatePruningTimestamp
import com.digitalasset.canton.sequencing.SequencerConnections
import com.digitalasset.canton.topology.store.StoredTopologyTransactions
import com.digitalasset.canton.topology.transaction.TopologyChangeOp
import com.digitalasset.canton.topology.{DomainId, MediatorId}
import com.google.protobuf.empty.Empty
import io.grpc.ManagedChannel

import scala.concurrent.Future

/** Admin-api commands for initializing and administrating mediator nodes. */
object EnterpriseMediatorAdministrationCommands {

  /** Shared stub creation for the (2.x-style) mediator initialization service. */
  abstract class BaseMediatorInitializationCommand[Req, Rep, Res]
      extends GrpcAdminCommand[Req, Rep, Res] {
    override type Svc = v0.MediatorInitializationServiceGrpc.MediatorInitializationServiceStub
    override def createService(
        channel: ManagedChannel
    ): v0.MediatorInitializationServiceGrpc.MediatorInitializationServiceStub =
      v0.MediatorInitializationServiceGrpc.stub(channel)
  }

  /** Shared stub creation for the X-nodes (v2) mediator initialization service. */
  abstract class BaseMediatorXInitializationCommand[Req, Rep, Res]
      extends GrpcAdminCommand[Req, Rep, Res] {
    override type Svc = v2.MediatorInitializationServiceGrpc.MediatorInitializationServiceStub
    override def createService(
        channel: ManagedChannel
    ): v2.MediatorInitializationServiceGrpc.MediatorInitializationServiceStub =
      v2.MediatorInitializationServiceGrpc.stub(channel)
  }

  /** Shared stub creation for the mediator administration (pruning) service. */
  abstract class BaseMediatorAdministrationCommand[Req, Rep, Res]
      extends GrpcAdminCommand[Req, Rep, Res] {
    override type Svc =
      v0.EnterpriseMediatorAdministrationServiceGrpc.EnterpriseMediatorAdministrationServiceStub
    override def createService(
        channel: ManagedChannel
    ): v0.EnterpriseMediatorAdministrationServiceGrpc.EnterpriseMediatorAdministrationServiceStub =
      v0.EnterpriseMediatorAdministrationServiceGrpc.stub(channel)
  }

  /** Initialize a (2.x-style) mediator node; returns the mediator's public key. */
  final case class Initialize(
      domainId: DomainId,
      mediatorId: MediatorId,
      topologyState: Option[StoredTopologyTransactions[TopologyChangeOp.Positive]],
      domainParameters: StaticDomainParameters,
      sequencerConnections: SequencerConnections,
      signingKeyFingerprint: Option[Fingerprint],
  ) extends BaseMediatorInitializationCommand[
        v0.InitializeMediatorRequest,
        v0.InitializeMediatorResponse,
        PublicKey,
      ] {
    override def createRequest(): Either[String, v0.InitializeMediatorRequest] =
      Right(
        InitializeMediatorRequest(
          domainId,
          mediatorId,
          topologyState,
          domainParameters,
          sequencerConnections,
          signingKeyFingerprint,
        ).toProtoV0
      )

    override def submitRequest(
        service: v0.MediatorInitializationServiceGrpc.MediatorInitializationServiceStub,
        request: v0.InitializeMediatorRequest,
    ): Future[v0.InitializeMediatorResponse] =
      service.initialize(request)
    override def handleResponse(
        response: v0.InitializeMediatorResponse
    ): Either[String, PublicKey] =
      InitializeMediatorResponse
        .fromProtoV0(response)
        .leftMap(err => s"Failed to deserialize response: $err")
        // the decoded response itself carries success/failure; surface it
        .flatMap(_.toEither)
  }

  /** Initialize an X-node mediator (v2 API). */
  final case class InitializeX(
      domainId: DomainId,
      domainParameters: StaticDomainParameters,
      sequencerConnections: SequencerConnections,
  ) extends BaseMediatorXInitializationCommand[
        v2.InitializeMediatorRequest,
        v2.InitializeMediatorResponse,
        Unit,
      ] {
    override def createRequest(): Either[String, v2.InitializeMediatorRequest] =
      Right(
        InitializeMediatorRequestX(
          domainId,
          domainParameters,
          sequencerConnections,
        ).toProtoV2
      )

    override def submitRequest(
        service: v2.MediatorInitializationServiceGrpc.MediatorInitializationServiceStub,
        request: v2.InitializeMediatorRequest,
    ): Future[v2.InitializeMediatorResponse] =
      service.initialize(request)
    override def handleResponse(
        response: v2.InitializeMediatorResponse
    ): Either[String, Unit] =
      InitializeMediatorResponseX
        .fromProtoV2(response)
        .leftMap(err => s"Failed to deserialize response: $err")
        .map(_ => ())

  }

  /** Prune mediator state up to the given timestamp.
    *
    * Consistency fix: extend [[BaseMediatorAdministrationCommand]] (like
    * [[LocatePruningTimestampCommand]] below) instead of re-declaring `Svc`
    * and `createService` inline with identical bodies.
    */
  final case class Prune(timestamp: CantonTimestamp)
      extends BaseMediatorAdministrationCommand[v0.MediatorPruningRequest, Empty, Unit] {
    override def createRequest(): Either[String, v0.MediatorPruningRequest] =
      Right(v0.MediatorPruningRequest(timestamp.toProtoPrimitive.some))
    override def submitRequest(
        service: v0.EnterpriseMediatorAdministrationServiceGrpc.EnterpriseMediatorAdministrationServiceStub,
        request: v0.MediatorPruningRequest,
    ): Future[Empty] = service.prune(request)
    override def handleResponse(response: Empty): Either[String, Unit] = Right(())

    // all pruning commands will potentially take a long time
    override def timeoutType: TimeoutType = DefaultUnboundedTimeout
  }

  /** Locate the pruning timestamp at the given (1-based) index from the head. */
  final case class LocatePruningTimestampCommand(index: PositiveInt)
      extends BaseMediatorAdministrationCommand[
        LocatePruningTimestamp.Request,
        LocatePruningTimestamp.Response,
        Option[CantonTimestamp],
      ] {
    override def createRequest(): Either[String, LocatePruningTimestamp.Request] = Right(
      LocatePruningTimestamp.Request(index.value)
    )

    override def submitRequest(
        service: EnterpriseMediatorAdministrationServiceGrpc.EnterpriseMediatorAdministrationServiceStub,
        request: LocatePruningTimestamp.Request,
    ): Future[LocatePruningTimestamp.Response] =
      service.locatePruningTimestamp(request)

    override def handleResponse(
        response: LocatePruningTimestamp.Response
    ): Either[String, Option[CantonTimestamp]] =
      // absent timestamp is a valid answer (nothing to prune), not an error
      response.timestamp.fold(Right(None): Either[String, Option[CantonTimestamp]])(
        CantonTimestamp.fromProtoPrimitive(_).bimap(_.message, Some(_))
      )
  }
}
diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/EnterpriseSequencerAdminCommands.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/EnterpriseSequencerAdminCommands.scala
new file mode 100644
index 0000000000..4b5d3d365d
--- /dev/null
+++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/EnterpriseSequencerAdminCommands.scala
@@ -0,0 +1,289 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.admin.api.client.commands

import cats.syntax.either.*
import cats.syntax.option.*
import com.digitalasset.canton.admin.api.client.commands.GrpcAdminCommand.{
  DefaultUnboundedTimeout,
  TimeoutType,
}
import com.digitalasset.canton.admin.api.client.data.StaticDomainParameters
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.domain.admin.v2.SequencerInitializationServiceGrpc
import com.digitalasset.canton.domain.admin.{v0, v2}
import com.digitalasset.canton.domain.sequencing.admin.grpc.{
  InitializeSequencerRequest,
  InitializeSequencerRequestX,
  InitializeSequencerResponse,
  InitializeSequencerResponseX,
}
import com.digitalasset.canton.domain.sequencing.sequencer.{LedgerIdentity, SequencerSnapshot}
import com.digitalasset.canton.pruning.admin.v0.LocatePruningTimestamp
import com.digitalasset.canton.topology.store.StoredTopologyTransactions
import com.digitalasset.canton.topology.store.StoredTopologyTransactionsX.GenericStoredTopologyTransactionsX
import com.digitalasset.canton.topology.transaction.TopologyChangeOp
import com.digitalasset.canton.topology.{DomainId, Member}
import com.google.protobuf.empty.Empty
import io.grpc.ManagedChannel

import scala.concurrent.Future

/** Admin-api commands for initializing and administrating sequencer nodes. */
object EnterpriseSequencerAdminCommands {

  /** Shared stub creation for the (2.x-style) sequencer initialization service. */
  abstract class BaseSequencerInitializationCommand[Req, Rep, Res]
      extends GrpcAdminCommand[Req, Rep, Res] {
    override type Svc = v0.SequencerInitializationServiceGrpc.SequencerInitializationServiceStub
    override def createService(
        channel: ManagedChannel
    ): v0.SequencerInitializationServiceGrpc.SequencerInitializationServiceStub =
      v0.SequencerInitializationServiceGrpc.stub(channel)
  }

  /** Shared stub creation for the sequencer administration (snapshot/pruning) service. */
  abstract class BaseSequencerAdministrationCommand[Req, Rep, Res]
      extends GrpcAdminCommand[Req, Rep, Res] {
    override type Svc =
      v0.EnterpriseSequencerAdministrationServiceGrpc.EnterpriseSequencerAdministrationServiceStub
    override def createService(
        channel: ManagedChannel
    ): v0.EnterpriseSequencerAdministrationServiceGrpc.EnterpriseSequencerAdministrationServiceStub =
      v0.EnterpriseSequencerAdministrationServiceGrpc.stub(channel)
  }

  /** Shared stub creation for the topology bootstrap service. */
  abstract class BaseSequencerTopologyBootstrapCommand[Req, Rep, Res]
      extends GrpcAdminCommand[Req, Rep, Res] {
    override type Svc = v0.TopologyBootstrapServiceGrpc.TopologyBootstrapServiceStub
    override def createService(
        channel: ManagedChannel
    ): v0.TopologyBootstrapServiceGrpc.TopologyBootstrapServiceStub =
      v0.TopologyBootstrapServiceGrpc.stub(channel)
  }

  /** Common request/response handling for sequencer initialization; concrete
    * subclasses choose the proto request version via `serializer`.
    */
  sealed trait Initialize[ProtoRequest]
      extends BaseSequencerInitializationCommand[
        ProtoRequest,
        v0.InitResponse,
        InitializeSequencerResponse,
      ] {
    protected def domainId: DomainId
    protected def topologySnapshot: StoredTopologyTransactions[TopologyChangeOp.Positive]

    protected def domainParameters: StaticDomainParameters

    // optional snapshot to initialize from (e.g. when onboarding a new sequencer)
    protected def snapshotO: Option[SequencerSnapshot]

    // converts the version-agnostic request into the concrete proto message
    protected def serializer: InitializeSequencerRequest => ProtoRequest

    override def createRequest(): Either[String, ProtoRequest] = {
      val request = InitializeSequencerRequest(
        domainId,
        topologySnapshot,
        domainParameters.toInternal,
        snapshotO,
      )
      Right(serializer(request))
    }

    override def handleResponse(
        response: v0.InitResponse
    ): Either[String, InitializeSequencerResponse] =
      InitializeSequencerResponse
        .fromProtoV0(response)
        .leftMap(err => s"Failed to deserialize response: $err")

    // initialization may take a long time; let the server decide when it is done
    override def timeoutType: TimeoutType = DefaultUnboundedTimeout
  }

  object Initialize {
    /** v2-proto variant of the initialization request. */
    final case class V2(
        domainId: DomainId,
        topologySnapshot: StoredTopologyTransactions[TopologyChangeOp.Positive],
        domainParameters: StaticDomainParameters,
        snapshotO: Option[SequencerSnapshot],
    ) extends Initialize[v2.InitRequest] {

      override protected def serializer: InitializeSequencerRequest => v2.InitRequest = _.toProtoV2

      override def submitRequest(
          service: v0.SequencerInitializationServiceGrpc.SequencerInitializationServiceStub,
          request: v2.InitRequest,
      ): Future[v0.InitResponse] =
        service.initV2(request)
    }

    // default factory currently picks the V2 wire format
    def apply(
        domainId: DomainId,
        topologySnapshot: StoredTopologyTransactions[TopologyChangeOp.Positive],
        domainParameters: StaticDomainParameters,
        snapshotO: Option[SequencerSnapshot] = None,
    ): Initialize[_] =
      V2(domainId, topologySnapshot, domainParameters, snapshotO)
  }

  /** Initialize an X-node sequencer (v2 initialization service). */
  final case class InitializeX(
      topologySnapshot: GenericStoredTopologyTransactionsX,
      domainParameters: com.digitalasset.canton.protocol.StaticDomainParameters,
      sequencerSnapshot: Option[SequencerSnapshot],
  ) extends GrpcAdminCommand[
        v2.InitializeSequencerRequest,
        v2.InitializeSequencerResponse,
        InitializeSequencerResponseX,
      ] {
    override type Svc = v2.SequencerInitializationServiceGrpc.SequencerInitializationServiceStub

    override def createService(
        channel: ManagedChannel
    ): SequencerInitializationServiceGrpc.SequencerInitializationServiceStub =
      v2.SequencerInitializationServiceGrpc.stub(channel)

    override def submitRequest(
        service: SequencerInitializationServiceGrpc.SequencerInitializationServiceStub,
        request: v2.InitializeSequencerRequest,
    ): Future[v2.InitializeSequencerResponse] =
      service.initialize(request)

    override def createRequest(): Either[String, v2.InitializeSequencerRequest] =
      Right(
        InitializeSequencerRequestX(
          topologySnapshot,
          domainParameters,
          sequencerSnapshot,
        ).toProtoV2
      )

    override def handleResponse(
        response: v2.InitializeSequencerResponse
    ): Either[String, InitializeSequencerResponseX] =
      InitializeSequencerResponseX.fromProtoV2(response).leftMap(_.toString)
  }

  /** Take a sequencer state snapshot at the given timestamp. */
  final case class Snapshot(timestamp: CantonTimestamp)
      extends BaseSequencerAdministrationCommand[
        v0.Snapshot.Request,
        v0.Snapshot.Response,
        SequencerSnapshot,
      ] {
    override def createRequest(): Either[String, v0.Snapshot.Request] = {
      Right(v0.Snapshot.Request(Some(timestamp.toProtoPrimitive)))
    }

    override def submitRequest(
        service: v0.EnterpriseSequencerAdministrationServiceGrpc.EnterpriseSequencerAdministrationServiceStub,
        request: v0.Snapshot.Request,
    ): Future[v0.Snapshot.Response] = service.snapshot(request)

    override def handleResponse(response: v0.Snapshot.Response): Either[String, SequencerSnapshot] =
      // the response is a oneof: failure, plain proto success, or a versioned byte-string success
      response.value match {
        case v0.Snapshot.Response.Value.Failure(v0.Snapshot.Failure(reason)) => Left(reason)
        case v0.Snapshot.Response.Value.Success(v0.Snapshot.Success(Some(result))) =>
          SequencerSnapshot.fromProtoV1(result).leftMap(_.toString)
        case v0.Snapshot.Response.Value.VersionedSuccess(v0.Snapshot.VersionedSuccess(snapshot)) =>
          SequencerSnapshot.fromByteString(snapshot).leftMap(_.toString)
        case _ => Left("response is empty")
      }

    // command will potentially take a long time
    override def timeoutType: TimeoutType = DefaultUnboundedTimeout

  }

  /** Prune sequencer state up to the given timestamp; returns a human-readable report. */
  final case class Prune(timestamp: CantonTimestamp)
      extends BaseSequencerAdministrationCommand[v0.Pruning.Request, v0.Pruning.Response, String] {
    override def createRequest(): Either[String, v0.Pruning.Request] =
      Right(v0.Pruning.Request(timestamp.toProtoPrimitive.some))

    override def submitRequest(
        service: v0.EnterpriseSequencerAdministrationServiceGrpc.EnterpriseSequencerAdministrationServiceStub,
        request: v0.Pruning.Request,
    ): Future[v0.Pruning.Response] =
      service.prune(request)
    override def handleResponse(response: v0.Pruning.Response): Either[String, String] =
      Either.cond(
        response.details.nonEmpty,
        response.details,
        "Pruning response did not contain details",
      )

    // command will potentially take a long time
    override def timeoutType: TimeoutType = DefaultUnboundedTimeout

  }

  /** Locate the pruning timestamp at the given (1-based) index from the head. */
  final case class LocatePruningTimestampCommand(index: PositiveInt)
      extends BaseSequencerAdministrationCommand[
        LocatePruningTimestamp.Request,
        LocatePruningTimestamp.Response,
        Option[CantonTimestamp],
      ] {
    override def createRequest(): Either[String, LocatePruningTimestamp.Request] = Right(
      LocatePruningTimestamp.Request(index.value)
    )

    override def submitRequest(
        service: v0.EnterpriseSequencerAdministrationServiceGrpc.EnterpriseSequencerAdministrationServiceStub,
        request: LocatePruningTimestamp.Request,
    ): Future[LocatePruningTimestamp.Response] =
      service.locatePruningTimestamp(request)

    override def handleResponse(
        response: LocatePruningTimestamp.Response
    ): Either[String, Option[CantonTimestamp]] =
      // absent timestamp is a valid answer (nothing to prune), not an error
      response.timestamp.fold(Right(None): Either[String, Option[CantonTimestamp]])(
        CantonTimestamp.fromProtoPrimitive(_).bimap(_.message, Some(_))
      )
  }

  /** Disable a member on the sequencer so it can no longer read or write. */
  final case class DisableMember(member: Member)
      extends BaseSequencerAdministrationCommand[v0.DisableMemberRequest, Empty, Unit] {
    override def createRequest(): Either[String, v0.DisableMemberRequest] =
      Right(v0.DisableMemberRequest(member.toProtoPrimitive))
    override def submitRequest(
        service: v0.EnterpriseSequencerAdministrationServiceGrpc.EnterpriseSequencerAdministrationServiceStub,
        request: v0.DisableMemberRequest,
    ): Future[Empty] = service.disableMember(request)
    override def handleResponse(response: Empty): Either[String, Unit] = Right(())
  }

  /** Authorize a ledger identity on the sequencer. */
  final case class AuthorizeLedgerIdentity(ledgerIdentity: LedgerIdentity)
      extends BaseSequencerAdministrationCommand[
        v0.LedgerIdentity.AuthorizeRequest,
        v0.LedgerIdentity.AuthorizeResponse,
        Unit,
      ] {
    override def createRequest(): Either[String, v0.LedgerIdentity.AuthorizeRequest] =
      Right(v0.LedgerIdentity.AuthorizeRequest(Some(ledgerIdentity.toProtoV0)))
    override def submitRequest(
        service: v0.EnterpriseSequencerAdministrationServiceGrpc.EnterpriseSequencerAdministrationServiceStub,
        request: v0.LedgerIdentity.AuthorizeRequest,
    ): Future[v0.LedgerIdentity.AuthorizeResponse] = service.authorizeLedgerIdentity(request)
    override def handleResponse(
        response: v0.LedgerIdentity.AuthorizeResponse
    ): Either[String, Unit] = response.value match {
      case v0.LedgerIdentity.AuthorizeResponse.Value.Failure(v0.LedgerIdentity.Failure(reason)) =>
        Left(reason)
      case v0.LedgerIdentity.AuthorizeResponse.Value.Success(v0.LedgerIdentity.Success()) =>
        Right(())
      case other => Left(s"Empty response: $other")
    }
  }

  /** Bootstrap the sequencer's topology state from a positive-transaction snapshot. */
  final case class BootstrapTopology(
      topologySnapshot: StoredTopologyTransactions[TopologyChangeOp.Positive]
  ) extends BaseSequencerTopologyBootstrapCommand[v0.TopologyBootstrapRequest, Empty, Unit] {
    override def createRequest(): Either[String, v0.TopologyBootstrapRequest] =
      Right(v0.TopologyBootstrapRequest(Some(topologySnapshot.toProtoV0)))

    override def submitRequest(
        service: v0.TopologyBootstrapServiceGrpc.TopologyBootstrapServiceStub,
        request: v0.TopologyBootstrapRequest,
    ): Future[Empty] =
      service.bootstrap(request)

    override def handleResponse(response: Empty): Either[String, Unit] = Right(())

    // command will potentially take a long time
    override def timeoutType: TimeoutType = DefaultUnboundedTimeout

  }
}
diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/LedgerApiCommands.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/LedgerApiCommands.scala
new file mode 100644
index 0000000000..64d75a33d1
--- /dev/null
+++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/LedgerApiCommands.scala
@@ -0,0 +1,1588 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.admin.api.client.commands + +import cats.syntax.either.* +import com.daml.ledger.api +import com.daml.ledger.api.v1.active_contracts_service.ActiveContractsServiceGrpc.ActiveContractsServiceStub +import com.daml.ledger.api.v1.active_contracts_service.{ + ActiveContractsServiceGrpc, + GetActiveContractsRequest, + GetActiveContractsResponse, +} +import com.daml.ledger.api.v1.admin.identity_provider_config_service.IdentityProviderConfigServiceGrpc.IdentityProviderConfigServiceStub +import com.daml.ledger.api.v1.admin.identity_provider_config_service.{ + CreateIdentityProviderConfigRequest, + CreateIdentityProviderConfigResponse, + DeleteIdentityProviderConfigRequest, + DeleteIdentityProviderConfigResponse, + GetIdentityProviderConfigRequest, + GetIdentityProviderConfigResponse, + IdentityProviderConfig, + IdentityProviderConfigServiceGrpc, + ListIdentityProviderConfigsRequest, + ListIdentityProviderConfigsResponse, + UpdateIdentityProviderConfigRequest, + UpdateIdentityProviderConfigResponse, +} +import com.daml.ledger.api.v1.admin.metering_report_service.MeteringReportServiceGrpc.MeteringReportServiceStub +import com.daml.ledger.api.v1.admin.metering_report_service.{ + GetMeteringReportRequest, + GetMeteringReportResponse, + MeteringReportServiceGrpc, +} +import com.daml.ledger.api.v1.admin.object_meta.ObjectMeta +import com.daml.ledger.api.v1.admin.package_management_service.PackageManagementServiceGrpc.PackageManagementServiceStub +import com.daml.ledger.api.v1.admin.package_management_service.* +import com.daml.ledger.api.v1.admin.participant_pruning_service.ParticipantPruningServiceGrpc.ParticipantPruningServiceStub +import com.daml.ledger.api.v1.admin.participant_pruning_service.* +import com.daml.ledger.api.v1.admin.party_management_service.PartyManagementServiceGrpc.PartyManagementServiceStub +import com.daml.ledger.api.v1.admin.party_management_service.* +import 
com.daml.ledger.api.v1.admin.user_management_service.UserManagementServiceGrpc.UserManagementServiceStub +import com.daml.ledger.api.v1.admin.user_management_service.{ + CreateUserRequest, + CreateUserResponse, + DeleteUserRequest, + DeleteUserResponse, + GetUserRequest, + GetUserResponse, + GrantUserRightsRequest, + GrantUserRightsResponse, + ListUserRightsRequest, + ListUserRightsResponse, + ListUsersRequest, + ListUsersResponse, + RevokeUserRightsRequest, + RevokeUserRightsResponse, + Right as UserRight, + UpdateUserIdentityProviderRequest, + UpdateUserIdentityProviderResponse, + UpdateUserRequest, + UpdateUserResponse, + User, + UserManagementServiceGrpc, +} +import com.daml.ledger.api.v1.command_completion_service.CommandCompletionServiceGrpc.CommandCompletionServiceStub +import com.daml.ledger.api.v1.command_completion_service.* +import com.daml.ledger.api.v1.command_service.CommandServiceGrpc.CommandServiceStub +import com.daml.ledger.api.v1.command_service.{ + CommandServiceGrpc, + SubmitAndWaitForTransactionResponse, + SubmitAndWaitForTransactionTreeResponse, + SubmitAndWaitRequest, +} +import com.daml.ledger.api.v1.command_submission_service.CommandSubmissionServiceGrpc.CommandSubmissionServiceStub +import com.daml.ledger.api.v1.command_submission_service.{ + CommandSubmissionServiceGrpc, + SubmitRequest, +} +import com.daml.ledger.api.v1.commands.{Command, Commands as CommandsV1, DisclosedContract} +import com.daml.ledger.api.v1.completion.Completion +import com.daml.ledger.api.v1.event_query_service.EventQueryServiceGrpc.EventQueryServiceStub +import com.daml.ledger.api.v1.event_query_service.* +import com.daml.ledger.api.v1.ledger_configuration_service.LedgerConfigurationServiceGrpc.LedgerConfigurationServiceStub +import com.daml.ledger.api.v1.ledger_configuration_service.{ + GetLedgerConfigurationRequest, + GetLedgerConfigurationResponse, + LedgerConfiguration, + LedgerConfigurationServiceGrpc, +} +import 
com.daml.ledger.api.v1.ledger_offset.LedgerOffset +import com.daml.ledger.api.v1.testing.time_service.TimeServiceGrpc.TimeServiceStub +import com.daml.ledger.api.v1.testing.time_service.{ + GetTimeRequest, + GetTimeResponse, + SetTimeRequest, + TimeServiceGrpc, +} +import com.daml.ledger.api.v1.transaction.{Transaction, TransactionTree} +import com.daml.ledger.api.v1.transaction_filter.{ + Filters, + InclusiveFilters, + TemplateFilter, + TransactionFilter, +} +import com.daml.ledger.api.v1.transaction_service.TransactionServiceGrpc.TransactionServiceStub +import com.daml.ledger.api.v1.transaction_service.* +import com.digitalasset.canton.LfPartyId +import com.digitalasset.canton.admin.api.client.commands.GrpcAdminCommand.{ + DefaultUnboundedTimeout, + ServerEnforcedTimeout, + TimeoutType, +} +import com.digitalasset.canton.admin.api.client.commands.LedgerApiTypeWrappers.WrappedCreatedEvent +import com.digitalasset.canton.admin.api.client.data.{ + LedgerApiUser, + LedgerMeteringReport, + ListLedgerApiUsersResult, + TemplateId, + UserRights, +} +import com.digitalasset.canton.config.NonNegativeDuration +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.ledger.api.domain.{IdentityProviderId, JwksUrl} +import com.digitalasset.canton.ledger.api.{DeduplicationPeriod, domain} +import com.digitalasset.canton.ledger.client.services.admin.IdentityProviderConfigClient +import com.digitalasset.canton.logging.ErrorLoggingContext +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.networking.grpc.ForwardingStreamObserver +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.topology.PartyId +import com.digitalasset.canton.util.BinaryFileUtil +import com.google.protobuf.empty.Empty +import com.google.protobuf.field_mask.FieldMask +import io.grpc.* +import io.grpc.stub.StreamObserver + +import 
import java.time.Instant
import java.util.UUID
import java.util.concurrent.ScheduledExecutorService
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}

/** Console-side wrappers around the Ledger API (v1) gRPC services.
  * Each inner object groups the admin commands for one gRPC service; each command
  * knows how to build its request, submit it on the service stub, and map the
  * response to a console-friendly result (or an error string).
  */
object LedgerApiCommands {

  // Application id attached by default to commands submitted from the Canton console.
  final val defaultApplicationId = "CantonConsole"

  object TransactionService {

    /** All commands in this group run against the TransactionService stub. */
    trait BaseCommand[Req, Resp, Res] extends GrpcAdminCommand[Req, Resp, Res] {
      override type Svc = TransactionServiceStub
      override def createService(channel: ManagedChannel): TransactionServiceStub =
        TransactionServiceGrpc.stub(channel)
    }

    /** Returns the current ledger end offset.
      * Fails if the server response does not carry an offset.
      */
    final case class GetLedgerEnd()
        extends BaseCommand[GetLedgerEndRequest, GetLedgerEndResponse, LedgerOffset] {
      override def createRequest(): Either[String, GetLedgerEndRequest] = Right(
        GetLedgerEndRequest()
      )
      override def submitRequest(
          service: TransactionServiceStub,
          request: GetLedgerEndRequest,
      ): Future[GetLedgerEndResponse] =
        service.getLedgerEnd(request)
      override def handleResponse(response: GetLedgerEndResponse): Either[String, LedgerOffset] =
        response.offset.toRight("Received empty response without offset")
    }

    /** Shared scaffolding for streaming transaction subscriptions.
      *
      * The command returns an [[AutoCloseable]] handle wrapping the cancellable gRPC
      * context of the stream; closing the handle cancels the subscription.
      */
    trait SubscribeBase[Resp, Res]
        extends BaseCommand[GetTransactionsRequest, AutoCloseable, AutoCloseable] {
      // The subscription should never be cut short because of a gRPC timeout
      override def timeoutType: TimeoutType = ServerEnforcedTimeout

      // Downstream observer that receives the extracted results.
      def observer: StreamObserver[Res]

      // Offset at which the subscription starts.
      def begin: LedgerOffset

      // Optional end offset; None keeps the subscription open-ended.
      def end: Option[LedgerOffset]

      def filter: TransactionFilter

      def verbose: Boolean

      // Issues the actual streaming call on the stub.
      def doRequest(
          service: TransactionServiceStub,
          request: GetTransactionsRequest,
          rawObserver: StreamObserver[Resp],
      ): Unit

      // Maps one raw stream response to zero or more results for the observer.
      def extractResults(response: Resp): IterableOnce[Res]

      implicit def loggingContext: ErrorLoggingContext

      override def createRequest(): Either[String, GetTransactionsRequest] = Right {
        GetTransactionsRequest(
          begin = Some(begin),
          end = end,
          verbose = verbose,
          filter = Some(filter),
        )
      }

      override def submitRequest(
          service: TransactionServiceStub,
          request: GetTransactionsRequest,
      ): Future[AutoCloseable] = {
        val rawObserver = new ForwardingStreamObserver[Resp, Res](observer, extractResults)
        // Run the stream inside a cancellable context so that closing the returned
        // handle tears down the subscription.
        val context = Context.current().withCancellation()
        context.run(() => doRequest(service, request, rawObserver))
        Future.successful(context)
      }

      override def handleResponse(response: AutoCloseable): Either[String, AutoCloseable] = Right(
        response
      )
    }

    /** Subscribes to the transaction *tree* stream and forwards trees to `observer`. */
    final case class SubscribeTrees(
        override val observer: StreamObserver[TransactionTree],
        override val begin: LedgerOffset,
        override val end: Option[LedgerOffset],
        override val filter: TransactionFilter,
        override val verbose: Boolean,
    )(override implicit val loggingContext: ErrorLoggingContext)
        extends SubscribeBase[GetTransactionTreesResponse, TransactionTree] {
      override def doRequest(
          service: TransactionServiceStub,
          request: GetTransactionsRequest,
          rawObserver: StreamObserver[GetTransactionTreesResponse],
      ): Unit =
        service.getTransactionTrees(request, rawObserver)

      override def extractResults(
          response: GetTransactionTreesResponse
      ): IterableOnce[TransactionTree] =
        response.transactions
    }

    /** Subscribes to the *flat* transaction stream and forwards transactions to `observer`. */
    final case class SubscribeFlat(
        override val observer: StreamObserver[Transaction],
        override val begin: LedgerOffset,
        override val end: Option[LedgerOffset],
        override val filter: TransactionFilter,
        override val verbose: Boolean,
    )(override implicit val loggingContext: ErrorLoggingContext)
        extends SubscribeBase[GetTransactionsResponse, Transaction] {
      override def doRequest(
          service: TransactionServiceStub,
          request: GetTransactionsRequest,
          rawObserver: StreamObserver[GetTransactionsResponse],
      ): Unit =
        service.getTransactions(request, rawObserver)

      override def extractResults(response: GetTransactionsResponse): IterableOnce[Transaction] =
        response.transactions
    }

    /** Looks up a transaction tree by transaction id on behalf of `parties`.
      * Returns None (instead of failing) when the transaction is not (yet) known.
      */
    final case class GetTransactionById(parties: Set[LfPartyId], id: String)(implicit
        ec: ExecutionContext
    ) extends BaseCommand[GetTransactionByIdRequest, GetTransactionResponse, Option[
          TransactionTree
        ]]
        with PrettyPrinting {
      override def createRequest(): Either[String, GetTransactionByIdRequest] = Right {
        GetTransactionByIdRequest(
          transactionId = id,
          requestingParties = parties.toSeq,
        )
      }

      override def submitRequest(
          service: TransactionServiceStub,
          request: GetTransactionByIdRequest,
      ): Future[GetTransactionResponse] = {
        // The Ledger API will throw an error if it can't find a transaction by ID.
        // However, as Canton is distributed, a transaction ID might show up later, so we don't treat this as
        // an error and change it to a None
        service.getTransactionById(request).recover {
          case e: StatusRuntimeException if e.getStatus.getCode == Status.Code.NOT_FOUND =>
            GetTransactionResponse(None)
        }
      }

      override def handleResponse(
          response: GetTransactionResponse
      ): Either[String, Option[TransactionTree]] =
        Right(response.transaction)

      override def pretty: Pretty[GetTransactionById] =
        prettyOfClass(
          param("id", _.id.unquoted),
          param("parties", _.parties),
        )
    }

  }
  object PartyManagementService {
    /** All commands in this group run against the PartyManagementService stub. */
    abstract class BaseCommand[Req, Resp, Res] extends GrpcAdminCommand[Req, Resp, Res] {
      override type Svc = PartyManagementServiceStub

      override def createService(channel: ManagedChannel): PartyManagementServiceStub =
        PartyManagementServiceGrpc.stub(channel)
    }

    /** Allocates a new party on the participant.
      * @param partyIdHint hint for the party identifier (server may adjust it)
      * @param annotations free-form metadata stored in the party's local ObjectMeta
      * @param identityProviderId IDP the party is assigned to ("" for the default one)
      */
    final case class AllocateParty(
        partyIdHint: String,
        displayName: String,
        annotations: Map[String, String],
        identityProviderId: String,
    ) extends BaseCommand[AllocatePartyRequest, AllocatePartyResponse, PartyDetails] {
      override def createRequest(): Either[String, AllocatePartyRequest] =
        Right(
          AllocatePartyRequest(
            partyIdHint = partyIdHint,
            displayName = displayName,
            localMetadata = Some(ObjectMeta(annotations = annotations)),
            identityProviderId = identityProviderId,
          )
        )
      override def submitRequest(
          service: PartyManagementServiceStub,
          request: AllocatePartyRequest,
      ): Future[AllocatePartyResponse] =
        service.allocateParty(request)
      override def handleResponse(response: AllocatePartyResponse): Either[String, PartyDetails] =
        response.partyDetails.toRight("Party could not be created")
    }

    /** Updates the local metadata of a party via a field-mask update.
      * Only `local_metadata.annotations` is included in the update mask, and only
      * when `annotationsUpdate` is set; `resourceVersionO` enables optimistic
      * concurrency control when provided.
      */
    final case class Update(
        party: PartyId,
        annotationsUpdate: Option[Map[String, String]],
        resourceVersionO: Option[String],
        identityProviderId: String,
    ) extends BaseCommand[UpdatePartyDetailsRequest, UpdatePartyDetailsResponse, PartyDetails] {

      override def submitRequest(
          service: PartyManagementServiceStub,
          request: UpdatePartyDetailsRequest,
      ): Future[UpdatePartyDetailsResponse] =
        service.updatePartyDetails(request)

      override def createRequest(): Either[String, UpdatePartyDetailsRequest] = {
        val metadata = ObjectMeta(
          annotations = annotationsUpdate.getOrElse(Map.empty),
          resourceVersion = resourceVersionO.getOrElse(""),
        )
        val partyDetails =
          PartyDetails(
            party = party.toProtoPrimitive,
            localMetadata = Some(metadata),
            identityProviderId = identityProviderId,
          )
        // Only request an update of the annotations when an update was actually supplied.
        val updatePaths =
          annotationsUpdate.fold(Seq.empty[String])(_ => Seq("local_metadata.annotations"))
        val req = UpdatePartyDetailsRequest(
          partyDetails = Some(partyDetails),
          updateMask = Some(FieldMask(paths = updatePaths)),
        )
        Right(req)
      }

      override def handleResponse(
          response: UpdatePartyDetailsResponse
      ): Either[String, PartyDetails] =
        response.partyDetails.toRight("Failed to update the party details")

    }

    /** Lists all parties known to the participant for the given identity provider. */
    final case class ListKnownParties(identityProviderId: String)
        extends BaseCommand[ListKnownPartiesRequest, ListKnownPartiesResponse, Seq[
          PartyDetails
        ]] {
      override def createRequest(): Either[String, ListKnownPartiesRequest] =
        Right(
          ListKnownPartiesRequest(
            identityProviderId = identityProviderId
          )
        )
      override def submitRequest(
          service: PartyManagementServiceStub,
          request: ListKnownPartiesRequest,
      ): Future[ListKnownPartiesResponse] =
        service.listKnownParties(request)
      override def handleResponse(
          response: ListKnownPartiesResponse
      ): Either[String, Seq[PartyDetails]] =
        Right(response.partyDetails)
    }

    /** Fetches the details of a single party; fails with PARTY_NOT_FOUND if absent. */
    final case class GetParty(party: PartyId, identityProviderId: String)
        extends BaseCommand[GetPartiesRequest, GetPartiesResponse, PartyDetails] {

      override def createRequest(): Either[String, GetPartiesRequest] =
        Right(
          GetPartiesRequest(
            parties = Seq(party.toProtoPrimitive),
            identityProviderId = identityProviderId,
          )
        )

      override def submitRequest(
          service: PartyManagementServiceStub,
          request: GetPartiesRequest,
      ): Future[GetPartiesResponse] = service.getParties(request)

      override def handleResponse(
          response: GetPartiesResponse
      ): Either[String, PartyDetails] = {
        // We only requested one party, so the first (and only) entry is the result.
        response.partyDetails.headOption.toRight("PARTY_NOT_FOUND")
      }
    }

    /** Reassigns a party from one identity provider to another. */
    final case class UpdateIdp(
        party: PartyId,
        sourceIdentityProviderId: String,
        targetIdentityProviderId: String,
    ) extends BaseCommand[
          UpdatePartyIdentityProviderRequest,
          UpdatePartyIdentityProviderResponse,
          Unit,
        ] {

      override def submitRequest(
          service: PartyManagementServiceStub,
          request: UpdatePartyIdentityProviderRequest,
      ): Future[UpdatePartyIdentityProviderResponse] =
        service.updatePartyIdentityProviderId(request)

      override def createRequest(): Either[String, UpdatePartyIdentityProviderRequest] = Right(
        UpdatePartyIdentityProviderRequest(
          party = party.toProtoPrimitive,
          sourceIdentityProviderId = sourceIdentityProviderId,
          targetIdentityProviderId = targetIdentityProviderId,
        )
      )

      override def handleResponse(
          response: UpdatePartyIdentityProviderResponse
      ): Either[String, Unit] = Right(())

    }
  }
  object PackageService {

    /** All commands in this group run against the PackageManagementService stub. */
    abstract class BaseCommand[Req, Resp, Res] extends GrpcAdminCommand[Req, Resp, Res] {
      override type Svc = PackageManagementServiceStub
      override def createService(channel: ManagedChannel): PackageManagementServiceStub =
        PackageManagementServiceGrpc.stub(channel)
    }

    /** Uploads a DAR file (read from `darPath` on the local filesystem) to the participant. */
    final case class UploadDarFile(darPath: String)
        extends BaseCommand[UploadDarFileRequest, UploadDarFileResponse, Unit] {

      override def createRequest(): Either[String, UploadDarFileRequest] =
        for {
          bytes <- BinaryFileUtil.readByteStringFromFile(darPath)
        } yield UploadDarFileRequest(bytes)
      override def submitRequest(
          service: PackageManagementServiceStub,
          request: UploadDarFileRequest,
      ): Future[UploadDarFileResponse] =
        service.uploadDarFile(request)
      override def handleResponse(response: UploadDarFileResponse): Either[String, Unit] =
        Right(())

      // package upload time might take long if it is a big package
      override def timeoutType: TimeoutType = DefaultUnboundedTimeout

    }

    /** Lists packages known to the participant, truncated client-side to `limit` entries. */
    final case class ListKnownPackages(limit: PositiveInt)
        extends BaseCommand[ListKnownPackagesRequest, ListKnownPackagesResponse, Seq[
          PackageDetails
        ]] {

      override def createRequest(): Either[String, ListKnownPackagesRequest] = Right(
        ListKnownPackagesRequest()
      )

      override def submitRequest(
          service: PackageManagementServiceStub,
          request: ListKnownPackagesRequest,
      ): Future[ListKnownPackagesResponse] =
        service.listKnownPackages(request)

      override def handleResponse(
          response: ListKnownPackagesResponse
      ): Either[String, Seq[PackageDetails]] =
        // NOTE(review): truncation happens client-side; the server returns the full list.
        Right(response.packageDetails.take(limit.value))
    }

  }
  object CommandCompletionService {
    /** All commands in this group run against the CommandCompletionService stub. */
    abstract class BaseCommand[Req, Resp, Res] extends GrpcAdminCommand[Req, Resp, Res] {
      override type Svc = CommandCompletionServiceStub
      override def createService(channel: ManagedChannel): CommandCompletionServiceStub =
        CommandCompletionServiceGrpc.stub(channel)
    }

    /** Fetches the completion-stream end offset.
      * Fails if the server response carries no offset.
      */
    final case class CompletionEnd()
        extends BaseCommand[CompletionEndRequest, CompletionEndResponse, LedgerOffset] {

      override def createRequest(): Either[String, CompletionEndRequest] =
        Right(CompletionEndRequest())

      override def submitRequest(
          service: CommandCompletionServiceStub,
          request: CompletionEndRequest,
      ): Future[CompletionEndResponse] =
        service.completionEnd(request)
      override def handleResponse(response: CompletionEndResponse): Either[String, LedgerOffset] =
        response.offset.toRight("Empty CompletionEndResponse received without offset")
    }

    /** Collects completions for `partyId` starting at `offset` until
      * `expectedCompletions` matching completions have arrived or `timeout` elapses.
      * @param filter client-side predicate applied to each completion
      * @param scheduler used by the streaming helper to enforce the timeout
      */
    final case class CompletionRequest(
        partyId: LfPartyId,
        offset: LedgerOffset,
        expectedCompletions: Int,
        timeout: java.time.Duration,
        applicationId: String,
    )(filter: Completion => Boolean, scheduler: ScheduledExecutorService)
        extends BaseCommand[CompletionStreamRequest, Seq[Completion], Seq[Completion]] {

      override def createRequest(): Either[String, CompletionStreamRequest] =
        Right(
          CompletionStreamRequest(
            applicationId = applicationId,
            parties = Seq(partyId),
            offset = Some(offset),
          )
        )

      override def submitRequest(
          service: CommandCompletionServiceStub,
          request: CompletionStreamRequest,
      ): Future[Seq[Completion]] = {
        import scala.jdk.DurationConverters.*
        GrpcAdminCommand
          .streamedResponse[CompletionStreamRequest, CompletionStreamResponse, Completion](
            service.completionStream,
            _.completions.filter(filter),
            request,
            expectedCompletions,
            timeout.toScala,
            scheduler,
          )
      }

      override def handleResponse(response: Seq[Completion]): Either[String, Seq[Completion]] =
        Right(response)

      // The client-side timeout above governs the collection; don't cut the stream via gRPC.
      override def timeoutType: TimeoutType = ServerEnforcedTimeout
    }

    /** Like [[CompletionRequest]], but additionally pairs each completion with the
      * checkpoint of the stream response it arrived in (if any).
      */
    final case class CompletionCheckpointRequest(
        partyId: LfPartyId,
        offset: LedgerOffset,
        expectedCompletions: Int,
        timeout: NonNegativeDuration,
        applicationId: String,
    )(filter: Completion => Boolean, scheduler: ScheduledExecutorService)
        extends BaseCommand[CompletionStreamRequest, Seq[(Completion, Option[Checkpoint])], Seq[
          (Completion, Option[Checkpoint])
        ]] {

      override def createRequest(): Either[String, CompletionStreamRequest] =
        Right(
          CompletionStreamRequest(
            applicationId = applicationId,
            parties = Seq(partyId),
            offset = Some(offset),
          )
        )

      override def submitRequest(
          service: CommandCompletionServiceStub,
          request: CompletionStreamRequest,
      ): Future[Seq[(Completion, Option[Checkpoint])]] = {
        // Pair every matching completion with the checkpoint of its enclosing response.
        def extract(response: CompletionStreamResponse): Seq[(Completion, Option[Checkpoint])] = {
          val checkpoint = response.checkpoint
          response.completions.filter(filter).map(_ -> checkpoint)
        }

        GrpcAdminCommand.streamedResponse[
          CompletionStreamRequest,
          CompletionStreamResponse,
          (Completion, Option[Checkpoint]),
        ](
          service.completionStream,
          extract,
          request,
          expectedCompletions,
          timeout.asFiniteApproximation,
          scheduler,
        )
      }

      override def handleResponse(
          response: Seq[(Completion, Option[Checkpoint])]
      ): Either[String, Seq[(Completion, Option[Checkpoint])]] =
        Right(response)

      override def timeoutType: TimeoutType = ServerEnforcedTimeout
    }

    /** Open-ended completion subscription; forwards completions to `observer`.
      * Returns an AutoCloseable handle whose close() cancels the stream's gRPC context.
      */
    final case class Subscribe(
        observer: StreamObserver[Completion],
        parties: Seq[String],
        offset: Option[LedgerOffset],
        applicationId: String,
    )(implicit loggingContext: ErrorLoggingContext)
        extends BaseCommand[CompletionStreamRequest, AutoCloseable, AutoCloseable] {
      // The subscription should never be cut short because of a gRPC timeout
      override def timeoutType: TimeoutType = ServerEnforcedTimeout

      override def createRequest(): Either[String, CompletionStreamRequest] = Right {
        CompletionStreamRequest(
          applicationId = applicationId,
          parties = parties,
          offset = offset,
        )
      }

      override def submitRequest(
          service: CommandCompletionServiceStub,
          request: CompletionStreamRequest,
      ): Future[AutoCloseable] = {
        val rawObserver = new ForwardingStreamObserver[CompletionStreamResponse, Completion](
          observer,
          _.completions,
        )
        val context = Context.current().withCancellation()
        context.run(() => service.completionStream(request, rawObserver))
        Future.successful(context)
      }

      override def handleResponse(response: AutoCloseable): Either[String, AutoCloseable] = Right(
        response
      )
    }
  }

  object LedgerConfigurationService {
    /** All commands in this group run against the LedgerConfigurationService stub. */
    abstract class BaseCommand[Req, Resp, Res] extends GrpcAdminCommand[Req, Resp, Res] {
      override type Svc = LedgerConfigurationServiceStub
      override def createService(channel: ManagedChannel): LedgerConfigurationServiceStub =
        LedgerConfigurationServiceGrpc.stub(channel)
    }

    /** Collects ledger configurations from the configuration stream until
      * `expectedConfigs` have arrived or `timeout` elapses.
      */
    final case class GetLedgerConfiguration(
        expectedConfigs: Int,
        timeout: FiniteDuration,
    )(scheduler: ScheduledExecutorService)
        extends BaseCommand[GetLedgerConfigurationRequest, Seq[LedgerConfiguration], Seq[
          LedgerConfiguration
        ]] {

      override def createRequest(): Either[String, GetLedgerConfigurationRequest] =
        Right(GetLedgerConfigurationRequest())

      override def submitRequest(
          service: LedgerConfigurationServiceStub,
          request: GetLedgerConfigurationRequest,
      ): Future[Seq[LedgerConfiguration]] =
        GrpcAdminCommand.streamedResponse[
          GetLedgerConfigurationRequest,
          GetLedgerConfigurationResponse,
          LedgerConfiguration,
        ](
          service.getLedgerConfiguration,
          _.ledgerConfiguration.toList,
          request,
          expectedConfigs,
          timeout,
          scheduler,
        )

      override def handleResponse(
          response: Seq[LedgerConfiguration]
      ): Either[String, Seq[LedgerConfiguration]] =
        Right(response)

      override def timeoutType: TimeoutType = ServerEnforcedTimeout
    }
  }
  /** Shared fields and request assembly for the command-submission commands
    * (used by both CommandSubmissionService and CommandService variants).
    * `mkCommand` builds the v1 `Commands` payload; a random UUID command id is
    * generated when `commandId` is empty.
    */
  private[commands] trait SubmitCommand extends PrettyPrinting {
    def actAs: Seq[LfPartyId]
    def readAs: Seq[LfPartyId]
    def commands: Seq[Command]
    def workflowId: String
    def commandId: String
    def deduplicationPeriod: Option[DeduplicationPeriod]
    def submissionId: String
    def minLedgerTimeAbs: Option[Instant]
    def disclosedContracts: Seq[DisclosedContract]
    def applicationId: String

    protected def mkCommand: CommandsV1 = CommandsV1(
      workflowId = workflowId,
      applicationId = applicationId,
      // Fall back to a fresh UUID so every submission has a usable command id.
      commandId = if (commandId.isEmpty) UUID.randomUUID().toString else commandId,
      actAs = actAs,
      readAs = readAs,
      commands = commands,
      deduplicationPeriod = deduplicationPeriod.fold(
        CommandsV1.DeduplicationPeriod.Empty: CommandsV1.DeduplicationPeriod
      ) {
        case DeduplicationPeriod.DeduplicationDuration(duration) =>
          CommandsV1.DeduplicationPeriod.DeduplicationDuration(
            ProtoConverter.DurationConverter.toProtoPrimitive(duration)
          )
        case DeduplicationPeriod.DeduplicationOffset(offset) =>
          CommandsV1.DeduplicationPeriod.DeduplicationOffset(
            offset.toHexString
          )
      },
      minLedgerTimeAbs =
        minLedgerTimeAbs.map(t => ProtoConverter.InstantConverter.toProtoPrimitive(t)),
      submissionId = submissionId,
      disclosedContracts = disclosedContracts,
    )

    override def pretty: Pretty[this.type] =
      prettyOfClass(
        param("actAs", _.actAs),
        param("readAs", _.readAs),
        param("commandId", _.commandId.singleQuoted),
        param("workflowId", _.workflowId.singleQuoted),
        param("submissionId", _.submissionId.singleQuoted),
        param("deduplicationPeriod", _.deduplicationPeriod),
        param("applicationId", _.applicationId.singleQuoted),
        paramIfDefined("minLedgerTimeAbs", _.minLedgerTimeAbs),
        paramWithoutValue("commands"),
      )
  }

  object CommandSubmissionService {
    /** All commands in this group run against the CommandSubmissionService stub. */
    trait BaseCommand[Req, Resp, Res] extends GrpcAdminCommand[Req, Resp, Res] {
      override type Svc = CommandSubmissionServiceStub
      override def createService(channel: ManagedChannel): CommandSubmissionServiceStub =
        CommandSubmissionServiceGrpc.stub(channel)
    }

    /** Fire-and-forget submission: acknowledges acceptance only; the outcome must
      * be observed via the completion stream.
      */
    final case class Submit(
        override val actAs: Seq[LfPartyId],
        override val readAs: Seq[LfPartyId],
        override val commands: Seq[Command],
        override val workflowId: String,
        override val commandId: String,
        override val deduplicationPeriod: Option[DeduplicationPeriod],
        override val submissionId: String,
        override val minLedgerTimeAbs: Option[Instant],
        override val disclosedContracts: Seq[DisclosedContract],
        override val applicationId: String,
    ) extends SubmitCommand
        with BaseCommand[SubmitRequest, Empty, Unit] {
      override def createRequest(): Either[String, SubmitRequest] = Right(
        SubmitRequest(commands = Some(mkCommand))
      )

      override def submitRequest(
          service: CommandSubmissionServiceStub,
          request: SubmitRequest,
      ): Future[Empty] = {
        service.submit(request)
      }

      override def handleResponse(response: Empty): Either[String, Unit] = Right(())
    }
  }

  object CommandService {
    /** All commands in this group run against the CommandService stub. */
    trait BaseCommand[Req, Resp, Res] extends GrpcAdminCommand[Req, Resp, Res] {
      override type Svc = CommandServiceStub
      override def createService(channel: ManagedChannel): CommandServiceStub =
        CommandServiceGrpc.stub(channel)
    }

    /** Submits a command and waits synchronously for the resulting transaction tree. */
    final case class SubmitAndWaitTransactionTree(
        override val actAs: Seq[LfPartyId],
        override val readAs: Seq[LfPartyId],
        override val commands: Seq[Command],
        override val workflowId: String,
        override val commandId: String,
        override val deduplicationPeriod: Option[DeduplicationPeriod],
        override val submissionId: String,
        override val minLedgerTimeAbs: Option[Instant],
        override val disclosedContracts: Seq[DisclosedContract],
        override val applicationId: String,
    ) extends SubmitCommand
        with BaseCommand[
          SubmitAndWaitRequest,
          SubmitAndWaitForTransactionTreeResponse,
          TransactionTree,
        ] {

      override def createRequest(): Either[String, SubmitAndWaitRequest] =
        Right(SubmitAndWaitRequest(commands = Some(mkCommand)))

      override def submitRequest(
          service: CommandServiceStub,
          request: SubmitAndWaitRequest,
      ): Future[SubmitAndWaitForTransactionTreeResponse] =
        service.submitAndWaitForTransactionTree(request)

      override def handleResponse(
          response: SubmitAndWaitForTransactionTreeResponse
      ): Either[String, TransactionTree] =
        response.transaction.toRight("Received response without any transaction tree")

      // Command completion may take arbitrarily long; don't enforce a client timeout.
      override def timeoutType: TimeoutType = DefaultUnboundedTimeout

    }

    /** Submits a command and waits synchronously for the resulting flat transaction. */
    final case class SubmitAndWaitTransaction(
        override val actAs: Seq[LfPartyId],
        override val readAs: Seq[LfPartyId],
        override val commands: Seq[Command],
        override val workflowId: String,
        override val commandId: String,
        override val deduplicationPeriod: Option[DeduplicationPeriod],
        override val submissionId: String,
        override val minLedgerTimeAbs: Option[Instant],
        override val disclosedContracts: Seq[DisclosedContract],
        override val applicationId: String,
    ) extends SubmitCommand
        with BaseCommand[SubmitAndWaitRequest, SubmitAndWaitForTransactionResponse, Transaction] {

      override def createRequest(): Either[String, SubmitAndWaitRequest] =
        Right(SubmitAndWaitRequest(commands = Some(mkCommand)))

      override def submitRequest(
          service: CommandServiceStub,
          request: SubmitAndWaitRequest,
      ): Future[SubmitAndWaitForTransactionResponse] =
        service.submitAndWaitForTransaction(request)

      override def handleResponse(
          response: SubmitAndWaitForTransactionResponse
      ): Either[String, Transaction] =
        response.transaction.toRight("Received response without any transaction")

      override def timeoutType: TimeoutType = DefaultUnboundedTimeout

    }
  }
  object AcsService {
    /** All commands in this group run against the ActiveContractsService stub. */
    abstract class BaseCommand[Req, Resp, Res] extends GrpcAdminCommand[Req, Resp, Res] {
      override type Svc = ActiveContractsServiceStub
      override def createService(channel: ManagedChannel): ActiveContractsServiceStub =
        ActiveContractsServiceGrpc.stub(channel)
    }

    /** Fetches (up to `limit`) active contracts visible to `parties`, optionally
      * restricted to the given template ids.
      */
    final case class GetActiveContracts(
        parties: Set[LfPartyId],
        limit: PositiveInt,
        templateFilter: Seq[TemplateId] = Seq.empty,
        verbose: Boolean = true,
        timeout: FiniteDuration,
        includeCreatedEventBlob: Boolean = false,
    )(scheduler: ScheduledExecutorService)
        extends BaseCommand[GetActiveContractsRequest, Seq[WrappedCreatedEvent], Seq[
          WrappedCreatedEvent
        ]] {

      override def createRequest(): Either[String, GetActiveContractsRequest] = {
        // NOTE(review): includeCreatedEventBlob only takes effect when templateFilter
        // is non-empty; with the default (empty) filter the flag is silently ignored
        // — confirm this is intended.
        val filter =
          if (templateFilter.nonEmpty) {
            Filters(
              Some(
                InclusiveFilters(templateFilters =
                  templateFilter.map(tId =>
                    TemplateFilter(Some(tId.toIdentifier), includeCreatedEventBlob)
                  )
                )
              )
            )
          } else Filters.defaultInstance
        Right(
          GetActiveContractsRequest(
            // Same filter is applied for every requested party.
            filter = Some(TransactionFilter(parties.map((_, filter)).toMap)),
            verbose = verbose,
          )
        )
      }

      override def submitRequest(
          service: ActiveContractsServiceStub,
          request: GetActiveContractsRequest,
      ): Future[Seq[WrappedCreatedEvent]] = {
        GrpcAdminCommand.streamedResponse[
          GetActiveContractsRequest,
          GetActiveContractsResponse,
          WrappedCreatedEvent,
        ](
          service.getActiveContracts,
          _.activeContracts.map(WrappedCreatedEvent),
          request,
          limit.value,
          timeout,
          scheduler,
        )
      }

      override def handleResponse(
          response: Seq[WrappedCreatedEvent]
      ): Either[String, Seq[WrappedCreatedEvent]] = {
        Right(response)
      }

      // fetching ACS might take long if we fetch a lot of data
      override def timeoutType: TimeoutType = DefaultUnboundedTimeout

    }

  }
  object ParticipantPruningService {
    /** All commands in this group run against the ParticipantPruningService stub. */
    abstract class BaseCommand[Req, Resp, Res] extends GrpcAdminCommand[Req, Resp, Res] {
      override type Svc = ParticipantPruningServiceStub
      override def createService(channel: ManagedChannel): ParticipantPruningServiceStub =
        ParticipantPruningServiceGrpc.stub(channel)

      // all pruning commands will take a long time
      override def timeoutType: TimeoutType = DefaultUnboundedTimeout

    }

    /** Prunes the ledger up to (and including) the given absolute offset.
      * Fails request creation if `pruneUpTo` is not an absolute offset.
      */
    final case class Prune(pruneUpTo: LedgerOffset)
        extends BaseCommand[PruneRequest, PruneResponse, Unit] {

      override def timeoutType: TimeoutType =
        DefaultUnboundedTimeout // pruning can take a very long time

      override def createRequest(): Either[String, PruneRequest] =
        pruneUpTo.value.absolute
          .toRight("The pruneUpTo ledger offset needs to be absolute")
          .map(
            PruneRequest(
              _,
              // canton always prunes divulged contracts both in the ledger api index-db and in canton stores
              pruneAllDivulgedContracts = true,
            )
          )

      override def submitRequest(
          service: ParticipantPruningServiceStub,
          request: PruneRequest,
      ): Future[PruneResponse] =
        service.prune(request)

      override def handleResponse(response: PruneResponse): Either[String, Unit] = Right(())
    }
  }
  object Users {
    /** All commands in this group run against the UserManagementService stub. */
    abstract class BaseCommand[Req, Resp, Res] extends GrpcAdminCommand[Req, Resp, Res] {
      override type Svc = UserManagementServiceStub

      override def createService(channel: ManagedChannel): UserManagementServiceStub =
        UserManagementServiceGrpc.stub(channel)
    }

    /** Mixin translating (actAs, readAs, participantAdmin) into Ledger API UserRight messages. */
    trait HasRights {
      def actAs: Set[LfPartyId]
      def readAs: Set[LfPartyId]
      def participantAdmin: Boolean

      protected def getRights: Seq[UserRight] = {
        actAs.toSeq.map(x => UserRight().withCanActAs(UserRight.CanActAs(x))) ++
          readAs.toSeq.map(x => UserRight().withCanReadAs(UserRight.CanReadAs(x))) ++
          (if (participantAdmin) Seq(UserRight().withParticipantAdmin(UserRight.ParticipantAdmin()))
           else Seq())
    }

    /** Creates a Ledger API user together with its initial rights. */
    final case class Create(
        id: String,
        actAs: Set[LfPartyId],
        primaryParty: Option[LfPartyId],
        readAs: Set[LfPartyId],
        participantAdmin: Boolean,
        isDeactivated: Boolean,
        annotations: Map[String, String],
        identityProviderId: String,
    ) extends BaseCommand[CreateUserRequest, CreateUserResponse, LedgerApiUser]
        with HasRights {

      override def submitRequest(
          service: UserManagementServiceStub,
          request: CreateUserRequest,
      ): Future[CreateUserResponse] =
        service.createUser(request)

      override def createRequest(): Either[String, CreateUserRequest] = Right(
        CreateUserRequest(
          user = Some(
            User(
              id = id,
              primaryParty = primaryParty.getOrElse(""),
              isDeactivated = isDeactivated,
              metadata = Some(ObjectMeta(annotations = annotations)),
              identityProviderId = identityProviderId,
            )
          ),
          rights = getRights,
        )
      )

      override def handleResponse(response: CreateUserResponse): Either[String, LedgerApiUser] =
        ProtoConverter
          .parseRequired(LedgerApiUser.fromProtoV0, "user", response.user)
          .leftMap(_.toString)

    }

    /** Updates selected fields of a user via a field-mask update.
      * Each `...Update` option controls whether the corresponding field appears in
      * the update mask; `primaryPartyUpdate = Some(None)` clears the primary party.
      */
    final case class Update(
        id: String,
        primaryPartyUpdate: Option[Option[PartyId]],
        isDeactivatedUpdate: Option[Boolean],
        annotationsUpdate: Option[Map[String, String]],
        resourceVersionO: Option[String],
        identityProviderId: String,
    ) extends BaseCommand[UpdateUserRequest, UpdateUserResponse, LedgerApiUser] {

      override def submitRequest(
          service: UserManagementServiceStub,
          request: UpdateUserRequest,
      ): Future[UpdateUserResponse] =
        service.updateUser(request)

      override def createRequest(): Either[String, UpdateUserRequest] = {
        val user = User(
          id = id,
          primaryParty = primaryPartyUpdate.fold("")(_.fold("")(_.toProtoPrimitive)),
          isDeactivated = isDeactivatedUpdate.getOrElse(false),
          metadata = Some(
            ObjectMeta(
              annotations = annotationsUpdate.getOrElse(Map.empty),
              resourceVersion = resourceVersionO.getOrElse(""),
            )
          ),
          identityProviderId = identityProviderId,
        )
        // Only the fields explicitly requested for update end up in the mask.
        val updatePaths: Seq[String] = Seq(
          primaryPartyUpdate.map(_ => "primary_party"),
          isDeactivatedUpdate.map(_ => "is_deactivated"),
          annotationsUpdate.map(_ => "metadata.annotations"),
        ).flatten
        Right(
          UpdateUserRequest(
            user = Some(user),
            updateMask = Some(FieldMask(paths = updatePaths)),
          )
        )
      }

      override def handleResponse(response: UpdateUserResponse): Either[String, LedgerApiUser] =
        ProtoConverter
          .parseRequired(LedgerApiUser.fromProtoV0, "user", response.user)
          .leftMap(_.toString)

    }

    /** Fetches a single user by id. */
    final case class Get(
        id: String,
        identityProviderId: String,
    ) extends BaseCommand[GetUserRequest, GetUserResponse, LedgerApiUser] {

      override def submitRequest(
          service: UserManagementServiceStub,
          request: GetUserRequest,
      ): Future[GetUserResponse] =
        service.getUser(request)

      override def createRequest(): Either[String, GetUserRequest] = Right(
        GetUserRequest(
          userId = id,
          identityProviderId = identityProviderId,
        )
      )

      override def handleResponse(response: GetUserResponse): Either[String, LedgerApiUser] =
        ProtoConverter
          .parseRequired(LedgerApiUser.fromProtoV0, "user", response.user)
          .leftMap(_.toString)

    }

    /** Deletes a user by id. */
    final case class Delete(
        id: String,
        identityProviderId: String,
    ) extends BaseCommand[DeleteUserRequest, DeleteUserResponse, Unit] {

      override def submitRequest(
          service: UserManagementServiceStub,
          request: DeleteUserRequest,
      ): Future[DeleteUserResponse] =
        service.deleteUser(request)

      override def createRequest(): Either[String, DeleteUserRequest] = Right(
        DeleteUserRequest(
          userId = id,
          identityProviderId = identityProviderId,
        )
      )

      override def handleResponse(response: DeleteUserResponse): Either[String, Unit] = Right(())

    }

    /** Reassigns a user from one identity provider to another. */
    final case class UpdateIdp(
        id: String,
        sourceIdentityProviderId: String,
        targetIdentityProviderId: String,
    ) extends BaseCommand[
          UpdateUserIdentityProviderRequest,
          UpdateUserIdentityProviderResponse,
          Unit,
        ] {

      override def submitRequest(
          service: UserManagementServiceStub,
          request: UpdateUserIdentityProviderRequest,
      ): Future[UpdateUserIdentityProviderResponse] =
        service.updateUserIdentityProviderId(request)

      override def createRequest(): Either[String, UpdateUserIdentityProviderRequest] = Right(
        UpdateUserIdentityProviderRequest(
          userId = id,
          sourceIdentityProviderId = sourceIdentityProviderId,
          targetIdentityProviderId = targetIdentityProviderId,
        )
      )

      override def handleResponse(
          response: UpdateUserIdentityProviderResponse
      ): Either[String, Unit] = Right(())

    }

    /** Lists users (one page), filtered client-side by `filterUser` in the response handler. */
    final case class List(
        filterUser: String,
        pageToken: String,
        pageSize: Int,
        identityProviderId: String,
    ) extends BaseCommand[ListUsersRequest, ListUsersResponse, ListLedgerApiUsersResult] {

      override def submitRequest(
          service: UserManagementServiceStub,
          request: ListUsersRequest,
      ): Future[ListUsersResponse] =
        service.listUsers(request)

      override def createRequest(): Either[String, ListUsersRequest] = Right(
        ListUsersRequest(
          pageToken = pageToken,
          pageSize = pageSize,
          identityProviderId = identityProviderId,
        )
      )

      override def handleResponse(
          response: ListUsersResponse
      ): Either[String, ListLedgerApiUsersResult] =
        ListLedgerApiUsersResult.fromProtoV0(response, filterUser).leftMap(_.toString)

    }

    object Rights {
      /** Grants the given rights to a user; returns only the newly granted rights. */
      final case class Grant(
          id: String,
          actAs: Set[LfPartyId],
          readAs: Set[LfPartyId],
          participantAdmin: Boolean,
          identityProviderId: String,
      ) extends BaseCommand[GrantUserRightsRequest, GrantUserRightsResponse, UserRights]
          with HasRights {

        override def submitRequest(
            service: UserManagementServiceStub,
            request: GrantUserRightsRequest,
        ): Future[GrantUserRightsResponse] =
          service.grantUserRights(request)

        override def createRequest(): Either[String, GrantUserRightsRequest] = Right(
          GrantUserRightsRequest(
            userId = id,
            rights = getRights,
            identityProviderId = identityProviderId,
          )
        )

        override def handleResponse(response: GrantUserRightsResponse): Either[String, UserRights] =
          UserRights.fromProtoV0(response.newlyGrantedRights).leftMap(_.toString)

      }

      /** Revokes the given rights from a user; returns only the newly revoked rights. */
      final case class Revoke(
          id: String,
          actAs: Set[LfPartyId],
          readAs: Set[LfPartyId],
          participantAdmin: Boolean,
          identityProviderId: String,
      ) extends BaseCommand[RevokeUserRightsRequest, RevokeUserRightsResponse, UserRights]
          with HasRights {

        override def submitRequest(
            service: UserManagementServiceStub,
            request: RevokeUserRightsRequest,
        ): Future[RevokeUserRightsResponse] =
          service.revokeUserRights(request)

        override def createRequest(): Either[String, RevokeUserRightsRequest] = Right(
          RevokeUserRightsRequest(
            userId = id,
            rights = getRights,
            identityProviderId = identityProviderId,
          )
        )

        override def handleResponse(
            response: RevokeUserRightsResponse
        ): Either[String, UserRights] =
          UserRights.fromProtoV0(response.newlyRevokedRights).leftMap(_.toString)

      }

      /** Lists all rights currently held by a user. */
      final case class List(id: String, identityProviderId: String)
          extends BaseCommand[ListUserRightsRequest, ListUserRightsResponse, UserRights] {

        override def submitRequest(
            service: UserManagementServiceStub,
            request: ListUserRightsRequest,
        ): Future[ListUserRightsResponse] =
          service.listUserRights(request)

        override def createRequest(): Either[String, ListUserRightsRequest] = Right(
          ListUserRightsRequest(userId = id, identityProviderId = identityProviderId)
        )

        override def handleResponse(response: ListUserRightsResponse): Either[String, UserRights] =
          UserRights.fromProtoV0(response.rights).leftMap(_.toString)

      }

    }

  }
Update( + identityProviderConfig: domain.IdentityProviderConfig, + updateMask: FieldMask, + ) extends BaseCommand[ + UpdateIdentityProviderConfigRequest, + UpdateIdentityProviderConfigResponse, + IdentityProviderConfig, + ] { + + override def submitRequest( + service: IdentityProviderConfigServiceStub, + request: UpdateIdentityProviderConfigRequest, + ): Future[UpdateIdentityProviderConfigResponse] = + service.updateIdentityProviderConfig(request) + + override def createRequest(): Either[String, UpdateIdentityProviderConfigRequest] = + Right( + UpdateIdentityProviderConfigRequest( + identityProviderConfig = + Some(IdentityProviderConfigClient.toProtoConfig(identityProviderConfig)), + Some(updateMask), + ) + ) + + override def handleResponse( + response: UpdateIdentityProviderConfigResponse + ): Either[String, IdentityProviderConfig] = + response.identityProviderConfig.toRight("config could not be updated") + } + + final case class Delete(identityProviderId: IdentityProviderId) + extends BaseCommand[ + DeleteIdentityProviderConfigRequest, + DeleteIdentityProviderConfigResponse, + Unit, + ] { + + override def submitRequest( + service: IdentityProviderConfigServiceStub, + request: DeleteIdentityProviderConfigRequest, + ): Future[DeleteIdentityProviderConfigResponse] = + service.deleteIdentityProviderConfig(request) + + override def createRequest(): Either[String, DeleteIdentityProviderConfigRequest] = + Right( + DeleteIdentityProviderConfigRequest(identityProviderId = + identityProviderId.toRequestString + ) + ) + + override def handleResponse( + response: DeleteIdentityProviderConfigResponse + ): Either[String, Unit] = + Right(()) + } + + final case class Get(identityProviderId: IdentityProviderId) + extends BaseCommand[ + GetIdentityProviderConfigRequest, + GetIdentityProviderConfigResponse, + IdentityProviderConfig, + ] { + + override def submitRequest( + service: IdentityProviderConfigServiceStub, + request: GetIdentityProviderConfigRequest, + ): 
Future[GetIdentityProviderConfigResponse] = + service.getIdentityProviderConfig(request) + + override def createRequest(): Either[String, GetIdentityProviderConfigRequest] = + Right( + GetIdentityProviderConfigRequest(identityProviderId.toRequestString) + ) + + override def handleResponse( + response: GetIdentityProviderConfigResponse + ): Either[String, IdentityProviderConfig] = + Right(response.getIdentityProviderConfig) + } + + final case class List() + extends BaseCommand[ + ListIdentityProviderConfigsRequest, + ListIdentityProviderConfigsResponse, + Seq[IdentityProviderConfig], + ] { + + override def submitRequest( + service: IdentityProviderConfigServiceStub, + request: ListIdentityProviderConfigsRequest, + ): Future[ListIdentityProviderConfigsResponse] = + service.listIdentityProviderConfigs(request) + + override def createRequest(): Either[String, ListIdentityProviderConfigsRequest] = + Right( + ListIdentityProviderConfigsRequest() + ) + + override def handleResponse( + response: ListIdentityProviderConfigsResponse + ): Either[String, Seq[IdentityProviderConfig]] = + Right(response.identityProviderConfigs) + } + + } + + object Metering { + abstract class BaseCommand[Req, Resp, Res] extends GrpcAdminCommand[Req, Resp, Res] { + override type Svc = MeteringReportServiceStub + + override def createService(channel: ManagedChannel): MeteringReportServiceStub = + MeteringReportServiceGrpc.stub(channel) + } + + final case class GetReport( + from: CantonTimestamp, + to: Option[CantonTimestamp], + applicationId: Option[String], + ) extends BaseCommand[ + GetMeteringReportRequest, + GetMeteringReportResponse, + String, + ] { + + override def submitRequest( + service: MeteringReportServiceStub, + request: GetMeteringReportRequest, + ): Future[GetMeteringReportResponse] = + service.getMeteringReport(request) + + override def createRequest(): Either[String, GetMeteringReportRequest] = + Right( + GetMeteringReportRequest( + from = Some(from.toProtoPrimitive), + to = 
to.map(_.toProtoPrimitive), + applicationId = applicationId.getOrElse(""), + ) + ) + + override def handleResponse( + response: GetMeteringReportResponse + ): Either[String, String] = + LedgerMeteringReport.fromProtoV0(response).leftMap(_.toString) + } + } + + object Time { + abstract class BaseCommand[Req, Resp, Res] extends GrpcAdminCommand[Req, Resp, Res] { + override type Svc = TimeServiceStub + + override def createService(channel: ManagedChannel): TimeServiceStub = + TimeServiceGrpc.stub(channel) + } + + final case class Get(timeout: FiniteDuration)(scheduler: ScheduledExecutorService) + extends BaseCommand[ + GetTimeRequest, + Seq[Either[String, CantonTimestamp]], + CantonTimestamp, + ] { + + override def submitRequest( + service: TimeServiceStub, + request: GetTimeRequest, + ): Future[Seq[Either[String, CantonTimestamp]]] = + GrpcAdminCommand.streamedResponse[ + GetTimeRequest, + GetTimeResponse, + Either[String, CantonTimestamp], + ]( + service.getTime, + x => { + val tmp = x.currentTime + .toRight("Empty timestamp received from ledger Api server") + .flatMap(CantonTimestamp.fromProtoPrimitive(_).leftMap(_.message)) + Seq(tmp) + }, + request, + 1, + timeout: FiniteDuration, + scheduler, + ) + + /** Create the request from configured options + */ + override def createRequest(): Either[String, GetTimeRequest] = Right(GetTimeRequest()) + + /** Handle the response the service has provided + */ + override def handleResponse( + response: Seq[Either[String, CantonTimestamp]] + ): Either[String, CantonTimestamp] = + response.headOption.toRight("No timestamp received from ledger Api server").flatten + } + + final case class Set(currentTime: CantonTimestamp, newTime: CantonTimestamp) + extends BaseCommand[ + SetTimeRequest, + Empty, + Unit, + ] { + + override def submitRequest(service: TimeServiceStub, request: SetTimeRequest): Future[Empty] = + service.setTime(request) + + override def createRequest(): Either[String, SetTimeRequest] = + Right( + SetTimeRequest( + 
currentTime = Some(currentTime.toProtoPrimitive), + newTime = Some(newTime.toProtoPrimitive), + ) + ) + + /** Handle the response the service has provided + */ + override def handleResponse(response: Empty): Either[String, Unit] = Either.unit + + } + + } + + object QueryService { + + abstract class BaseCommand[Req, Res] extends GrpcAdminCommand[Req, Res, Res] { + override type Svc = EventQueryServiceStub + + override def createService(channel: ManagedChannel): EventQueryServiceStub = + EventQueryServiceGrpc.stub(channel) + + override def handleResponse(response: Res): Either[String, Res] = Right(response) + } + + final case class GetEventsByContractId( + contractId: String, + requestingParties: Seq[String], + ) extends BaseCommand[ + GetEventsByContractIdRequest, + GetEventsByContractIdResponse, + ] { + + override def createRequest(): Either[String, GetEventsByContractIdRequest] = Right( + GetEventsByContractIdRequest( + contractId = contractId, + requestingParties = requestingParties, + ) + ) + + override def submitRequest( + service: EventQueryServiceStub, + request: GetEventsByContractIdRequest, + ): Future[GetEventsByContractIdResponse] = service.getEventsByContractId(request) + + } + + final case class GetEventsByContractKey( + contractKey: api.v1.value.Value, + requestingParties: Seq[String], + templateId: TemplateId, + continuationToken: Option[String], + ) extends BaseCommand[ + GetEventsByContractKeyRequest, + GetEventsByContractKeyResponse, + ] { + + override def createRequest(): Either[String, GetEventsByContractKeyRequest] = { + Right( + GetEventsByContractKeyRequest( + contractKey = Some(contractKey), + requestingParties = requestingParties, + templateId = Some(templateId.toIdentifier), + continuationToken = continuationToken.getOrElse( + GetEventsByContractKeyRequest.defaultInstance.continuationToken + ), + ) + ) + } + + override def submitRequest( + service: EventQueryServiceStub, + request: GetEventsByContractKeyRequest, + ): 
Future[GetEventsByContractKeyResponse] = service.getEventsByContractKey(request) + } + } +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/LedgerApiTypeWrappers.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/LedgerApiTypeWrappers.scala new file mode 100644 index 0000000000..37c2aa4faa --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/LedgerApiTypeWrappers.scala @@ -0,0 +1,112 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.admin.api.client.commands + +import com.daml.ledger.api.v1.event.CreatedEvent +import com.daml.ledger.api.v1.value.{Record, RecordField, Value} +import com.daml.lf.data.Time +import com.daml.lf.transaction.TransactionCoder +import com.digitalasset.canton.admin.api.client.data.TemplateId +import com.digitalasset.canton.crypto.Salt +import com.digitalasset.canton.ledger.api.util.TimestampConversion +import com.digitalasset.canton.protocol.{DriverContractMetadata, LfContractId} + +/** Wrapper class to make scalapb LedgerApi classes more convenient to access + */ +object LedgerApiTypeWrappers { + + /* + Provide a few utilities methods on CreatedEvent. + Notes: + * We don't use an `implicit class` because it makes the use of pretty + instances difficult (e.g. for `ledger_api.acs.of_all`). + + * Also, the name of some methods of `WrappedCreatedEvent`, such as `templateId`, + collides with one of the underlying event. 
+ */ + final case class WrappedCreatedEvent(event: CreatedEvent) { + + private def corrupt: String = s"corrupt event ${event.eventId} / ${event.contractId}" + + def templateId: TemplateId = { + TemplateId.fromIdentifier( + event.templateId.getOrElse( + throw new IllegalArgumentException( + s"Template Id not specified for event ${event.eventId} / ${event.contractId}" + ) + ) + ) + } + + def packageId: String = { + event.templateId.map(_.packageId).getOrElse(corrupt) + } + + private def flatten(prefix: Seq[String], field: RecordField): Seq[(String, Any)] = { + def extract(args: Value.Sum): Seq[(String, Any)] = + args match { + case x: Value.Sum.Record => x.value.fields.flatMap(flatten(prefix :+ field.label, _)) + case x: Value.Sum.Variant => x.value.value.toList.map(_.sum).flatMap(extract) + case x => Seq(((prefix :+ field.label).mkString("."), x.value)) + } + field.value.map(_.sum).toList.flatMap(extract) + } + + def arguments: Map[String, Any] = + event.createArguments.toList.flatMap(_.fields).flatMap(flatten(Seq(), _)).toMap + + def toContractData: ContractData = { + val templateId = TemplateId.fromIdentifier( + event.templateId.getOrElse(throw new IllegalArgumentException("Template Id not specified")) + ) + val createArguments = + event.createArguments.getOrElse( + throw new IllegalArgumentException("Create Arguments not specified") + ) + val lfContractId = + LfContractId + .fromString(event.contractId) + .getOrElse( + throw new IllegalArgumentException(s"Illegal Contract Id: ${event.contractId}") + ) + + val contractSaltO = for { + fatInstance <- TransactionCoder.decodeFatContractInstance(event.createdEventBlob).toOption + parsed = DriverContractMetadata.fromByteString(fatInstance.cantonData.toByteString) + } yield parsed.fold[Salt]( + err => + throw new IllegalArgumentException( + s"Could not deserialize driver contract metadata: ${err.message}" + ), + _.salt, + ) + + val ledgerCreateTimeO = + event.createdAt.map(TimestampConversion.toLf(_, 
TimestampConversion.ConversionMode.Exact)) + + ContractData( + templateId = templateId, + createArguments = createArguments, + signatories = event.signatories.toSet, + observers = event.observers.toSet, + inheritedContractId = lfContractId, + contractSalt = contractSaltO, + ledgerCreateTime = ledgerCreateTimeO, + ) + } + } + + /** Holder of "core" contract defining fields (particularly those relevant for importing contracts) */ + final case class ContractData( + templateId: TemplateId, + createArguments: Record, + // track signatories and observers for use as auth validation by daml engine + signatories: Set[String], + observers: Set[String], + inheritedContractId: LfContractId, + contractSalt: Option[Salt], + ledgerCreateTime: Option[Time.Timestamp], + ) + +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/LedgerApiV2Commands.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/LedgerApiV2Commands.scala new file mode 100644 index 0000000000..8f4f5d7d81 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/LedgerApiV2Commands.scala @@ -0,0 +1,849 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.admin.api.client.commands + +import com.daml.ledger.api.v1.command_completion_service.Checkpoint +import com.daml.ledger.api.v1.commands.{Command, DisclosedContract} +import com.daml.ledger.api.v1.event_query_service.GetEventsByContractIdRequest +import com.daml.ledger.api.v1.transaction_filter.{Filters, InclusiveFilters, TemplateFilter} +import com.daml.ledger.api.v2.command_completion_service.CommandCompletionServiceGrpc.CommandCompletionServiceStub +import com.daml.ledger.api.v2.command_completion_service.{ + CommandCompletionServiceGrpc, + CompletionStreamRequest, + CompletionStreamResponse, +} +import com.daml.ledger.api.v2.command_service.CommandServiceGrpc.CommandServiceStub +import com.daml.ledger.api.v2.command_service.{ + CommandServiceGrpc, + SubmitAndWaitForTransactionResponse, + SubmitAndWaitForTransactionTreeResponse, + SubmitAndWaitRequest, +} +import com.daml.ledger.api.v2.command_submission_service.CommandSubmissionServiceGrpc.CommandSubmissionServiceStub +import com.daml.ledger.api.v2.command_submission_service.{ + CommandSubmissionServiceGrpc, + SubmitReassignmentRequest, + SubmitReassignmentResponse, + SubmitRequest, + SubmitResponse, +} +import com.daml.ledger.api.v2.commands.Commands +import com.daml.ledger.api.v2.completion.Completion +import com.daml.ledger.api.v2.event_query_service.EventQueryServiceGrpc.EventQueryServiceStub +import com.daml.ledger.api.v2.event_query_service.{ + EventQueryServiceGrpc, + GetEventsByContractIdResponse, +} +import com.daml.ledger.api.v2.participant_offset.ParticipantOffset +import com.daml.ledger.api.v2.reassignment.{AssignedEvent, Reassignment, UnassignedEvent} +import com.daml.ledger.api.v2.reassignment_command.{ + AssignCommand, + ReassignmentCommand, + UnassignCommand, +} +import com.daml.ledger.api.v2.state_service.StateServiceGrpc.StateServiceStub +import com.daml.ledger.api.v2.state_service.{ + GetActiveContractsRequest, + 
GetActiveContractsResponse, + GetConnectedDomainsRequest, + GetConnectedDomainsResponse, + GetLedgerEndRequest, + GetLedgerEndResponse, + StateServiceGrpc, +} +import com.daml.ledger.api.v2.testing.time_service.TimeServiceGrpc.TimeServiceStub +import com.daml.ledger.api.v2.testing.time_service.{ + GetTimeRequest, + GetTimeResponse, + SetTimeRequest, + TimeServiceGrpc, +} +import com.daml.ledger.api.v2.transaction.{Transaction, TransactionTree} +import com.daml.ledger.api.v2.transaction_filter.TransactionFilter +import com.daml.ledger.api.v2.update_service.UpdateServiceGrpc.UpdateServiceStub +import com.daml.ledger.api.v2.update_service.{ + GetTransactionByIdRequest, + GetTransactionTreeResponse, + GetUpdateTreesResponse, + GetUpdatesRequest, + GetUpdatesResponse, + UpdateServiceGrpc, +} +import com.digitalasset.canton.LfPartyId +import com.digitalasset.canton.admin.api.client.commands.GrpcAdminCommand.{ + DefaultUnboundedTimeout, + ServerEnforcedTimeout, + TimeoutType, +} +import com.digitalasset.canton.admin.api.client.data.TemplateId +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.ledger.api.DeduplicationPeriod +import com.digitalasset.canton.logging.ErrorLoggingContext +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.networking.grpc.ForwardingStreamObserver +import com.digitalasset.canton.protocol.LfContractId +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.topology.DomainId +import com.google.protobuf.empty.Empty +import io.grpc.* +import io.grpc.stub.StreamObserver + +import java.time.Instant +import java.util.UUID +import java.util.concurrent.ScheduledExecutorService +import scala.concurrent.duration.FiniteDuration +import scala.concurrent.{ExecutionContext, Future} + +// TODO(#15280) delete LedgerApiCommands, and rename this to LedgerApiCommands +object 
LedgerApiV2Commands { + + object UpdateService { + + sealed trait UpdateTreeWrapper + sealed trait UpdateWrapper + final case class TransactionTreeWrapper(transactionTree: TransactionTree) + extends UpdateTreeWrapper + final case class TransactionWrapper(transaction: Transaction) extends UpdateWrapper + sealed trait ReassignmentWrapper extends UpdateTreeWrapper with UpdateWrapper { + def reassignment: Reassignment + } + object ReassignmentWrapper { + def apply(reassignment: Reassignment): ReassignmentWrapper = { + val event = reassignment.event + event.assignedEvent + .map[ReassignmentWrapper](AssignedWrapper(reassignment, _)) + .orElse( + event.unassignedEvent.map[ReassignmentWrapper](UnassignedWrapper(reassignment, _)) + ) + .getOrElse( + throw new IllegalStateException( + s"Invalid reassignment event (only supported UnassignedEvent and AssignedEvent): ${reassignment.event}" + ) + ) + } + } + final case class AssignedWrapper(reassignment: Reassignment, assignedEvent: AssignedEvent) + extends ReassignmentWrapper + final case class UnassignedWrapper(reassignment: Reassignment, unassignedEvent: UnassignedEvent) + extends ReassignmentWrapper + + trait BaseCommand[Req, Resp, Res] extends GrpcAdminCommand[Req, Resp, Res] { + override type Svc = UpdateServiceStub + + override def createService(channel: ManagedChannel): UpdateServiceStub = + UpdateServiceGrpc.stub(channel) + } + + trait SubscribeBase[Resp, Res] + extends BaseCommand[GetUpdatesRequest, AutoCloseable, AutoCloseable] { + // The subscription should never be cut short because of a gRPC timeout + override def timeoutType: TimeoutType = ServerEnforcedTimeout + + def observer: StreamObserver[Res] + + def beginExclusive: ParticipantOffset + + def endInclusive: Option[ParticipantOffset] + + def filter: TransactionFilter + + def verbose: Boolean + + def doRequest( + service: UpdateServiceStub, + request: GetUpdatesRequest, + rawObserver: StreamObserver[Resp], + ): Unit + + def extractResults(response: Resp): 
IterableOnce[Res] + + implicit def loggingContext: ErrorLoggingContext + + override def createRequest(): Either[String, GetUpdatesRequest] = Right { + GetUpdatesRequest( + beginExclusive = Some(beginExclusive), + endInclusive = endInclusive, + verbose = verbose, + filter = Some(filter), + ) + } + + override def submitRequest( + service: UpdateServiceStub, + request: GetUpdatesRequest, + ): Future[AutoCloseable] = { + val rawObserver = new ForwardingStreamObserver[Resp, Res](observer, extractResults) + val context = Context.current().withCancellation() + context.run(() => doRequest(service, request, rawObserver)) + Future.successful(context) + } + + override def handleResponse(response: AutoCloseable): Either[String, AutoCloseable] = Right( + response + ) + } + + final case class SubscribeTrees( + override val observer: StreamObserver[UpdateTreeWrapper], + override val beginExclusive: ParticipantOffset, + override val endInclusive: Option[ParticipantOffset], + override val filter: TransactionFilter, + override val verbose: Boolean, + )(override implicit val loggingContext: ErrorLoggingContext) + extends SubscribeBase[GetUpdateTreesResponse, UpdateTreeWrapper] { + override def doRequest( + service: UpdateServiceStub, + request: GetUpdatesRequest, + rawObserver: StreamObserver[GetUpdateTreesResponse], + ): Unit = + service.getUpdateTrees(request, rawObserver) + + override def extractResults( + response: GetUpdateTreesResponse + ): IterableOnce[UpdateTreeWrapper] = + response.update.transactionTree + .map[UpdateTreeWrapper](TransactionTreeWrapper) + .orElse(response.update.reassignment.map(ReassignmentWrapper(_))) + } + + final case class SubscribeFlat( + override val observer: StreamObserver[UpdateWrapper], + override val beginExclusive: ParticipantOffset, + override val endInclusive: Option[ParticipantOffset], + override val filter: TransactionFilter, + override val verbose: Boolean, + )(override implicit val loggingContext: ErrorLoggingContext) + extends 
SubscribeBase[GetUpdatesResponse, UpdateWrapper] { + override def doRequest( + service: UpdateServiceStub, + request: GetUpdatesRequest, + rawObserver: StreamObserver[GetUpdatesResponse], + ): Unit = + service.getUpdates(request, rawObserver) + + override def extractResults(response: GetUpdatesResponse): IterableOnce[UpdateWrapper] = + response.update.transaction + .map[UpdateWrapper](TransactionWrapper) + .orElse(response.update.reassignment.map(ReassignmentWrapper(_))) + } + + final case class GetTransactionById(parties: Set[LfPartyId], id: String)(implicit + ec: ExecutionContext + ) extends BaseCommand[GetTransactionByIdRequest, GetTransactionTreeResponse, Option[ + TransactionTree + ]] + with PrettyPrinting { + override def createRequest(): Either[String, GetTransactionByIdRequest] = Right { + GetTransactionByIdRequest( + updateId = id, + requestingParties = parties.toSeq, + ) + } + + override def submitRequest( + service: UpdateServiceStub, + request: GetTransactionByIdRequest, + ): Future[GetTransactionTreeResponse] = { + // The Ledger API will throw an error if it can't find a transaction by ID. 
+ // However, as Canton is distributed, a transaction ID might show up later, so we don't treat this as + // an error and change it to a None + service.getTransactionTreeById(request).recover { + case e: StatusRuntimeException if e.getStatus.getCode == Status.Code.NOT_FOUND => + GetTransactionTreeResponse(None) + } + } + + override def handleResponse( + response: GetTransactionTreeResponse + ): Either[String, Option[TransactionTree]] = + Right(response.transaction) + + override def pretty: Pretty[GetTransactionById] = + prettyOfClass( + param("id", _.id.unquoted), + param("parties", _.parties), + ) + } + + } + + private[commands] trait SubmitCommand extends PrettyPrinting { + def actAs: Seq[LfPartyId] + def readAs: Seq[LfPartyId] + def commands: Seq[Command] + def workflowId: String + def commandId: String + def deduplicationPeriod: Option[DeduplicationPeriod] + def submissionId: String + def minLedgerTimeAbs: Option[Instant] + def disclosedContracts: Seq[DisclosedContract] + def domainId: DomainId + def applicationId: String + + protected def mkCommand: Commands = Commands( + workflowId = workflowId, + applicationId = applicationId, + commandId = if (commandId.isEmpty) UUID.randomUUID().toString else commandId, + actAs = actAs, + readAs = readAs, + commands = commands, + deduplicationPeriod = deduplicationPeriod.fold( + Commands.DeduplicationPeriod.Empty: Commands.DeduplicationPeriod + ) { + case DeduplicationPeriod.DeduplicationDuration(duration) => + Commands.DeduplicationPeriod.DeduplicationDuration( + ProtoConverter.DurationConverter.toProtoPrimitive(duration) + ) + case DeduplicationPeriod.DeduplicationOffset(offset) => + Commands.DeduplicationPeriod.DeduplicationOffset( + offset.toHexString + ) + }, + minLedgerTimeAbs = minLedgerTimeAbs.map(ProtoConverter.InstantConverter.toProtoPrimitive), + submissionId = submissionId, + disclosedContracts = disclosedContracts, + domainId = domainId.toProtoPrimitive, + ) + + override def pretty: Pretty[this.type] = + 
prettyOfClass( + param("actAs", _.actAs), + param("readAs", _.readAs), + param("commandId", _.commandId.singleQuoted), + param("workflowId", _.workflowId.singleQuoted), + param("submissionId", _.submissionId.singleQuoted), + param("deduplicationPeriod", _.deduplicationPeriod), + paramIfDefined("minLedgerTimeAbs", _.minLedgerTimeAbs), + paramWithoutValue("commands"), + ) + } + + object CommandSubmissionService { + trait BaseCommand[Req, Resp, Res] extends GrpcAdminCommand[Req, Resp, Res] { + override type Svc = CommandSubmissionServiceStub + override def createService(channel: ManagedChannel): CommandSubmissionServiceStub = + CommandSubmissionServiceGrpc.stub(channel) + } + + final case class Submit( + override val actAs: Seq[LfPartyId], + override val readAs: Seq[LfPartyId], + override val commands: Seq[Command], + override val workflowId: String, + override val commandId: String, + override val deduplicationPeriod: Option[DeduplicationPeriod], + override val submissionId: String, + override val minLedgerTimeAbs: Option[Instant], + override val disclosedContracts: Seq[DisclosedContract], + override val domainId: DomainId, + override val applicationId: String, + ) extends SubmitCommand + with BaseCommand[SubmitRequest, SubmitResponse, Unit] { + override def createRequest(): Either[String, SubmitRequest] = Right( + SubmitRequest(commands = Some(mkCommand)) + ) + + override def submitRequest( + service: CommandSubmissionServiceStub, + request: SubmitRequest, + ): Future[SubmitResponse] = { + service.submit(request) + } + + override def handleResponse(response: SubmitResponse): Either[String, Unit] = Right(()) + } + + final case class SubmitAssignCommand( + workflowId: String, + applicationId: String, + commandId: String, + submitter: LfPartyId, + submissionId: String, + unassignId: String, + source: DomainId, + target: DomainId, + ) extends BaseCommand[SubmitReassignmentRequest, SubmitReassignmentResponse, Unit] { + override def createRequest(): Either[String, 
SubmitReassignmentRequest] = Right( + SubmitReassignmentRequest( + Some( + ReassignmentCommand( + workflowId = workflowId, + applicationId = applicationId, + commandId = commandId, + submitter = submitter.toString, + command = ReassignmentCommand.Command.AssignCommand( + AssignCommand( + unassignId = unassignId, + source = source.toProtoPrimitive, + target = target.toProtoPrimitive, + ) + ), + submissionId = submissionId, + ) + ) + ) + ) + + override def submitRequest( + service: CommandSubmissionServiceStub, + request: SubmitReassignmentRequest, + ): Future[SubmitReassignmentResponse] = { + service.submitReassignment(request) + } + + override def handleResponse(response: SubmitReassignmentResponse): Either[String, Unit] = + Right(()) + } + + final case class SubmitUnassignCommand( + workflowId: String, + applicationId: String, + commandId: String, + submitter: LfPartyId, + submissionId: String, + contractId: LfContractId, + source: DomainId, + target: DomainId, + ) extends BaseCommand[SubmitReassignmentRequest, SubmitReassignmentResponse, Unit] { + override def createRequest(): Either[String, SubmitReassignmentRequest] = Right( + SubmitReassignmentRequest( + Some( + ReassignmentCommand( + workflowId = workflowId, + applicationId = applicationId, + commandId = commandId, + submitter = submitter.toString, + command = ReassignmentCommand.Command.UnassignCommand( + UnassignCommand( + contractId = contractId.coid.toString, + source = source.toProtoPrimitive, + target = target.toProtoPrimitive, + ) + ), + submissionId = submissionId, + ) + ) + ) + ) + + override def submitRequest( + service: CommandSubmissionServiceStub, + request: SubmitReassignmentRequest, + ): Future[SubmitReassignmentResponse] = { + service.submitReassignment(request) + } + + override def handleResponse(response: SubmitReassignmentResponse): Either[String, Unit] = + Right(()) + } + } + + object CommandService { + trait BaseCommand[Req, Resp, Res] extends GrpcAdminCommand[Req, Resp, Res] { + override 
type Svc = CommandServiceStub + override def createService(channel: ManagedChannel): CommandServiceStub = + CommandServiceGrpc.stub(channel) + } + + final case class SubmitAndWaitTransactionTree( + override val actAs: Seq[LfPartyId], + override val readAs: Seq[LfPartyId], + override val commands: Seq[Command], + override val workflowId: String, + override val commandId: String, + override val deduplicationPeriod: Option[DeduplicationPeriod], + override val submissionId: String, + override val minLedgerTimeAbs: Option[Instant], + override val disclosedContracts: Seq[DisclosedContract], + override val domainId: DomainId, + override val applicationId: String, + ) extends SubmitCommand + with BaseCommand[ + SubmitAndWaitRequest, + SubmitAndWaitForTransactionTreeResponse, + TransactionTree, + ] { + + override def createRequest(): Either[String, SubmitAndWaitRequest] = + Right(SubmitAndWaitRequest(commands = Some(mkCommand))) + + override def submitRequest( + service: CommandServiceStub, + request: SubmitAndWaitRequest, + ): Future[SubmitAndWaitForTransactionTreeResponse] = + service.submitAndWaitForTransactionTree(request) + + override def handleResponse( + response: SubmitAndWaitForTransactionTreeResponse + ): Either[String, TransactionTree] = + response.transaction.toRight("Received response without any transaction tree") + + override def timeoutType: TimeoutType = DefaultUnboundedTimeout + + } + + final case class SubmitAndWaitTransaction( + override val actAs: Seq[LfPartyId], + override val readAs: Seq[LfPartyId], + override val commands: Seq[Command], + override val workflowId: String, + override val commandId: String, + override val deduplicationPeriod: Option[DeduplicationPeriod], + override val submissionId: String, + override val minLedgerTimeAbs: Option[Instant], + override val disclosedContracts: Seq[DisclosedContract], + override val domainId: DomainId, + override val applicationId: String, + ) extends SubmitCommand + with BaseCommand[SubmitAndWaitRequest, 
SubmitAndWaitForTransactionResponse, Transaction] { + + override def createRequest(): Either[String, SubmitAndWaitRequest] = + Right(SubmitAndWaitRequest(commands = Some(mkCommand))) + + override def submitRequest( + service: CommandServiceStub, + request: SubmitAndWaitRequest, + ): Future[SubmitAndWaitForTransactionResponse] = + service.submitAndWaitForTransaction(request) + + override def handleResponse( + response: SubmitAndWaitForTransactionResponse + ): Either[String, Transaction] = + response.transaction.toRight("Received response without any transaction") + + override def timeoutType: TimeoutType = DefaultUnboundedTimeout + + } + } + + object StateService { + abstract class BaseCommand[Req, Resp, Res] extends GrpcAdminCommand[Req, Resp, Res] { + override type Svc = StateServiceStub + + override def createService(channel: ManagedChannel): StateServiceStub = + StateServiceGrpc.stub(channel) + } + + final case class LedgerEnd() + extends BaseCommand[GetLedgerEndRequest, GetLedgerEndResponse, ParticipantOffset] { + + override def createRequest(): Either[String, GetLedgerEndRequest] = + Right(GetLedgerEndRequest()) + + override def submitRequest( + service: StateServiceStub, + request: GetLedgerEndRequest, + ): Future[GetLedgerEndResponse] = + service.getLedgerEnd(request) + + override def handleResponse( + response: GetLedgerEndResponse + ): Either[String, ParticipantOffset] = + response.offset.toRight("Empty LedgerEndResponse received without offset") + } + + final case class GetConnectedDomains(partyId: LfPartyId) + extends BaseCommand[ + GetConnectedDomainsRequest, + GetConnectedDomainsResponse, + GetConnectedDomainsResponse, + ] { + + override def createRequest(): Either[String, GetConnectedDomainsRequest] = + Right(GetConnectedDomainsRequest(partyId.toString)) + + override def submitRequest( + service: StateServiceStub, + request: GetConnectedDomainsRequest, + ): Future[GetConnectedDomainsResponse] = + service.getConnectedDomains(request) + + override def 
handleResponse( + response: GetConnectedDomainsResponse + ): Either[String, GetConnectedDomainsResponse] = + Right(response) + } + + final case class GetActiveContracts( + parties: Set[LfPartyId], + limit: PositiveInt, + templateFilter: Seq[TemplateId] = Seq.empty, + activeAtOffset: String = "", + verbose: Boolean = true, + timeout: FiniteDuration, + includeCreatedEventBlob: Boolean = false, + )(scheduler: ScheduledExecutorService) + extends BaseCommand[GetActiveContractsRequest, Seq[GetActiveContractsResponse], Seq[ + GetActiveContractsResponse + ]] { + + override def createRequest(): Either[String, GetActiveContractsRequest] = { + val filter = + if (templateFilter.nonEmpty) { + Filters( + Some( + InclusiveFilters(templateFilters = + templateFilter.map(tId => + TemplateFilter(Some(tId.toIdentifier), includeCreatedEventBlob) + ) + ) + ) + ) + } else Filters.defaultInstance + Right( + GetActiveContractsRequest( + filter = Some(TransactionFilter(parties.map((_, filter)).toMap)), + verbose = verbose, + activeAtOffset = activeAtOffset, + ) + ) + } + + override def submitRequest( + service: StateServiceStub, + request: GetActiveContractsRequest, + ): Future[Seq[GetActiveContractsResponse]] = { + GrpcAdminCommand.streamedResponse[ + GetActiveContractsRequest, + GetActiveContractsResponse, + GetActiveContractsResponse, + ]( + service.getActiveContracts, + List(_), + request, + limit.value, + timeout, + scheduler, + ) + } + + override def handleResponse( + response: Seq[GetActiveContractsResponse] + ): Either[String, Seq[GetActiveContractsResponse]] = { + Right(response) + } + + // fetching ACS might take long if we fetch a lot of data + override def timeoutType: TimeoutType = DefaultUnboundedTimeout + + } + } + + final case class CompletionWrapper( + completion: Completion, + checkpoint: Checkpoint, + domainId: DomainId, + ) + + object CommandCompletionService { + abstract class BaseCommand[Req, Resp, Res] extends GrpcAdminCommand[Req, Resp, Res] { + override type Svc = 
CommandCompletionServiceStub + + override def createService(channel: ManagedChannel): CommandCompletionServiceStub = + CommandCompletionServiceGrpc.stub(channel) + } + + final case class CompletionRequest( + partyId: LfPartyId, + beginOffset: ParticipantOffset, + expectedCompletions: Int, + timeout: java.time.Duration, + applicationId: String, + )(filter: CompletionWrapper => Boolean, scheduler: ScheduledExecutorService) + extends BaseCommand[ + CompletionStreamRequest, + Seq[CompletionWrapper], + Seq[CompletionWrapper], + ] { + + override def createRequest(): Either[String, CompletionStreamRequest] = + Right( + CompletionStreamRequest( + applicationId = applicationId, + parties = Seq(partyId), + beginExclusive = Some(beginOffset), + ) + ) + + override def submitRequest( + service: CommandCompletionServiceStub, + request: CompletionStreamRequest, + ): Future[Seq[CompletionWrapper]] = { + import scala.jdk.DurationConverters.* + GrpcAdminCommand + .streamedResponse[CompletionStreamRequest, CompletionStreamResponse, CompletionWrapper]( + service.completionStream, + response => + List( + CompletionWrapper( + completion = response.completion.getOrElse( + throw new IllegalStateException("Completion should be present.") + ), + checkpoint = response.checkpoint.getOrElse( + throw new IllegalStateException("Checkpoint should be present.") + ), + domainId = DomainId.tryFromString(response.domainId), + ) + ).filter(filter), + request, + expectedCompletions, + timeout.toScala, + scheduler, + ) + } + + override def handleResponse( + response: Seq[CompletionWrapper] + ): Either[String, Seq[CompletionWrapper]] = + Right(response) + + override def timeoutType: TimeoutType = ServerEnforcedTimeout + } + + final case class Subscribe( + observer: StreamObserver[CompletionWrapper], + parties: Seq[String], + offset: Option[ParticipantOffset], + applicationId: String, + )(implicit loggingContext: ErrorLoggingContext) + extends BaseCommand[CompletionStreamRequest, AutoCloseable, 
AutoCloseable] { + // The subscription should never be cut short because of a gRPC timeout + override def timeoutType: TimeoutType = ServerEnforcedTimeout + + override def createRequest(): Either[String, CompletionStreamRequest] = Right { + CompletionStreamRequest( + applicationId = applicationId, + parties = parties, + beginExclusive = offset, + ) + } + + override def submitRequest( + service: CommandCompletionServiceStub, + request: CompletionStreamRequest, + ): Future[AutoCloseable] = { + val rawObserver = new ForwardingStreamObserver[CompletionStreamResponse, CompletionWrapper]( + observer, + response => + List( + CompletionWrapper( + completion = response.completion.getOrElse( + throw new IllegalStateException("Completion should be present.") + ), + checkpoint = response.checkpoint.getOrElse( + throw new IllegalStateException("Checkpoint should be present.") + ), + domainId = DomainId.tryFromString(response.domainId), + ) + ), + ) + val context = Context.current().withCancellation() + context.run(() => service.completionStream(request, rawObserver)) + Future.successful(context) + } + + override def handleResponse(response: AutoCloseable): Either[String, AutoCloseable] = Right( + response + ) + } + } + + object Time { + abstract class BaseCommand[Req, Resp, Res] extends GrpcAdminCommand[Req, Resp, Res] { + override type Svc = TimeServiceStub + + override def createService(channel: ManagedChannel): TimeServiceStub = + TimeServiceGrpc.stub(channel) + } + + final object Get + extends BaseCommand[ + GetTimeRequest, + GetTimeResponse, + CantonTimestamp, + ] { + + override def submitRequest( + service: TimeServiceStub, + request: GetTimeRequest, + ): Future[GetTimeResponse] = { + service.getTime(request) + } + + /** Create the request from configured options + */ + override def createRequest(): Either[String, GetTimeRequest] = Right(GetTimeRequest()) + + /** Handle the response the service has provided + */ + override def handleResponse( + response: GetTimeResponse + 
): Either[String, CantonTimestamp] = + for { + prototTimestamp <- response.currentTime.map(Right(_)).getOrElse(Left("currentTime empty")) + result <- CantonTimestamp.fromProtoPrimitive(prototTimestamp).left.map(_.message) + } yield result + } + + final case class Set(currentTime: CantonTimestamp, newTime: CantonTimestamp) + extends BaseCommand[ + SetTimeRequest, + Empty, + Unit, + ] { + + override def submitRequest(service: TimeServiceStub, request: SetTimeRequest): Future[Empty] = + service.setTime(request) + + override def createRequest(): Either[String, SetTimeRequest] = + Right( + SetTimeRequest( + currentTime = Some(currentTime.toProtoPrimitive), + newTime = Some(newTime.toProtoPrimitive), + ) + ) + + /** Handle the response the service has provided + */ + override def handleResponse(response: Empty): Either[String, Unit] = Right(()) + + } + + } + + object QueryService { + + abstract class BaseCommand[Req, Res] extends GrpcAdminCommand[Req, Res, Res] { + override type Svc = EventQueryServiceStub + + override def createService(channel: ManagedChannel): EventQueryServiceStub = + EventQueryServiceGrpc.stub(channel) + + override def handleResponse(response: Res): Either[String, Res] = Right(response) + } + + final case class GetEventsByContractId( + contractId: String, + requestingParties: Seq[String], + ) extends BaseCommand[ + GetEventsByContractIdRequest, + GetEventsByContractIdResponse, + ] { + + override def createRequest(): Either[String, GetEventsByContractIdRequest] = Right( + GetEventsByContractIdRequest( + contractId = contractId, + requestingParties = requestingParties, + ) + ) + + override def submitRequest( + service: EventQueryServiceStub, + request: GetEventsByContractIdRequest, + ): Future[GetEventsByContractIdResponse] = service.getEventsByContractId(request) + + } + } +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/ParticipantAdminCommands.scala 
b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/ParticipantAdminCommands.scala new file mode 100644 index 0000000000..a3704dbfb0 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/ParticipantAdminCommands.scala @@ -0,0 +1,1223 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.admin.api.client.commands + +import cats.implicits.* +import com.daml.ledger.api.v1.ledger_offset.LedgerOffset +import com.daml.ledger.api.v1.ledger_offset.LedgerOffset.Value +import com.digitalasset.canton.admin.api.client.commands.GrpcAdminCommand.{ + DefaultUnboundedTimeout, + ServerEnforcedTimeout, + TimeoutType, +} +import com.digitalasset.canton.admin.api.client.data.{ + DarMetadata, + ListConnectedDomainsResult, + ParticipantPruningSchedule, +} +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.logging.TracedLogger +import com.digitalasset.canton.participant.admin.grpc.{ + GrpcParticipantRepairService, + TransferSearchResult, +} +import com.digitalasset.canton.participant.admin.v0.DomainConnectivityServiceGrpc.DomainConnectivityServiceStub +import com.digitalasset.canton.participant.admin.v0.EnterpriseParticipantReplicationServiceGrpc.EnterpriseParticipantReplicationServiceStub +import com.digitalasset.canton.participant.admin.v0.InspectionServiceGrpc.InspectionServiceStub +import com.digitalasset.canton.participant.admin.v0.PackageServiceGrpc.PackageServiceStub +import com.digitalasset.canton.participant.admin.v0.ParticipantRepairServiceGrpc.ParticipantRepairServiceStub +import com.digitalasset.canton.participant.admin.v0.PartyNameManagementServiceGrpc.PartyNameManagementServiceStub +import 
com.digitalasset.canton.participant.admin.v0.PingServiceGrpc.PingServiceStub +import com.digitalasset.canton.participant.admin.v0.PruningServiceGrpc.PruningServiceStub +import com.digitalasset.canton.participant.admin.v0.ResourceManagementServiceGrpc.ResourceManagementServiceStub +import com.digitalasset.canton.participant.admin.v0.TransferServiceGrpc.TransferServiceStub +import com.digitalasset.canton.participant.admin.v0.{ResourceLimits as _, *} +import com.digitalasset.canton.participant.admin.{ResourceLimits, v0} +import com.digitalasset.canton.participant.domain.DomainConnectionConfig as CDomainConnectionConfig +import com.digitalasset.canton.participant.sync.UpstreamOffsetConvert +import com.digitalasset.canton.protocol.{LfContractId, TransferId, v0 as v0proto} +import com.digitalasset.canton.pruning.admin +import com.digitalasset.canton.serialization.ProtoConverter.InstantConverter +import com.digitalasset.canton.topology.{DomainId, PartyId} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.traffic.MemberTrafficStatus +import com.digitalasset.canton.util.BinaryFileUtil +import com.digitalasset.canton.version.ProtocolVersion +import com.digitalasset.canton.{DomainAlias, LedgerApplicationId, LedgerTransactionId, config} +import com.google.protobuf.ByteString +import com.google.protobuf.empty.Empty +import com.google.protobuf.timestamp.Timestamp +import io.grpc.Context.CancellableContext +import io.grpc.stub.StreamObserver +import io.grpc.{Context, ManagedChannel} + +import java.io.IOException +import java.nio.file.{Files, Path, Paths} +import java.time.Instant +import java.util.concurrent.atomic.AtomicReference +import scala.concurrent.duration.{Duration, MILLISECONDS} +import scala.concurrent.{Future, Promise, blocking} + +object ParticipantAdminCommands { + + /** Daml Package Management Commands + */ + object Package { + + sealed trait PackageCommand[Req, Res, Result] extends GrpcAdminCommand[Req, Res, Result] { + override 
type Svc = PackageServiceStub + + override def createService(channel: ManagedChannel): PackageServiceStub = + PackageServiceGrpc.stub(channel) + } + + final case class List(limit: PositiveInt) + extends PackageCommand[ListPackagesRequest, ListPackagesResponse, Seq[PackageDescription]] { + override def createRequest() = Right(ListPackagesRequest(limit.value)) + + override def submitRequest( + service: PackageServiceStub, + request: ListPackagesRequest, + ): Future[ListPackagesResponse] = + service.listPackages(request) + + override def handleResponse( + response: ListPackagesResponse + ): Either[String, Seq[PackageDescription]] = + Right(response.packageDescriptions) + } + + final case class ListContents(packageId: String) + extends PackageCommand[ListPackageContentsRequest, ListPackageContentsResponse, Seq[ + ModuleDescription + ]] { + override def createRequest() = Right(ListPackageContentsRequest(packageId)) + + override def submitRequest( + service: PackageServiceStub, + request: ListPackageContentsRequest, + ): Future[ListPackageContentsResponse] = + service.listPackageContents(request) + + override def handleResponse( + response: ListPackageContentsResponse + ): Either[String, Seq[ModuleDescription]] = + Right(response.modules) + } + + final case class UploadDar( + darPath: Option[String], + vetAllPackages: Boolean, + synchronizeVetting: Boolean, + logger: TracedLogger, + ) extends PackageCommand[UploadDarRequest, UploadDarResponse, String] { + + override def createRequest(): Either[String, UploadDarRequest] = + for { + pathValue <- darPath.toRight("DAR path not provided") + nonEmptyPathValue <- Either.cond( + pathValue.nonEmpty, + pathValue, + "Provided DAR path is empty", + ) + filename = Paths.get(nonEmptyPathValue).getFileName.toString + darData <- BinaryFileUtil.readByteStringFromFile(nonEmptyPathValue) + } yield UploadDarRequest( + darData, + filename, + vetAllPackages = vetAllPackages, + synchronizeVetting = synchronizeVetting, + ) + + override def 
submitRequest( + service: PackageServiceStub, + request: UploadDarRequest, + ): Future[UploadDarResponse] = + service.uploadDar(request) + + override def handleResponse(response: UploadDarResponse): Either[String, String] = + response.value match { + case UploadDarResponse.Value.Success(UploadDarResponse.Success(hash)) => Right(hash) + case UploadDarResponse.Value.Failure(UploadDarResponse.Failure(msg)) => Left(msg) + case UploadDarResponse.Value.Empty => Left("unexpected empty response") + } + + // file can be big. checking & vetting might take a while + override def timeoutType: TimeoutType = DefaultUnboundedTimeout + + } + + final case class RemovePackage( + packageId: String, + force: Boolean, + ) extends PackageCommand[RemovePackageRequest, RemovePackageResponse, Unit] { + + override def createRequest() = Right(RemovePackageRequest(packageId, force)) + + override def submitRequest( + service: PackageServiceStub, + request: RemovePackageRequest, + ): Future[RemovePackageResponse] = + service.removePackage(request) + + override def handleResponse( + response: RemovePackageResponse + ): Either[String, Unit] = { + response.success match { + case None => Left("unexpected empty response") + case Some(_success) => Right(()) + } + } + + } + + final case class GetDar( + darHash: Option[String], + destinationDirectory: Option[String], + logger: TracedLogger, + ) extends PackageCommand[GetDarRequest, GetDarResponse, Path] { + override def createRequest(): Either[String, GetDarRequest] = + for { + _ <- destinationDirectory.toRight("DAR destination directory not provided") + hash <- darHash.toRight("DAR hash not provided") + } yield GetDarRequest(hash) + + override def submitRequest( + service: PackageServiceStub, + request: GetDarRequest, + ): Future[GetDarResponse] = + service.getDar(request) + + override def handleResponse(response: GetDarResponse): Either[String, Path] = + for { + directory <- destinationDirectory.toRight("DAR directory not provided") + data <- if 
(response.data.isEmpty) Left("DAR was not found") else Right(response.data) + path <- + try { + val path = Paths.get(directory, s"${response.name}.dar") + Files.write(path, data.toByteArray) + Right(path) + } catch { + case ex: IOException => + // the trace context for admin commands is started by the submit-request call + // however we can't get at it here + logger.debug(s"Error saving DAR to $directory: $ex")(TraceContext.empty) + Left(s"Error saving DAR to $directory") + } + } yield path + + // might be a big file to download + override def timeoutType: TimeoutType = DefaultUnboundedTimeout + + } + + final case class ListDarContents(darId: String) + extends PackageCommand[ListDarContentsRequest, ListDarContentsResponse, DarMetadata] { + override def createRequest() = Right(ListDarContentsRequest(darId)) + + override def submitRequest( + service: PackageServiceStub, + request: ListDarContentsRequest, + ): Future[ListDarContentsResponse] = + service.listDarContents(request) + + override def handleResponse( + response: ListDarContentsResponse + ): Either[String, DarMetadata] = + DarMetadata.fromProtoV0(response).leftMap(_.toString) + + } + + final case class RemoveDar( + darHash: String + ) extends PackageCommand[RemoveDarRequest, RemoveDarResponse, Unit] { + + override def createRequest() = Right(RemoveDarRequest(darHash)) + + override def submitRequest( + service: PackageServiceStub, + request: RemoveDarRequest, + ): Future[RemoveDarResponse] = + service.removeDar(request) + + override def handleResponse( + response: RemoveDarResponse + ): Either[String, Unit] = { + response.success match { + case None => Left("unexpected empty response") + case Some(success) => Right(()) + } + } + + } + + final case class VetDar(darDash: String, synchronize: Boolean) + extends PackageCommand[VetDarRequest, VetDarResponse, Unit] { + override def createRequest(): Either[String, VetDarRequest] = Right( + VetDarRequest(darDash, synchronize) + ) + + override def submitRequest( + 
service: PackageServiceStub, + request: VetDarRequest, + ): Future[VetDarResponse] = service.vetDar(request) + + override def handleResponse(response: VetDarResponse): Either[String, Unit] = + Right(()) + } + + // TODO(#14432): Add `synchronize` flag which makes the call block until the unvetting operation + // is observed by the participant on all connected domains. + final case class UnvetDar(darDash: String) + extends PackageCommand[UnvetDarRequest, UnvetDarResponse, Unit] { + + override def createRequest(): Either[String, UnvetDarRequest] = Right( + UnvetDarRequest(darDash) + ) + + override def submitRequest( + service: PackageServiceStub, + request: UnvetDarRequest, + ): Future[UnvetDarResponse] = service.unvetDar(request) + + override def handleResponse(response: UnvetDarResponse): Either[String, Unit] = + Right(()) + } + + final case class ListDars(limit: PositiveInt) + extends PackageCommand[ListDarsRequest, ListDarsResponse, Seq[DarDescription]] { + override def createRequest(): Either[String, ListDarsRequest] = Right( + ListDarsRequest(limit.value) + ) + + override def submitRequest( + service: PackageServiceStub, + request: ListDarsRequest, + ): Future[ListDarsResponse] = + service.listDars(request) + + override def handleResponse(response: ListDarsResponse): Either[String, Seq[DarDescription]] = + Right(response.dars) + } + + } + + object PartyNameManagement { + + final case class SetPartyDisplayName(partyId: PartyId, displayName: String) + extends GrpcAdminCommand[SetPartyDisplayNameRequest, SetPartyDisplayNameResponse, Unit] { + override type Svc = PartyNameManagementServiceStub + + override def createService(channel: ManagedChannel): PartyNameManagementServiceStub = + PartyNameManagementServiceGrpc.stub(channel) + + override def createRequest(): Either[String, SetPartyDisplayNameRequest] = + Right( + SetPartyDisplayNameRequest( + partyId = partyId.uid.toProtoPrimitive, + displayName = displayName, + ) + ) + + override def submitRequest( + service: 
PartyNameManagementServiceStub, + request: SetPartyDisplayNameRequest, + ): Future[SetPartyDisplayNameResponse] = + service.setPartyDisplayName(request) + + override def handleResponse(response: SetPartyDisplayNameResponse): Either[String, Unit] = + Right(()) + + } + + } + + object ParticipantRepairManagement { + + sealed trait StreamingMachinery[Req, Resp] { + def stream( + load: StreamObserver[Resp] => StreamObserver[Req], + requestBuilder: Array[Byte] => Req, + snapshot: ByteString, + ): Future[Resp] = { + val requestComplete = Promise[Resp]() + val ref = new AtomicReference[Option[Resp]](None) + + val responseObserver = new StreamObserver[Resp] { + override def onNext(value: Resp): Unit = { + ref.set(Some(value)) + } + + override def onError(t: Throwable): Unit = requestComplete.failure(t) + + override def onCompleted(): Unit = { + ref.get() match { + case Some(response) => requestComplete.success(response) + case None => + requestComplete.failure( + io.grpc.Status.CANCELLED + .withDescription("Server completed the request before providing a response") + .asRuntimeException() + ) + } + + } + } + val requestObserver = load(responseObserver) + + snapshot.toByteArray + .grouped(GrpcParticipantRepairService.DefaultChunkSize.value) + .foreach { bytes => + blocking { + requestObserver.onNext(requestBuilder(bytes)) + } + } + requestObserver.onCompleted() + requestComplete.future + } + } + + final case class ExportAcs( + parties: Set[PartyId], + filterDomainId: Option[DomainId], + timestamp: Option[Instant], + observer: StreamObserver[ExportAcsResponse], + contractDomainRenames: Map[DomainId, (DomainId, ProtocolVersion)], + ) extends GrpcAdminCommand[ + ExportAcsRequest, + CancellableContext, + CancellableContext, + ] { + + override type Svc = ParticipantRepairServiceStub + + override def createService(channel: ManagedChannel): ParticipantRepairServiceStub = + ParticipantRepairServiceGrpc.stub(channel) + + override def createRequest(): Either[String, ExportAcsRequest] 
= { + Right( + ExportAcsRequest( + parties.map(_.toLf).toSeq, + filterDomainId.map(_.toProtoPrimitive).getOrElse(""), + timestamp.map(Timestamp.apply), + contractDomainRenames.map { case (source, (targetDomainId, targetProtocolVersion)) => + val targetDomain = ExportAcsRequest.TargetDomain( + domainId = targetDomainId.toProtoPrimitive, + protocolVersion = targetProtocolVersion.toProtoPrimitive, + ) + + (source.toProtoPrimitive, targetDomain) + }, + ) + ) + } + + override def submitRequest( + service: ParticipantRepairServiceStub, + request: ExportAcsRequest, + ): Future[CancellableContext] = { + val context = Context.current().withCancellation() + context.run(() => service.exportAcs(request, observer)) + Future.successful(context) + } + + override def handleResponse( + response: CancellableContext + ): Either[String, CancellableContext] = Right(response) + + } + + final case class ImportAcs(acsChunk: ByteString, workflowIdPrefix: String) + extends GrpcAdminCommand[ImportAcsRequest, ImportAcsResponse, Unit] + with StreamingMachinery[ImportAcsRequest, ImportAcsResponse] { + + override type Svc = ParticipantRepairServiceStub + + override def createService(channel: ManagedChannel): ParticipantRepairServiceStub = + ParticipantRepairServiceGrpc.stub(channel) + + override def createRequest(): Either[String, ImportAcsRequest] = { + Right(ImportAcsRequest(acsChunk, workflowIdPrefix)) + } + + override def submitRequest( + service: ParticipantRepairServiceStub, + request: ImportAcsRequest, + ): Future[ImportAcsResponse] = { + stream( + service.importAcs, + (bytes: Array[Byte]) => ImportAcsRequest(ByteString.copyFrom(bytes), workflowIdPrefix), + request.acsSnapshot, + ) + } + + override def handleResponse(response: ImportAcsResponse): Either[String, Unit] = { + Right(()) + } + } + + final case class PurgeContracts( + domain: DomainAlias, + contracts: Seq[LfContractId], + ignoreAlreadyPurged: Boolean, + ) extends GrpcAdminCommand[PurgeContractsRequest, PurgeContractsResponse, 
Unit] { + + override type Svc = ParticipantRepairServiceStub + + override def createService(channel: ManagedChannel): ParticipantRepairServiceStub = + ParticipantRepairServiceGrpc.stub(channel) + + override def createRequest(): Either[String, PurgeContractsRequest] = { + Right( + PurgeContractsRequest( + domain = domain.toProtoPrimitive, + contractIds = contracts.map(_.coid), + ignoreAlreadyPurged = ignoreAlreadyPurged, + ) + ) + } + + override def submitRequest( + service: ParticipantRepairServiceStub, + request: PurgeContractsRequest, + ): Future[PurgeContractsResponse] = service.purgeContracts(request) + + override def handleResponse(response: PurgeContractsResponse): Either[String, Unit] = + Right(()) + } + + final case class MigrateDomain( + sourceDomainAlias: DomainAlias, + targetDomainConfig: CDomainConnectionConfig, + ) extends GrpcAdminCommand[MigrateDomainRequest, MigrateDomainResponse, Unit] { + override type Svc = ParticipantRepairServiceStub + + override def createService(channel: ManagedChannel): ParticipantRepairServiceStub = + ParticipantRepairServiceGrpc.stub(channel) + + override def submitRequest( + service: ParticipantRepairServiceStub, + request: MigrateDomainRequest, + ): Future[MigrateDomainResponse] = service.migrateDomain(request) + + override def createRequest(): Either[String, MigrateDomainRequest] = + Right( + MigrateDomainRequest( + sourceDomainAlias.toProtoPrimitive, + Some(targetDomainConfig.toProtoV0), + ) + ) + + override def handleResponse(response: MigrateDomainResponse): Either[String, Unit] = Right(()) + + // migration command will potentially take a long time + override def timeoutType: TimeoutType = DefaultUnboundedTimeout + + } + } + + object Ping { + + final case class Ping( + targets: Set[String], + validators: Set[String], + timeoutMillis: Long, + levels: Long, + gracePeriodMillis: Long, + workflowId: String, + id: String, + ) extends GrpcAdminCommand[PingRequest, PingResponse, Option[Duration]] { + override type Svc = 
PingServiceStub + + override def createService(channel: ManagedChannel): PingServiceStub = + PingServiceGrpc.stub(channel) + + override def createRequest(): Either[String, PingRequest] = + Right( + PingRequest( + targets.toSeq, + validators.toSeq, + timeoutMillis, + levels, + gracePeriodMillis, + workflowId, + id, + ) + ) + + override def submitRequest( + service: PingServiceStub, + request: PingRequest, + ): Future[PingResponse] = + service.ping(request) + + override def handleResponse(response: PingResponse): Either[String, Option[Duration]] = + response.response match { + case PingResponse.Response.Success(PingSuccess(pingTime, responder)) => + Right(Some(Duration(pingTime, MILLISECONDS))) + case PingResponse.Response.Failure(_ex) => Right(None) + case PingResponse.Response.Empty => Left("Ping client: unexpected empty response") + } + + override def timeoutType: TimeoutType = ServerEnforcedTimeout + } + + } + + object DomainConnectivity { + + abstract class Base[Req, Res, Ret] extends GrpcAdminCommand[Req, Res, Ret] { + override type Svc = DomainConnectivityServiceStub + + override def createService(channel: ManagedChannel): DomainConnectivityServiceStub = + DomainConnectivityServiceGrpc.stub(channel) + } + + final case class ReconnectDomains(ignoreFailures: Boolean) + extends Base[ReconnectDomainsRequest, ReconnectDomainsResponse, Unit] { + + override def createRequest(): Either[String, ReconnectDomainsRequest] = + Right(ReconnectDomainsRequest(ignoreFailures = ignoreFailures)) + + override def submitRequest( + service: DomainConnectivityServiceStub, + request: ReconnectDomainsRequest, + ): Future[ReconnectDomainsResponse] = + service.reconnectDomains(request) + + override def handleResponse(response: ReconnectDomainsResponse): Either[String, Unit] = Right( + () + ) + + // depending on the confirmation timeout and the load, this might take a bit longer + override def timeoutType: TimeoutType = DefaultUnboundedTimeout + + } + + final case class 
ConnectDomain(domainAlias: DomainAlias, retry: Boolean) + extends Base[ConnectDomainRequest, ConnectDomainResponse, Boolean] { + + override def createRequest(): Either[String, ConnectDomainRequest] = + Right(ConnectDomainRequest(domainAlias.toProtoPrimitive, retry)) + + override def submitRequest( + service: DomainConnectivityServiceStub, + request: ConnectDomainRequest, + ): Future[ConnectDomainResponse] = + service.connectDomain(request) + + override def handleResponse(response: ConnectDomainResponse): Either[String, Boolean] = + Right(response.connectedSuccessfully) + + // can take long if we need to wait to become active + override def timeoutType: TimeoutType = DefaultUnboundedTimeout + + } + + final case class GetDomainId(domainAlias: DomainAlias) + extends Base[GetDomainIdRequest, GetDomainIdResponse, DomainId] { + + override def createRequest(): Either[String, GetDomainIdRequest] = + Right(GetDomainIdRequest(domainAlias.toProtoPrimitive)) + + override def submitRequest( + service: DomainConnectivityServiceStub, + request: GetDomainIdRequest, + ): Future[GetDomainIdResponse] = + service.getDomainId(request) + + override def handleResponse(response: GetDomainIdResponse): Either[String, DomainId] = + DomainId.fromProtoPrimitive(response.domainId, "domain_id").leftMap(_.toString) + } + + final case class DisconnectDomain(domainAlias: DomainAlias) + extends Base[DisconnectDomainRequest, DisconnectDomainResponse, Unit] { + + override def createRequest(): Either[String, DisconnectDomainRequest] = + Right(DisconnectDomainRequest(domainAlias.toProtoPrimitive)) + + override def submitRequest( + service: DomainConnectivityServiceStub, + request: DisconnectDomainRequest, + ): Future[DisconnectDomainResponse] = + service.disconnectDomain(request) + + override def handleResponse(response: DisconnectDomainResponse): Either[String, Unit] = Right( + () + ) + } + + final case class ListConnectedDomains() + extends Base[ListConnectedDomainsRequest, 
ListConnectedDomainsResponse, Seq[ + ListConnectedDomainsResult + ]] { + + override def createRequest(): Either[String, ListConnectedDomainsRequest] = Right( + ListConnectedDomainsRequest() + ) + + override def submitRequest( + service: DomainConnectivityServiceStub, + request: ListConnectedDomainsRequest, + ): Future[ListConnectedDomainsResponse] = + service.listConnectedDomains(request) + + override def handleResponse( + response: ListConnectedDomainsResponse + ): Either[String, Seq[ListConnectedDomainsResult]] = + response.connectedDomains.traverse( + ListConnectedDomainsResult.fromProtoV0(_).leftMap(_.toString) + ) + + } + + final case object ListConfiguredDomains + extends Base[ListConfiguredDomainsRequest, ListConfiguredDomainsResponse, Seq[ + (CDomainConnectionConfig, Boolean) + ]] { + + override def createRequest(): Either[String, ListConfiguredDomainsRequest] = Right( + ListConfiguredDomainsRequest() + ) + + override def submitRequest( + service: DomainConnectivityServiceStub, + request: ListConfiguredDomainsRequest, + ): Future[ListConfiguredDomainsResponse] = + service.listConfiguredDomains(request) + + override def handleResponse( + response: ListConfiguredDomainsResponse + ): Either[String, Seq[(CDomainConnectionConfig, Boolean)]] = { + + def mapRes( + result: ListConfiguredDomainsResponse.Result + ): Either[String, (CDomainConnectionConfig, Boolean)] = + for { + configP <- result.config.toRight("Server has sent empty config") + config <- CDomainConnectionConfig.fromProtoV0(configP).leftMap(_.toString) + } yield (config, result.connected) + + response.results.traverse(mapRes) + } + } + + final case class RegisterDomain(config: CDomainConnectionConfig) + extends Base[RegisterDomainRequest, RegisterDomainResponse, Unit] { + + override def createRequest(): Either[String, RegisterDomainRequest] = + Right(RegisterDomainRequest(add = Some(config.toProtoV0))) + + override def submitRequest( + service: DomainConnectivityServiceStub, + request: 
RegisterDomainRequest, + ): Future[RegisterDomainResponse] = + service.registerDomain(request) + + override def handleResponse(response: RegisterDomainResponse): Either[String, Unit] = Right( + () + ) + + // can take long if we need to wait to become active + override def timeoutType: TimeoutType = DefaultUnboundedTimeout + + } + + final case class ModifyDomainConnection(config: CDomainConnectionConfig) + extends Base[ModifyDomainRequest, ModifyDomainResponse, Unit] { + + override def createRequest(): Either[String, ModifyDomainRequest] = + Right(ModifyDomainRequest(modify = Some(config.toProtoV0))) + + override def submitRequest( + service: DomainConnectivityServiceStub, + request: ModifyDomainRequest, + ): Future[ModifyDomainResponse] = + service.modifyDomain(request) + + override def handleResponse(response: ModifyDomainResponse): Either[String, Unit] = Right(()) + + } + + final case class GetAgreement(domainAlias: DomainAlias) + extends Base[GetAgreementRequest, GetAgreementResponse, Option[(Agreement, Boolean)]] { + + override def createRequest(): Either[String, GetAgreementRequest] = Right( + GetAgreementRequest(domainAlias.unwrap) + ) + + override def submitRequest( + service: DomainConnectivityServiceStub, + request: GetAgreementRequest, + ): Future[GetAgreementResponse] = + service.getAgreement(request) + + override def handleResponse( + response: GetAgreementResponse + ): Either[String, Option[(Agreement, Boolean)]] = + Right(response.agreement.map(ag => (ag, response.accepted))) + } + + final case class AcceptAgreement(domainAlias: DomainAlias, agreementId: String) + extends Base[AcceptAgreementRequest, AcceptAgreementResponse, Unit] { + + override def createRequest(): Either[String, AcceptAgreementRequest] = + Right(AcceptAgreementRequest(domainAlias.unwrap, agreementId)) + + override def submitRequest( + service: DomainConnectivityServiceStub, + request: AcceptAgreementRequest, + ): Future[AcceptAgreementResponse] = + service.acceptAgreement(request) + 
+ override def handleResponse(response: AcceptAgreementResponse): Either[String, Unit] = Right( + () + ) + } + + } + + object Transfer { + + abstract class Base[Req, Res, Ret] extends GrpcAdminCommand[Req, Res, Ret] { + override type Svc = TransferServiceStub + + override def createService(channel: ManagedChannel): TransferServiceStub = + TransferServiceGrpc.stub(channel) + } + + final case class TransferOut( + submittingParty: PartyId, + contractId: LfContractId, + sourceDomain: DomainAlias, + targetDomain: DomainAlias, + applicationId: LedgerApplicationId, + submissionId: String, + workflowId: String, + commandId: String, + ) extends Base[AdminTransferOutRequest, AdminTransferOutResponse, TransferId] { + override def createRequest(): Either[String, AdminTransferOutRequest] = + Right( + AdminTransferOutRequest( + submittingParty = submittingParty.toLf, + originDomain = sourceDomain.toProtoPrimitive, + targetDomain = targetDomain.toProtoPrimitive, + contractId = contractId.coid, + applicationId = applicationId, + submissionId = submissionId, + workflowId = workflowId, + commandId = commandId, + ) + ) + + override def submitRequest( + service: TransferServiceStub, + request: AdminTransferOutRequest, + ): Future[AdminTransferOutResponse] = + service.transferOut(request) + + override def handleResponse(response: AdminTransferOutResponse): Either[String, TransferId] = + response match { + case AdminTransferOutResponse(Some(transferIdP)) => + TransferId.fromProtoV0(transferIdP).leftMap(_.toString) + case AdminTransferOutResponse(None) => Left("Empty TransferOutResponse") + } + } + + final case class TransferIn( + submittingParty: PartyId, + transferId: v0proto.TransferId, + targetDomain: DomainAlias, + applicationId: LedgerApplicationId, + submissionId: String, + workflowId: String, + commandId: String, + ) extends Base[AdminTransferInRequest, AdminTransferInResponse, Unit] { + + override def createRequest(): Either[String, AdminTransferInRequest] = + Right( + 
AdminTransferInRequest( + submittingPartyId = submittingParty.toLf, + transferId = Some(transferId), + targetDomain = targetDomain.toProtoPrimitive, + applicationId = applicationId, + submissionId = submissionId, + workflowId = workflowId, + commandId = commandId, + ) + ) + + override def submitRequest( + service: TransferServiceStub, + request: AdminTransferInRequest, + ): Future[AdminTransferInResponse] = + service.transferIn(request) + + override def handleResponse(response: AdminTransferInResponse): Either[String, Unit] = Right( + () + ) + + } + + final case class TransferSearch( + targetDomain: DomainAlias, + sourceDomainFilter: Option[DomainAlias], + timestampFilter: Option[Instant], + submittingPartyFilter: Option[PartyId], + limit0: Int, + ) extends Base[ + AdminTransferSearchQuery, + AdminTransferSearchResponse, + Seq[TransferSearchResult], + ] { + + override def createRequest(): Either[String, AdminTransferSearchQuery] = + Right( + AdminTransferSearchQuery( + searchDomain = targetDomain.toProtoPrimitive, + filterOriginDomain = sourceDomainFilter.map(_.toProtoPrimitive).getOrElse(""), + filterTimestamp = + timestampFilter.map((value: Instant) => InstantConverter.toProtoPrimitive(value)), + filterSubmittingParty = submittingPartyFilter.fold("")(_.toLf), + limit = limit0.toLong, + ) + ) + + override def submitRequest( + service: TransferServiceStub, + request: AdminTransferSearchQuery, + ): Future[AdminTransferSearchResponse] = + service.transferSearch(request) + + override def handleResponse( + response: AdminTransferSearchResponse + ): Either[String, Seq[TransferSearchResult]] = + response match { + case AdminTransferSearchResponse(results) => + results.traverse(TransferSearchResult.fromProtoV0).leftMap(_.toString) + } + + override def timeoutType: TimeoutType = DefaultUnboundedTimeout + + } + } + + object Resources { + abstract class Base[Req, Res, Ret] extends GrpcAdminCommand[Req, Res, Ret] { + override type Svc = ResourceManagementServiceStub + + 
override def createService(channel: ManagedChannel): ResourceManagementServiceStub = + ResourceManagementServiceGrpc.stub(channel) + } + + final case class GetResourceLimits() extends Base[Empty, v0.ResourceLimits, ResourceLimits] { + override def createRequest(): Either[String, Empty] = Right(Empty()) + + override def submitRequest( + service: ResourceManagementServiceStub, + request: Empty, + ): Future[v0.ResourceLimits] = + service.getResourceLimits(request) + + override def handleResponse(response: v0.ResourceLimits): Either[String, ResourceLimits] = { + Right(ResourceLimits.fromProtoV0(response)) + } + } + + final case class SetResourceLimits(limits: ResourceLimits) + extends Base[v0.ResourceLimits, Empty, Unit] { + override def createRequest(): Either[String, v0.ResourceLimits] = Right(limits.toProtoV0) + + override def submitRequest( + service: ResourceManagementServiceStub, + request: v0.ResourceLimits, + ): Future[Empty] = + service.updateResourceLimits(request) + + override def handleResponse(response: Empty): Either[String, Unit] = Right(()) + } + } + + object Inspection { + + abstract class Base[Req, Res, Ret] extends GrpcAdminCommand[Req, Res, Ret] { + override type Svc = InspectionServiceStub + + override def createService(channel: ManagedChannel): InspectionServiceStub = + InspectionServiceGrpc.stub(channel) + } + + final case class LookupContractDomain(contractIds: Set[LfContractId]) + extends Base[ + v0.LookupContractDomain.Request, + v0.LookupContractDomain.Response, + Map[LfContractId, String], + ] { + override def createRequest() = Right( + v0.LookupContractDomain.Request(contractIds.toSeq.map(_.coid)) + ) + + override def submitRequest( + service: InspectionServiceStub, + request: v0.LookupContractDomain.Request, + ): Future[v0.LookupContractDomain.Response] = + service.lookupContractDomain(request) + + override def handleResponse( + response: v0.LookupContractDomain.Response + ): Either[String, Map[LfContractId, String]] = Right( + 
response.results.map { case (id, domain) => + LfContractId.assertFromString(id) -> domain + } + ) + + override def timeoutType: TimeoutType = DefaultUnboundedTimeout + + } + + final case class LookupTransactionDomain(transactionId: LedgerTransactionId) + extends Base[ + v0.LookupTransactionDomain.Request, + v0.LookupTransactionDomain.Response, + DomainId, + ] { + override def createRequest() = Right(v0.LookupTransactionDomain.Request(transactionId)) + + override def submitRequest( + service: InspectionServiceStub, + request: v0.LookupTransactionDomain.Request, + ): Future[v0.LookupTransactionDomain.Response] = + service.lookupTransactionDomain(request) + + override def handleResponse( + response: v0.LookupTransactionDomain.Response + ): Either[String, DomainId] = + DomainId.fromString(response.domainId) + + override def timeoutType: TimeoutType = DefaultUnboundedTimeout + + } + + final case class LookupOffsetByTime(ts: Timestamp) + extends Base[v0.LookupOffsetByTime.Request, v0.LookupOffsetByTime.Response, String] { + override def createRequest() = Right(v0.LookupOffsetByTime.Request(Some(ts))) + + override def submitRequest( + service: InspectionServiceStub, + request: v0.LookupOffsetByTime.Request, + ): Future[v0.LookupOffsetByTime.Response] = + service.lookupOffsetByTime(request) + + override def handleResponse( + response: v0.LookupOffsetByTime.Response + ): Either[String, String] = + Right(response.offset) + } + + final case class LookupOffsetByIndex(index: Long) + extends Base[v0.LookupOffsetByIndex.Request, v0.LookupOffsetByIndex.Response, String] { + override def createRequest() = Right(v0.LookupOffsetByIndex.Request(index)) + + override def submitRequest( + service: InspectionServiceStub, + request: v0.LookupOffsetByIndex.Request, + ): Future[v0.LookupOffsetByIndex.Response] = + service.lookupOffsetByIndex(request) + + override def handleResponse( + response: v0.LookupOffsetByIndex.Response + ): Either[String, String] = + Right(response.offset) + } + + } + 
+ object Pruning { + abstract class Base[Req, Res, Ret] extends GrpcAdminCommand[Req, Res, Ret] { + override type Svc = PruningServiceStub + + override def createService(channel: ManagedChannel): PruningServiceStub = + PruningServiceGrpc.stub(channel) + } + + final case class GetSafePruningOffsetCommand(beforeOrAt: Instant, ledgerEnd: LedgerOffset) + extends Base[v0.GetSafePruningOffsetRequest, v0.GetSafePruningOffsetResponse, Option[ + LedgerOffset + ]] { + + override def createRequest(): Either[String, v0.GetSafePruningOffsetRequest] = + for { + beforeOrAt <- CantonTimestamp.fromInstant(beforeOrAt) + ledgerEnd <- ledgerEnd.value match { + case Value.Absolute(value) => Right(value) + case other => Left(s"Unable to convert ledger_end `$other` to absolute value") + } + } yield v0.GetSafePruningOffsetRequest(Some(beforeOrAt.toProtoPrimitive), ledgerEnd) + + override def submitRequest( + service: PruningServiceStub, + request: v0.GetSafePruningOffsetRequest, + ): Future[v0.GetSafePruningOffsetResponse] = service.getSafePruningOffset(request) + + override def handleResponse( + response: v0.GetSafePruningOffsetResponse + ): Either[String, Option[LedgerOffset]] = response.response match { + case v0.GetSafePruningOffsetResponse.Response.Empty => Left("Unexpected empty response") + + case v0.GetSafePruningOffsetResponse.Response.SafePruningOffset(offset) => + Right(Some(UpstreamOffsetConvert.toLedgerOffset(offset))) + + case v0.GetSafePruningOffsetResponse.Response.NoSafePruningOffset(_) => Right(None) + } + } + + final case class PruneInternallyCommand(pruneUpTo: LedgerOffset) + extends Base[v0.PruneRequest, v0.PruneResponse, Unit] { + override def createRequest() = + pruneUpTo.value.absolute + .toRight("The pruneUpTo ledger offset needs to be absolute") + .map(v0.PruneRequest(_)) + + override def submitRequest( + service: PruningServiceStub, + request: v0.PruneRequest, + ): Future[v0.PruneResponse] = + service.prune(request) + + override def handleResponse(response: 
v0.PruneResponse): Either[String, Unit] = Right(()) + } + + final case class SetParticipantScheduleCommand( + cron: String, + maxDuration: config.PositiveDurationSeconds, + retention: config.PositiveDurationSeconds, + pruneInternallyOnly: Boolean, + ) extends Base[ + admin.v0.SetParticipantSchedule.Request, + admin.v0.SetParticipantSchedule.Response, + Unit, + ] { + override def createRequest(): Right[String, admin.v0.SetParticipantSchedule.Request] = + Right( + admin.v0.SetParticipantSchedule.Request( + Some( + admin.v0.ParticipantPruningSchedule( + Some( + admin.v0.PruningSchedule( + cron, + Some(maxDuration.toProtoPrimitive), + Some(retention.toProtoPrimitive), + ) + ), + pruneInternallyOnly, + ) + ) + ) + ) + + override def submitRequest( + service: Svc, + request: admin.v0.SetParticipantSchedule.Request, + ): Future[admin.v0.SetParticipantSchedule.Response] = service.setParticipantSchedule(request) + + override def handleResponse( + response: admin.v0.SetParticipantSchedule.Response + ): Either[String, Unit] = + response match { + case admin.v0.SetParticipantSchedule.Response() => Right(()) + } + } + + final case class GetParticipantScheduleCommand() + extends Base[ + admin.v0.GetParticipantSchedule.Request, + admin.v0.GetParticipantSchedule.Response, + Option[ParticipantPruningSchedule], + ] { + override def createRequest(): Right[String, admin.v0.GetParticipantSchedule.Request] = + Right( + admin.v0.GetParticipantSchedule.Request() + ) + + override def submitRequest( + service: Svc, + request: admin.v0.GetParticipantSchedule.Request, + ): Future[admin.v0.GetParticipantSchedule.Response] = service.getParticipantSchedule(request) + + override def handleResponse( + response: admin.v0.GetParticipantSchedule.Response + ): Either[String, Option[ParticipantPruningSchedule]] = + response.schedule.fold( + Right(None): Either[String, Option[ParticipantPruningSchedule]] + )(ParticipantPruningSchedule.fromProtoV0(_).bimap(_.message, Some(_))) + } + } + + object 
Replication { + + final case class SetPassiveCommand() + extends GrpcAdminCommand[SetPassive.Request, SetPassive.Response, Unit] { + override type Svc = EnterpriseParticipantReplicationServiceStub + + override def createService( + channel: ManagedChannel + ): EnterpriseParticipantReplicationServiceStub = + EnterpriseParticipantReplicationServiceGrpc.stub(channel) + + override def createRequest(): Either[String, SetPassive.Request] = + Right(SetPassive.Request()) + + override def submitRequest( + service: EnterpriseParticipantReplicationServiceStub, + request: SetPassive.Request, + ): Future[SetPassive.Response] = + service.setPassive(request) + + override def handleResponse(response: SetPassive.Response): Either[String, Unit] = + response match { + case SetPassive.Response() => Right(()) + } + } + } + + object TrafficControl { + final case class GetTrafficControlState(domainId: DomainId) + extends GrpcAdminCommand[ + TrafficControlStateRequest, + TrafficControlStateResponse, + MemberTrafficStatus, + ] { + override type Svc = TrafficControlServiceGrpc.TrafficControlServiceStub + + override def createService( + channel: ManagedChannel + ): TrafficControlServiceGrpc.TrafficControlServiceStub = + TrafficControlServiceGrpc.stub(channel) + + override def submitRequest( + service: TrafficControlServiceGrpc.TrafficControlServiceStub, + request: TrafficControlStateRequest, + ): Future[TrafficControlStateResponse] = + service.trafficControlState(request) + + override def createRequest(): Either[String, TrafficControlStateRequest] = Right( + TrafficControlStateRequest(domainId.toProtoPrimitive) + ) + + override def handleResponse( + response: TrafficControlStateResponse + ): Either[String, MemberTrafficStatus] = { + response.trafficState + .map { trafficStatus => + MemberTrafficStatus + .fromProtoV0(trafficStatus) + .leftMap(_.message) + } + .getOrElse(Left("No traffic state available")) + } + } + } + +} diff --git 
a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/PruningSchedulerCommands.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/PruningSchedulerCommands.scala new file mode 100644 index 0000000000..3fd8cec61d --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/PruningSchedulerCommands.scala @@ -0,0 +1,158 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.admin.api.client.commands + +import cats.syntax.either.* +import com.digitalasset.canton.admin.api.client.data.PruningSchedule +import com.digitalasset.canton.config.PositiveDurationSeconds +import com.digitalasset.canton.pruning.admin.v0 +import com.digitalasset.canton.pruning.admin.v0.{PruningSchedule as PruningScheduleP, *} +import io.grpc.ManagedChannel +import io.grpc.stub.AbstractStub + +import scala.concurrent.Future + +/** Exposes shared grpc client pruning scheduler commands reusable by participant/mediator/sequencer + * admin api. 
+ * Having to type-parameterize as grpc does not support inheritance and passing in the grpc stub methods in one by one + */ +class PruningSchedulerCommands[Stub <: AbstractStub[Stub]]( + createServiceStub: ManagedChannel => Stub, + submitSetSchedule: (Stub, SetSchedule.Request) => Future[SetSchedule.Response], + submitClearSchedule: (Stub, ClearSchedule.Request) => Future[ClearSchedule.Response], + submitSetCron: (Stub, SetCron.Request) => Future[SetCron.Response], + submitSetMaxDuration: (Stub, v0.SetMaxDuration.Request) => Future[SetMaxDuration.Response], + submitSetRetention: (Stub, SetRetention.Request) => Future[SetRetention.Response], + submitGetSchedule: (Stub, GetSchedule.Request) => Future[GetSchedule.Response], +) { + abstract class BaseCommand[Req, Res, Ret] extends GrpcAdminCommand[Req, Res, Ret] { + override type Svc = Stub + override def createService(channel: ManagedChannel): Svc = createServiceStub(channel) + } + + // case classes not final as the scala compiler can't check outer Svc type reference + case class SetScheduleCommand( + cron: String, + maxDuration: PositiveDurationSeconds, + retention: PositiveDurationSeconds, + ) extends BaseCommand[SetSchedule.Request, SetSchedule.Response, Unit] { + override def createRequest(): Right[String, SetSchedule.Request] = + Right( + SetSchedule.Request( + Some( + PruningScheduleP( + cron, + Some(maxDuration.toProtoPrimitive), + Some(retention.toProtoPrimitive), + ) + ) + ) + ) + + override def submitRequest( + service: Svc, + request: SetSchedule.Request, + ): Future[SetSchedule.Response] = submitSetSchedule(service, request) + + override def handleResponse(response: SetSchedule.Response): Either[String, Unit] = + response match { + case SetSchedule.Response() => Right(()) + } + } + + case class ClearScheduleCommand() + extends BaseCommand[ClearSchedule.Request, ClearSchedule.Response, Unit] { + override def createRequest(): Right[String, ClearSchedule.Request] = + Right(ClearSchedule.Request()) + + 
override def submitRequest( + service: Svc, + request: ClearSchedule.Request, + ): Future[ClearSchedule.Response] = + submitClearSchedule(service, request) + + override def handleResponse(response: ClearSchedule.Response): Either[String, Unit] = + response match { + case ClearSchedule.Response() => Right(()) + } + } + + case class SetCronCommand(cron: String) + extends BaseCommand[SetCron.Request, SetCron.Response, Unit] { + override def createRequest(): Right[String, SetCron.Request] = + Right(SetCron.Request(cron)) + + override def submitRequest( + service: Svc, + request: SetCron.Request, + ): Future[SetCron.Response] = + submitSetCron(service, request) + + override def handleResponse(response: SetCron.Response): Either[String, Unit] = + response match { + case SetCron.Response() => Right(()) + } + } + + case class SetMaxDurationCommand(maxDuration: PositiveDurationSeconds) + extends BaseCommand[SetMaxDuration.Request, SetMaxDuration.Response, Unit] { + override def createRequest(): Right[String, SetMaxDuration.Request] = + Right( + SetMaxDuration.Request(Some(maxDuration.toProtoPrimitive)) + ) + + override def submitRequest( + service: Svc, + request: SetMaxDuration.Request, + ): Future[SetMaxDuration.Response] = + submitSetMaxDuration(service, request) + + override def handleResponse(response: SetMaxDuration.Response): Either[String, Unit] = + response match { + case SetMaxDuration.Response() => Right(()) + } + } + + case class SetRetentionCommand(retention: PositiveDurationSeconds) + extends BaseCommand[SetRetention.Request, SetRetention.Response, Unit] { + override def createRequest(): Right[String, SetRetention.Request] = + Right(SetRetention.Request(Some(retention.toProtoPrimitive))) + + override def submitRequest( + service: Svc, + request: SetRetention.Request, + ): Future[SetRetention.Response] = + submitSetRetention(service, request) + + override def handleResponse(response: SetRetention.Response): Either[String, Unit] = + response match { + case 
SetRetention.Response() => Right(()) + } + } + + case class GetScheduleCommand() + extends BaseCommand[ + GetSchedule.Request, + GetSchedule.Response, + Option[PruningSchedule], + ] { + override def createRequest(): Right[String, GetSchedule.Request] = + Right(GetSchedule.Request()) + + override def submitRequest( + service: Svc, + request: GetSchedule.Request, + ): Future[GetSchedule.Response] = + submitGetSchedule(service, request) + + override def handleResponse( + response: GetSchedule.Response + ): Either[ + String, + Option[PruningSchedule], + ] = response.schedule.fold( + Right(None): Either[String, Option[PruningSchedule]] + )(PruningSchedule.fromProtoV0(_).bimap(_.message, Some(_))) + } +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/SequencerAdminCommands.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/SequencerAdminCommands.scala new file mode 100644 index 0000000000..02fac50379 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/SequencerAdminCommands.scala @@ -0,0 +1,68 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.admin.api.client.commands + +import cats.syntax.either.* +import cats.syntax.traverse.* +import com.digitalasset.canton.domain.admin.v0.SequencerAdministrationServiceGrpc.SequencerAdministrationServiceStub +import com.digitalasset.canton.domain.admin.v0 as adminproto +import com.digitalasset.canton.domain.sequencing.sequencer.SequencerPruningStatus +import com.digitalasset.canton.domain.sequencing.sequencer.traffic.SequencerTrafficStatus +import com.digitalasset.canton.topology.Member +import com.google.protobuf.empty.Empty +import io.grpc.ManagedChannel + +import scala.concurrent.Future + +object SequencerAdminCommands { + + abstract class BaseSequencerAdministrationCommands[Req, Rep, Res] + extends GrpcAdminCommand[Req, Rep, Res] { + override type Svc = SequencerAdministrationServiceStub + + override def createService(channel: ManagedChannel): SequencerAdministrationServiceStub = + adminproto.SequencerAdministrationServiceGrpc.stub(channel) + } + + final case object GetPruningStatus + extends BaseSequencerAdministrationCommands[ + Empty, + adminproto.SequencerPruningStatus, + SequencerPruningStatus, + ] { + override def createRequest(): Either[String, Empty] = Right(Empty()) + override def submitRequest( + service: SequencerAdministrationServiceStub, + request: Empty, + ): Future[adminproto.SequencerPruningStatus] = + service.pruningStatus(request) + override def handleResponse( + response: adminproto.SequencerPruningStatus + ): Either[String, SequencerPruningStatus] = + SequencerPruningStatus.fromProtoV0(response).leftMap(_.toString) + } + + final case class GetTrafficControlState(members: Seq[Member]) + extends BaseSequencerAdministrationCommands[ + adminproto.TrafficControlStateRequest, + adminproto.TrafficControlStateResponse, + SequencerTrafficStatus, + ] { + override def createRequest(): Either[String, adminproto.TrafficControlStateRequest] = Right( + 
adminproto.TrafficControlStateRequest(members.map(_.toProtoPrimitive)) + ) + override def submitRequest( + service: SequencerAdministrationServiceStub, + request: adminproto.TrafficControlStateRequest, + ): Future[adminproto.TrafficControlStateResponse] = + service.trafficControlState(request) + override def handleResponse( + response: adminproto.TrafficControlStateResponse + ): Either[String, SequencerTrafficStatus] = + response.trafficStates + .traverse(com.digitalasset.canton.traffic.MemberTrafficStatus.fromProtoV0) + .leftMap(_.toString) + .map(SequencerTrafficStatus) + } +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/StatusAdminCommands.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/StatusAdminCommands.scala new file mode 100644 index 0000000000..2152393e57 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/StatusAdminCommands.scala @@ -0,0 +1,89 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.admin.api.client.commands + +import cats.syntax.either.* +import com.digitalasset.canton.ProtoDeserializationError +import com.digitalasset.canton.health.admin.v0.{HealthDumpChunk, HealthDumpRequest} +import com.digitalasset.canton.health.admin.{data, v0} +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.google.protobuf.empty.Empty +import io.grpc.Context.CancellableContext +import io.grpc.stub.StreamObserver +import io.grpc.{Context, ManagedChannel} + +import scala.concurrent.Future + +object StatusAdminCommands { + abstract class GetStatusBase[Result] extends GrpcAdminCommand[Empty, v0.NodeStatus, Result] { + override type Svc = v0.StatusServiceGrpc.StatusServiceStub + override def createService(channel: ManagedChannel): v0.StatusServiceGrpc.StatusServiceStub = + v0.StatusServiceGrpc.stub(channel) + override def createRequest(): Either[String, Empty] = Right(Empty()) + override def submitRequest( + service: v0.StatusServiceGrpc.StatusServiceStub, + request: Empty, + ): Future[v0.NodeStatus] = + service.status(request) + } + + class GetStatus[S <: data.NodeStatus.Status]( + deserialize: v0.NodeStatus.Status => ParsingResult[S] + ) extends GetStatusBase[data.NodeStatus[S]] { + override def handleResponse(response: v0.NodeStatus): Either[String, data.NodeStatus[S]] = + ((response.response match { + case v0.NodeStatus.Response.NotInitialized(notInitialized) => + Right(data.NodeStatus.NotInitialized(notInitialized.active)) + case v0.NodeStatus.Response.Success(status) => + deserialize(status).map(data.NodeStatus.Success(_)) + case v0.NodeStatus.Response.Empty => Left(ProtoDeserializationError.FieldNotSet("response")) + }): ParsingResult[data.NodeStatus[S]]).leftMap(_.toString) + } + + class GetHealthDump( + observer: StreamObserver[HealthDumpChunk], + chunkSize: Option[Int], + ) extends GrpcAdminCommand[HealthDumpRequest, CancellableContext, 
CancellableContext] { + override type Svc = v0.StatusServiceGrpc.StatusServiceStub + override def createService(channel: ManagedChannel): v0.StatusServiceGrpc.StatusServiceStub = + v0.StatusServiceGrpc.stub(channel) + override def submitRequest( + service: v0.StatusServiceGrpc.StatusServiceStub, + request: HealthDumpRequest, + ): Future[CancellableContext] = { + val context = Context.current().withCancellation() + context.run(() => service.healthDump(request, observer)) + Future.successful(context) + } + override def createRequest(): Either[String, HealthDumpRequest] = Right( + HealthDumpRequest(chunkSize) + ) + override def handleResponse(response: CancellableContext): Either[String, CancellableContext] = + Right(response) + + override def timeoutType: GrpcAdminCommand.TimeoutType = + GrpcAdminCommand.DefaultUnboundedTimeout + + } + + object IsRunning + extends StatusAdminCommands.FromStatus({ + case v0.NodeStatus.Response.Empty => false + case _ => true + }) + + object IsInitialized + extends StatusAdminCommands.FromStatus({ + case v0.NodeStatus.Response.Success(_) => true + case _ => false + }) + + class FromStatus(predicate: v0.NodeStatus.Response => Boolean) extends GetStatusBase[Boolean] { + override def handleResponse(response: v0.NodeStatus): Either[String, Boolean] = + (response.response match { + case v0.NodeStatus.Response.Empty => Left(ProtoDeserializationError.FieldNotSet("response")) + case other => Right(predicate(other)) + }).leftMap(_.toString) + } +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/TopologyAdminCommands.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/TopologyAdminCommands.scala new file mode 100644 index 0000000000..e9e80517a1 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/TopologyAdminCommands.scala @@ -0,0 +1,774 @@ +// Copyright (c) 2023 Digital Asset 
(Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.admin.api.client.commands + +import cats.syntax.either.* +import cats.syntax.traverse.* +import com.daml.lf.data.Ref.PackageId +import com.digitalasset.canton.admin.api.client.commands.GrpcAdminCommand.{ + DefaultUnboundedTimeout, + TimeoutType, +} +import com.digitalasset.canton.admin.api.client.data.* +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.crypto.{Fingerprint, KeyPurpose} +import com.digitalasset.canton.protocol.{DynamicDomainParameters as DynamicDomainParametersInternal} +import com.digitalasset.canton.topology.admin.grpc.BaseQuery +import com.digitalasset.canton.topology.admin.v0 +import com.digitalasset.canton.topology.admin.v0.AuthorizationSuccess +import com.digitalasset.canton.topology.admin.v0.InitializationServiceGrpc.InitializationServiceStub +import com.digitalasset.canton.topology.admin.v0.TopologyAggregationServiceGrpc.TopologyAggregationServiceStub +import com.digitalasset.canton.topology.admin.v0.TopologyManagerReadServiceGrpc.TopologyManagerReadServiceStub +import com.digitalasset.canton.topology.admin.v0.TopologyManagerWriteServiceGrpc.TopologyManagerWriteServiceStub +import com.digitalasset.canton.topology.store.StoredTopologyTransactions +import com.digitalasset.canton.topology.transaction.* +import com.digitalasset.canton.topology.{DomainId, *} +import com.google.protobuf.ByteString +import com.google.protobuf.empty.Empty +import com.google.protobuf.timestamp.Timestamp +import io.grpc.ManagedChannel + +import java.time.Instant +import scala.concurrent.Future + +object TopologyAdminCommands { + + object Aggregation { + + abstract class BaseCommand[Req, Res, Result] extends GrpcAdminCommand[Req, Res, Result] { + override type Svc = TopologyAggregationServiceStub + override def createService(channel: ManagedChannel): TopologyAggregationServiceStub = + 
v0.TopologyAggregationServiceGrpc.stub(channel) + } + + final case class ListParties( + filterDomain: String, + filterParty: String, + filterParticipant: String, + asOf: Option[Instant], + limit: PositiveInt, + ) extends BaseCommand[v0.ListPartiesRequest, v0.ListPartiesResponse, Seq[ListPartiesResult]] { + + override def createRequest(): Either[String, v0.ListPartiesRequest] = + Right( + v0.ListPartiesRequest( + filterDomain = filterDomain, + filterParty = filterParty, + filterParticipant = filterParticipant, + asOf = asOf.map(ts => Timestamp(ts.getEpochSecond)), + limit = limit.value, + ) + ) + + override def submitRequest( + service: TopologyAggregationServiceStub, + request: v0.ListPartiesRequest, + ): Future[v0.ListPartiesResponse] = + service.listParties(request) + + override def handleResponse( + response: v0.ListPartiesResponse + ): Either[String, Seq[ListPartiesResult]] = + response.results.traverse(ListPartiesResult.fromProtoV0).leftMap(_.toString) + + // command will potentially take a long time + override def timeoutType: TimeoutType = DefaultUnboundedTimeout + + } + + final case class ListKeyOwners( + filterDomain: String, + filterKeyOwnerType: Option[MemberCode], + filterKeyOwnerUid: String, + asOf: Option[Instant], + limit: PositiveInt, + ) extends BaseCommand[v0.ListKeyOwnersRequest, v0.ListKeyOwnersResponse, Seq[ + ListKeyOwnersResult + ]] { + + override def createRequest(): Either[String, v0.ListKeyOwnersRequest] = + Right( + v0.ListKeyOwnersRequest( + filterDomain = filterDomain, + filterKeyOwnerType = filterKeyOwnerType.map(_.toProtoPrimitive).getOrElse(""), + filterKeyOwnerUid = filterKeyOwnerUid, + asOf = asOf.map(ts => Timestamp(ts.getEpochSecond)), + limit = limit.value, + ) + ) + + override def submitRequest( + service: TopologyAggregationServiceStub, + request: v0.ListKeyOwnersRequest, + ): Future[v0.ListKeyOwnersResponse] = + service.listKeyOwners(request) + + override def handleResponse( + response: v0.ListKeyOwnersResponse + ): 
Either[String, Seq[ListKeyOwnersResult]] = + response.results.traverse(ListKeyOwnersResult.fromProtoV0).leftMap(_.toString) + + // command will potentially take a long time + override def timeoutType: TimeoutType = DefaultUnboundedTimeout + + } + } + + object Write { + + abstract class BaseWriteCommand[Req, Resp, Res] extends GrpcAdminCommand[Req, Resp, Res] { + override type Svc = TopologyManagerWriteServiceStub + override def createService(channel: ManagedChannel): TopologyManagerWriteServiceStub = + v0.TopologyManagerWriteServiceGrpc.stub(channel) + + } + + abstract class BaseCommand[Req] + extends BaseWriteCommand[Req, v0.AuthorizationSuccess, ByteString] { + + protected def authData( + ops: TopologyChangeOp, + signedBy: Option[Fingerprint], + replaceExisting: Boolean, + force: Boolean, + ) = + Some( + v0.AuthorizationData( + ops.toProto, + signedBy.map(_.unwrap).getOrElse(""), + replaceExisting = replaceExisting, + forceChange = force, + ) + ) + + override def handleResponse(response: v0.AuthorizationSuccess): Either[String, ByteString] = + Right(response.serialized) + + } + + final case class AuthorizeNamespaceDelegation( + ops: TopologyChangeOp, + signedBy: Option[Fingerprint], + namespace: Fingerprint, + authorizedKey: Fingerprint, + isRootDelegation: Boolean, + force: Boolean, + ) extends BaseCommand[v0.NamespaceDelegationAuthorization] { + + override def createRequest(): Either[String, v0.NamespaceDelegationAuthorization] = + Right( + v0.NamespaceDelegationAuthorization( + authData(ops, signedBy, replaceExisting = false, force = force), + namespace.toProtoPrimitive, + authorizedKey.toProtoPrimitive, + isRootDelegation, + ) + ) + + override def submitRequest( + service: TopologyManagerWriteServiceStub, + request: v0.NamespaceDelegationAuthorization, + ): Future[v0.AuthorizationSuccess] = + service.authorizeNamespaceDelegation(request) + + } + + final case class AuthorizeIdentifierDelegation( + ops: TopologyChangeOp, + signedBy: Option[Fingerprint], + 
identifier: UniqueIdentifier, + authorizedKey: Fingerprint, + ) extends BaseCommand[v0.IdentifierDelegationAuthorization] { + + override def createRequest(): Either[String, v0.IdentifierDelegationAuthorization] = + Right( + v0.IdentifierDelegationAuthorization( + authData(ops, signedBy, replaceExisting = false, force = false), + identifier.toProtoPrimitive, + authorizedKey.toProtoPrimitive, + ) + ) + + override def submitRequest( + service: TopologyManagerWriteServiceStub, + request: v0.IdentifierDelegationAuthorization, + ): Future[v0.AuthorizationSuccess] = + service.authorizeIdentifierDelegation(request) + + } + + final case class AuthorizeOwnerToKeyMapping( + ops: TopologyChangeOp, + signedBy: Option[Fingerprint], + keyOwner: Member, + fingerprintOfKey: Fingerprint, + purpose: KeyPurpose, + force: Boolean, + ) extends BaseCommand[v0.OwnerToKeyMappingAuthorization] { + + override def createRequest(): Either[String, v0.OwnerToKeyMappingAuthorization] = Right( + v0.OwnerToKeyMappingAuthorization( + authData(ops, signedBy, replaceExisting = false, force = force), + keyOwner.toProtoPrimitive, + fingerprintOfKey.toProtoPrimitive, + purpose.toProtoEnum, + ) + ) + + override def submitRequest( + service: TopologyManagerWriteServiceStub, + request: v0.OwnerToKeyMappingAuthorization, + ): Future[v0.AuthorizationSuccess] = + service.authorizeOwnerToKeyMapping(request) + + } + + final case class AuthorizePartyToParticipant( + ops: TopologyChangeOp, + signedBy: Option[Fingerprint], + side: RequestSide, + party: PartyId, + participant: ParticipantId, + permission: ParticipantPermission, + replaceExisting: Boolean, + force: Boolean, + ) extends BaseCommand[v0.PartyToParticipantAuthorization] { + + override def createRequest(): Either[String, v0.PartyToParticipantAuthorization] = + Right( + v0.PartyToParticipantAuthorization( + authData(ops, signedBy, replaceExisting = replaceExisting, force = force), + side.toProtoEnum, + party.uid.toProtoPrimitive, + 
participant.toProtoPrimitive, + permission.toProtoEnum, + ) + ) + + override def submitRequest( + service: TopologyManagerWriteServiceStub, + request: v0.PartyToParticipantAuthorization, + ): Future[v0.AuthorizationSuccess] = + service.authorizePartyToParticipant(request) + + } + + final case class AuthorizeParticipantDomainState( + ops: TopologyChangeOp, + signedBy: Option[Fingerprint], + side: RequestSide, + domain: DomainId, + participant: ParticipantId, + permission: ParticipantPermission, + trustLevel: TrustLevel, + replaceExisting: Boolean, + ) extends BaseCommand[v0.ParticipantDomainStateAuthorization] { + + override def createRequest(): Either[String, v0.ParticipantDomainStateAuthorization] = + Right( + v0.ParticipantDomainStateAuthorization( + authData(ops, signedBy, replaceExisting = replaceExisting, force = false), + side.toProtoEnum, + domain.unwrap.toProtoPrimitive, + participant.toProtoPrimitive, + permission.toProtoEnum, + trustLevel.toProtoEnum, + ) + ) + + override def submitRequest( + service: TopologyManagerWriteServiceStub, + request: v0.ParticipantDomainStateAuthorization, + ): Future[v0.AuthorizationSuccess] = + service.authorizeParticipantDomainState(request) + + } + + final case class AuthorizeMediatorDomainState( + ops: TopologyChangeOp, + signedBy: Option[Fingerprint], + side: RequestSide, + domain: DomainId, + mediator: MediatorId, + replaceExisting: Boolean, + ) extends BaseCommand[v0.MediatorDomainStateAuthorization] { + + override def createRequest(): Either[String, v0.MediatorDomainStateAuthorization] = + Right( + v0.MediatorDomainStateAuthorization( + authData(ops, signedBy, replaceExisting = replaceExisting, force = false), + side.toProtoEnum, + domain.unwrap.toProtoPrimitive, + mediator.uid.toProtoPrimitive, + ) + ) + + override def submitRequest( + service: TopologyManagerWriteServiceStub, + request: v0.MediatorDomainStateAuthorization, + ): Future[v0.AuthorizationSuccess] = + service.authorizeMediatorDomainState(request) + + } + 
+ final case class AuthorizeVettedPackages( + ops: TopologyChangeOp, + signedBy: Option[Fingerprint], + participant: ParticipantId, + packageIds: Seq[PackageId], + force: Boolean, + ) extends BaseCommand[v0.VettedPackagesAuthorization] { + + override def createRequest(): Either[String, v0.VettedPackagesAuthorization] = + Right( + v0.VettedPackagesAuthorization( + authData(ops, signedBy, replaceExisting = false, force = force), + participant.uid.toProtoPrimitive, + packageIds = packageIds, + ) + ) + + override def submitRequest( + service: TopologyManagerWriteServiceStub, + request: v0.VettedPackagesAuthorization, + ): Future[v0.AuthorizationSuccess] = + service.authorizeVettedPackages(request) + + } + + final case class AuthorizeDomainParametersChange( + signedBy: Option[Fingerprint], + domainId: DomainId, + newParameters: DynamicDomainParameters, + force: Boolean, + ) extends BaseCommand[v0.DomainParametersChangeAuthorization] { + override def createRequest(): Either[String, v0.DomainParametersChangeAuthorization] = + v0.DomainParametersChangeAuthorization( + authorization = + authData(TopologyChangeOp.Replace, signedBy, replaceExisting = false, force = force), + domain = domainId.toProtoPrimitive, + parameters = newParameters.toProto, + ).asRight + + override def submitRequest( + service: TopologyManagerWriteServiceStub, + request: v0.DomainParametersChangeAuthorization, + ): Future[AuthorizationSuccess] = service.authorizeDomainParametersChange(request) + } + + final case class AuthorizeDomainParametersChangeInternal( + signedBy: Option[Fingerprint], + domainId: DomainId, + newParameters: DynamicDomainParametersInternal, + force: Boolean, + ) extends BaseCommand[v0.DomainParametersChangeAuthorization] { + override def createRequest(): Either[String, v0.DomainParametersChangeAuthorization] = { + val parameters = + v0.DomainParametersChangeAuthorization.Parameters + .ParametersV1(newParameters.toProtoV2) + + v0.DomainParametersChangeAuthorization( + authorization 
= + authData(TopologyChangeOp.Replace, signedBy, replaceExisting = false, force = force), + domain = domainId.toProtoPrimitive, + parameters = parameters, + ).asRight + } + + override def submitRequest( + service: TopologyManagerWriteServiceStub, + request: v0.DomainParametersChangeAuthorization, + ): Future[AuthorizationSuccess] = service.authorizeDomainParametersChange(request) + } + + final case class AddSignedTopologyTransaction(bytes: ByteString) + extends BaseWriteCommand[v0.SignedTopologyTransactionAddition, v0.AdditionSuccess, Unit] { + + override def createRequest(): Either[String, v0.SignedTopologyTransactionAddition] = + Right(v0.SignedTopologyTransactionAddition(serialized = bytes)) + + override def submitRequest( + service: TopologyManagerWriteServiceStub, + request: v0.SignedTopologyTransactionAddition, + ): Future[v0.AdditionSuccess] = + service.addSignedTopologyTransaction(request) + + override def handleResponse(response: v0.AdditionSuccess): Either[String, Unit] = + Right(()) + } + + } + + object Read { + + abstract class BaseCommand[Req, Res, Ret] extends GrpcAdminCommand[Req, Res, Ret] { + override type Svc = TopologyManagerReadServiceStub + override def createService(channel: ManagedChannel): TopologyManagerReadServiceStub = + v0.TopologyManagerReadServiceGrpc.stub(channel) + + // command will potentially take a long time + override def timeoutType: TimeoutType = DefaultUnboundedTimeout + + } + + final case class ListPartyToParticipant( + query: BaseQuery, + filterParty: String, + filterParticipant: String, + filterRequestSide: Option[RequestSide], + filterPermission: Option[ParticipantPermission], + ) extends BaseCommand[v0.ListPartyToParticipantRequest, v0.ListPartyToParticipantResult, Seq[ + ListPartyToParticipantResult + ]] { + + override def createRequest(): Either[String, v0.ListPartyToParticipantRequest] = + Right( + new v0.ListPartyToParticipantRequest( + baseQuery = Some(query.toProtoV0), + filterParty, + filterParticipant, + 
filterRequestSide + .map(_.toProtoEnum) + .map(new v0.ListPartyToParticipantRequest.FilterRequestSide(_)), + filterPermission + .map(_.toProtoEnum) + .map(new v0.ListPartyToParticipantRequest.FilterPermission(_)), + ) + ) + + override def submitRequest( + service: TopologyManagerReadServiceStub, + request: v0.ListPartyToParticipantRequest, + ): Future[v0.ListPartyToParticipantResult] = + service.listPartyToParticipant(request) + + override def handleResponse( + response: v0.ListPartyToParticipantResult + ): Either[String, Seq[ListPartyToParticipantResult]] = + response.results.traverse(ListPartyToParticipantResult.fromProtoV0).leftMap(_.toString) + + } + + final case class ListOwnerToKeyMapping( + query: BaseQuery, + filterKeyOwnerType: Option[MemberCode], + filterKeyOwnerUid: String, + filterKeyPurpose: Option[KeyPurpose], + ) extends BaseCommand[v0.ListOwnerToKeyMappingRequest, v0.ListOwnerToKeyMappingResult, Seq[ + ListOwnerToKeyMappingResult + ]] { + + override def createRequest(): Either[String, v0.ListOwnerToKeyMappingRequest] = + Right( + new v0.ListOwnerToKeyMappingRequest( + baseQuery = Some(query.toProtoV0), + filterKeyOwnerType = filterKeyOwnerType.map(_.toProtoPrimitive).getOrElse(""), + filterKeyOwnerUid = filterKeyOwnerUid, + filterKeyPurpose + .map(_.toProtoEnum) + .map(new admin.v0.ListOwnerToKeyMappingRequest.FilterKeyPurpose(_)), + ) + ) + + override def submitRequest( + service: TopologyManagerReadServiceStub, + request: v0.ListOwnerToKeyMappingRequest, + ): Future[v0.ListOwnerToKeyMappingResult] = + service.listOwnerToKeyMapping(request) + + override def handleResponse( + response: v0.ListOwnerToKeyMappingResult + ): Either[String, Seq[ListOwnerToKeyMappingResult]] = + response.results.traverse(ListOwnerToKeyMappingResult.fromProtoV0).leftMap(_.toString) + } + + final case class ListNamespaceDelegation(query: BaseQuery, filterNamespace: String) + extends BaseCommand[ + v0.ListNamespaceDelegationRequest, + v0.ListNamespaceDelegationResult, + 
Seq[ListNamespaceDelegationResult], + ] { + + override def createRequest(): Either[String, v0.ListNamespaceDelegationRequest] = + Right( + new v0.ListNamespaceDelegationRequest( + baseQuery = Some(query.toProtoV0), + filterNamespace = filterNamespace, + ) + ) + + override def submitRequest( + service: TopologyManagerReadServiceStub, + request: v0.ListNamespaceDelegationRequest, + ): Future[v0.ListNamespaceDelegationResult] = + service.listNamespaceDelegation(request) + + override def handleResponse( + response: v0.ListNamespaceDelegationResult + ): Either[String, Seq[ListNamespaceDelegationResult]] = + response.results.traverse(ListNamespaceDelegationResult.fromProtoV0).leftMap(_.toString) + } + + final case class ListIdentifierDelegation(query: BaseQuery, filterUid: String) + extends BaseCommand[ + v0.ListIdentifierDelegationRequest, + v0.ListIdentifierDelegationResult, + Seq[ListIdentifierDelegationResult], + ] { + + override def createRequest(): Either[String, v0.ListIdentifierDelegationRequest] = + Right( + new v0.ListIdentifierDelegationRequest( + baseQuery = Some(query.toProtoV0), + filterUid = filterUid, + ) + ) + + override def submitRequest( + service: TopologyManagerReadServiceStub, + request: v0.ListIdentifierDelegationRequest, + ): Future[v0.ListIdentifierDelegationResult] = + service.listIdentifierDelegation(request) + + override def handleResponse( + response: v0.ListIdentifierDelegationResult + ): Either[String, Seq[ListIdentifierDelegationResult]] = + response.results.traverse(ListIdentifierDelegationResult.fromProtoV0).leftMap(_.toString) + } + + final case class ListSignedLegalIdentityClaim(query: BaseQuery, filterUid: String) + extends BaseCommand[ + v0.ListSignedLegalIdentityClaimRequest, + v0.ListSignedLegalIdentityClaimResult, + Seq[ListSignedLegalIdentityClaimResult], + ] { + + override def createRequest(): Either[String, v0.ListSignedLegalIdentityClaimRequest] = + Right( + new v0.ListSignedLegalIdentityClaimRequest( + baseQuery = 
Some(query.toProtoV0), + filterUid = filterUid, + ) + ) + + override def submitRequest( + service: TopologyManagerReadServiceStub, + request: v0.ListSignedLegalIdentityClaimRequest, + ): Future[v0.ListSignedLegalIdentityClaimResult] = + service.listSignedLegalIdentityClaim(request) + + override def handleResponse( + response: v0.ListSignedLegalIdentityClaimResult + ): Either[String, Seq[ListSignedLegalIdentityClaimResult]] = + response.results + .traverse(ListSignedLegalIdentityClaimResult.fromProtoV0) + .leftMap(_.toString) + } + + final case class ListVettedPackages(query: BaseQuery, filterParticipant: String) + extends BaseCommand[v0.ListVettedPackagesRequest, v0.ListVettedPackagesResult, Seq[ + ListVettedPackagesResult + ]] { + + override def createRequest(): Either[String, v0.ListVettedPackagesRequest] = + Right( + new v0.ListVettedPackagesRequest( + baseQuery = Some(query.toProtoV0), + filterParticipant, + ) + ) + + override def submitRequest( + service: TopologyManagerReadServiceStub, + request: v0.ListVettedPackagesRequest, + ): Future[v0.ListVettedPackagesResult] = + service.listVettedPackages(request) + + override def handleResponse( + response: v0.ListVettedPackagesResult + ): Either[String, Seq[ListVettedPackagesResult]] = + response.results.traverse(ListVettedPackagesResult.fromProtoV0).leftMap(_.toString) + } + + final case class ListDomainParametersChanges(query: BaseQuery) + extends BaseCommand[ + v0.ListDomainParametersChangesRequest, + v0.ListDomainParametersChangesResult, + Seq[ListDomainParametersChangeResult], + ] { + override def submitRequest( + service: TopologyManagerReadServiceStub, + request: v0.ListDomainParametersChangesRequest, + ): Future[v0.ListDomainParametersChangesResult] = service.listDomainParametersChanges(request) + + override def createRequest(): Either[String, v0.ListDomainParametersChangesRequest] = Right( + v0.ListDomainParametersChangesRequest(Some(query.toProtoV0)) + ) + + override def handleResponse( + response: 
v0.ListDomainParametersChangesResult + ): Either[String, Seq[ListDomainParametersChangeResult]] = + response.results.traverse(ListDomainParametersChangeResult.fromProtoV0).leftMap(_.toString) + } + + final case class ListStores() + extends BaseCommand[v0.ListAvailableStoresRequest, v0.ListAvailableStoresResult, Seq[ + String + ]] { + + override def createRequest(): Either[String, v0.ListAvailableStoresRequest] = + Right(v0.ListAvailableStoresRequest()) + + override def submitRequest( + service: TopologyManagerReadServiceStub, + request: v0.ListAvailableStoresRequest, + ): Future[v0.ListAvailableStoresResult] = + service.listAvailableStores(request) + + override def handleResponse( + response: v0.ListAvailableStoresResult + ): Either[String, Seq[String]] = + Right(response.storeIds) + } + + final case class ListParticipantDomainState( + query: BaseQuery, + filterDomain: String, + filterParticipant: String, + ) extends BaseCommand[ + v0.ListParticipantDomainStateRequest, + v0.ListParticipantDomainStateResult, + Seq[ListParticipantDomainStateResult], + ] { + + override def createRequest(): Either[String, v0.ListParticipantDomainStateRequest] = + Right( + new v0.ListParticipantDomainStateRequest( + baseQuery = Some(query.toProtoV0), + filterDomain = filterDomain, + filterParticipant = filterParticipant, + ) + ) + + override def submitRequest( + service: TopologyManagerReadServiceStub, + request: v0.ListParticipantDomainStateRequest, + ): Future[v0.ListParticipantDomainStateResult] = + service.listParticipantDomainState(request) + + override def handleResponse( + response: v0.ListParticipantDomainStateResult + ): Either[String, Seq[ListParticipantDomainStateResult]] = + response.results.traverse(ListParticipantDomainStateResult.fromProtoV0).leftMap(_.toString) + } + + final case class ListMediatorDomainState( + query: BaseQuery, + filterDomain: String, + filterMediator: String, + ) extends BaseCommand[ + v0.ListMediatorDomainStateRequest, + 
v0.ListMediatorDomainStateResult, + Seq[ListMediatorDomainStateResult], + ] { + + override def createRequest(): Either[String, v0.ListMediatorDomainStateRequest] = + Right( + new v0.ListMediatorDomainStateRequest( + baseQuery = Some(query.toProtoV0), + filterDomain = filterDomain, + filterMediator = filterMediator, + ) + ) + + override def submitRequest( + service: TopologyManagerReadServiceStub, + request: v0.ListMediatorDomainStateRequest, + ): Future[v0.ListMediatorDomainStateResult] = + service.listMediatorDomainState(request) + + override def handleResponse( + response: v0.ListMediatorDomainStateResult + ): Either[String, Seq[ListMediatorDomainStateResult]] = + response.results.traverse(ListMediatorDomainStateResult.fromProtoV0).leftMap(_.toString) + } + + final case class ListAll(query: BaseQuery) + extends BaseCommand[ + v0.ListAllRequest, + v0.ListAllResponse, + StoredTopologyTransactions[ + TopologyChangeOp + ], + ] { + override def createRequest(): Either[String, v0.ListAllRequest] = + Right(new v0.ListAllRequest(Some(query.toProtoV0))) + + override def submitRequest( + service: TopologyManagerReadServiceStub, + request: v0.ListAllRequest, + ): Future[v0.ListAllResponse] = service.listAll(request) + + override def handleResponse( + response: v0.ListAllResponse + ): Either[String, StoredTopologyTransactions[TopologyChangeOp]] = + response.result + .fold[Either[String, StoredTopologyTransactions[TopologyChangeOp]]]( + Right(StoredTopologyTransactions.empty) + ) { collection => + StoredTopologyTransactions.fromProtoV0(collection).leftMap(_.toString) + } + } + } + + object Init { + + abstract class BaseInitializationService[Req, Resp, Res] + extends GrpcAdminCommand[Req, Resp, Res] { + override type Svc = InitializationServiceStub + override def createService(channel: ManagedChannel): InitializationServiceStub = + v0.InitializationServiceGrpc.stub(channel) + + } + + final case class InitId(identifier: String, fingerprint: String) + extends 
BaseInitializationService[v0.InitIdRequest, v0.InitIdResponse, UniqueIdentifier] { + + override def createRequest(): Either[String, v0.InitIdRequest] = + Right(v0.InitIdRequest(identifier, fingerprint, instance = "")) + + override def submitRequest( + service: InitializationServiceStub, + request: v0.InitIdRequest, + ): Future[v0.InitIdResponse] = + service.initId(request) + + override def handleResponse(response: v0.InitIdResponse): Either[String, UniqueIdentifier] = + UniqueIdentifier.fromProtoPrimitive_(response.uniqueIdentifier) + } + + final case class GetId() + extends BaseInitializationService[Empty, v0.GetIdResponse, UniqueIdentifier] { + override def createRequest(): Either[String, Empty] = + Right(Empty()) + + override def submitRequest( + service: InitializationServiceStub, + request: Empty, + ): Future[v0.GetIdResponse] = + service.getId(request) + + override def handleResponse( + response: v0.GetIdResponse + ): Either[String, UniqueIdentifier] = { + if (response.uniqueIdentifier.nonEmpty) + UniqueIdentifier.fromProtoPrimitive_(response.uniqueIdentifier) + else + Left( + s"Node ${response.instance} is not initialized and therefore does not have an Id assigned yet." + ) + } + } + } +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/TopologyAdminCommandsX.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/TopologyAdminCommandsX.scala new file mode 100644 index 0000000000..59d3ad52ab --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/TopologyAdminCommandsX.scala @@ -0,0 +1,771 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.admin.api.client.commands + +import cats.syntax.either.* +import cats.syntax.traverse.* +import com.digitalasset.canton.admin.api.client.commands.GrpcAdminCommand.{ + DefaultUnboundedTimeout, + TimeoutType, +} +import com.digitalasset.canton.admin.api.client.data.topologyx.* +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.crypto.Fingerprint +import com.digitalasset.canton.topology.* +import com.digitalasset.canton.topology.admin.grpc.BaseQueryX +import com.digitalasset.canton.topology.admin.v1 +import com.digitalasset.canton.topology.admin.v1.AuthorizeRequest.Type.{Proposal, TransactionHash} +import com.digitalasset.canton.topology.admin.v1.IdentityInitializationServiceXGrpc.IdentityInitializationServiceXStub +import com.digitalasset.canton.topology.admin.v1.TopologyManagerReadServiceXGrpc.TopologyManagerReadServiceXStub +import com.digitalasset.canton.topology.admin.v1.TopologyManagerWriteServiceXGrpc.TopologyManagerWriteServiceXStub +import com.digitalasset.canton.topology.admin.v1.{ + AddTransactionsRequest, + AddTransactionsResponse, + AuthorizeRequest, + AuthorizeResponse, + ListTrafficStateRequest, + SignTransactionsRequest, + SignTransactionsResponse, +} +import com.digitalasset.canton.topology.store.StoredTopologyTransactionsX +import com.digitalasset.canton.topology.store.StoredTopologyTransactionsX.GenericStoredTopologyTransactionsX +import com.digitalasset.canton.topology.transaction.SignedTopologyTransactionX.GenericSignedTopologyTransactionX +import com.digitalasset.canton.topology.transaction.{ + SignedTopologyTransactionX, + TopologyChangeOpX, + TopologyMappingX, +} +import com.google.protobuf.empty.Empty +import io.grpc.ManagedChannel + +import scala.concurrent.Future +import scala.reflect.ClassTag + +object TopologyAdminCommandsX { + + object Read { + + abstract class BaseCommand[Req, Res, Ret] extends GrpcAdminCommand[Req, 
Res, Ret] { + override type Svc = TopologyManagerReadServiceXStub + override def createService(channel: ManagedChannel): TopologyManagerReadServiceXStub = + v1.TopologyManagerReadServiceXGrpc.stub(channel) + + // command will potentially take a long time + override def timeoutType: TimeoutType = DefaultUnboundedTimeout + + } + + final case class ListTrafficControlState( + query: BaseQueryX, + filterMember: String, + ) extends BaseCommand[ + v1.ListTrafficStateRequest, + v1.ListTrafficStateResult, + Seq[ListTrafficStateResult], + ] { + + override def createRequest(): Either[String, v1.ListTrafficStateRequest] = + Right( + new ListTrafficStateRequest( + baseQuery = Some(query.toProtoV1), + filterMember = filterMember, + ) + ) + + override def submitRequest( + service: TopologyManagerReadServiceXStub, + request: v1.ListTrafficStateRequest, + ): Future[v1.ListTrafficStateResult] = + service.listTrafficState(request) + + override def handleResponse( + response: v1.ListTrafficStateResult + ): Either[String, Seq[ListTrafficStateResult]] = + response.results + .traverse(ListTrafficStateResult.fromProtoV1) + .leftMap(_.toString) + } + + final case class ListNamespaceDelegation( + query: BaseQueryX, + filterNamespace: String, + filterTargetKey: Option[Fingerprint], + ) extends BaseCommand[ + v1.ListNamespaceDelegationRequest, + v1.ListNamespaceDelegationResult, + Seq[ListNamespaceDelegationResult], + ] { + + override def createRequest(): Either[String, v1.ListNamespaceDelegationRequest] = + Right( + new v1.ListNamespaceDelegationRequest( + baseQuery = Some(query.toProtoV1), + filterNamespace = filterNamespace, + filterTargetKeyFingerprint = filterTargetKey.map(_.toProtoPrimitive).getOrElse(""), + ) + ) + + override def submitRequest( + service: TopologyManagerReadServiceXStub, + request: v1.ListNamespaceDelegationRequest, + ): Future[v1.ListNamespaceDelegationResult] = + service.listNamespaceDelegation(request) + + override def handleResponse( + response: 
v1.ListNamespaceDelegationResult + ): Either[String, Seq[ListNamespaceDelegationResult]] = + response.results.traverse(ListNamespaceDelegationResult.fromProtoV1).leftMap(_.toString) + } + + final case class ListUnionspaceDefinition( + query: BaseQueryX, + filterNamespace: String, + ) extends BaseCommand[ + v1.ListUnionspaceDefinitionRequest, + v1.ListUnionspaceDefinitionResult, + Seq[ListUnionspaceDefinitionResult], + ] { + + override def createRequest(): Either[String, v1.ListUnionspaceDefinitionRequest] = + Right( + new v1.ListUnionspaceDefinitionRequest( + baseQuery = Some(query.toProtoV1), + filterNamespace = filterNamespace, + ) + ) + + override def submitRequest( + service: TopologyManagerReadServiceXStub, + request: v1.ListUnionspaceDefinitionRequest, + ): Future[v1.ListUnionspaceDefinitionResult] = + service.listUnionspaceDefinition(request) + + override def handleResponse( + response: v1.ListUnionspaceDefinitionResult + ): Either[String, Seq[ListUnionspaceDefinitionResult]] = + response.results.traverse(ListUnionspaceDefinitionResult.fromProtoV1).leftMap(_.toString) + } + + final case class ListIdentifierDelegation( + query: BaseQueryX, + filterUid: String, + filterTargetKey: Option[Fingerprint], + ) extends BaseCommand[ + v1.ListIdentifierDelegationRequest, + v1.ListIdentifierDelegationResult, + Seq[ListIdentifierDelegationResult], + ] { + + override def createRequest(): Either[String, v1.ListIdentifierDelegationRequest] = + Right( + new v1.ListIdentifierDelegationRequest( + baseQuery = Some(query.toProtoV1), + filterUid = filterUid, + filterTargetKeyFingerprint = filterTargetKey.map(_.toProtoPrimitive).getOrElse(""), + ) + ) + + override def submitRequest( + service: TopologyManagerReadServiceXStub, + request: v1.ListIdentifierDelegationRequest, + ): Future[v1.ListIdentifierDelegationResult] = + service.listIdentifierDelegation(request) + + override def handleResponse( + response: v1.ListIdentifierDelegationResult + ): Either[String, 
Seq[ListIdentifierDelegationResult]] = + response.results.traverse(ListIdentifierDelegationResult.fromProtoV1).leftMap(_.toString) + } + + final case class ListOwnerToKeyMapping( + query: BaseQueryX, + filterKeyOwnerType: Option[MemberCode], + filterKeyOwnerUid: String, + ) extends BaseCommand[v1.ListOwnerToKeyMappingRequest, v1.ListOwnerToKeyMappingResult, Seq[ + ListOwnerToKeyMappingResult + ]] { + + override def createRequest(): Either[String, v1.ListOwnerToKeyMappingRequest] = + Right( + new v1.ListOwnerToKeyMappingRequest( + baseQuery = Some(query.toProtoV1), + filterKeyOwnerType = filterKeyOwnerType.map(_.toProtoPrimitive).getOrElse(""), + filterKeyOwnerUid = filterKeyOwnerUid, + ) + ) + + override def submitRequest( + service: TopologyManagerReadServiceXStub, + request: v1.ListOwnerToKeyMappingRequest, + ): Future[v1.ListOwnerToKeyMappingResult] = + service.listOwnerToKeyMapping(request) + + override def handleResponse( + response: v1.ListOwnerToKeyMappingResult + ): Either[String, Seq[ListOwnerToKeyMappingResult]] = + response.results.traverse(ListOwnerToKeyMappingResult.fromProtoV1).leftMap(_.toString) + } + + final case class ListDomainTrustCertificate( + query: BaseQueryX, + filterUid: String, + ) extends BaseCommand[ + v1.ListDomainTrustCertificateRequest, + v1.ListDomainTrustCertificateResult, + Seq[ListDomainTrustCertificateResult], + ] { + + override def createRequest(): Either[String, v1.ListDomainTrustCertificateRequest] = + Right( + new v1.ListDomainTrustCertificateRequest( + baseQuery = Some(query.toProtoV1), + filterUid = filterUid, + ) + ) + + override def submitRequest( + service: TopologyManagerReadServiceXStub, + request: v1.ListDomainTrustCertificateRequest, + ): Future[v1.ListDomainTrustCertificateResult] = + service.listDomainTrustCertificate(request) + + override def handleResponse( + response: v1.ListDomainTrustCertificateResult + ): Either[String, Seq[ListDomainTrustCertificateResult]] = + 
response.results.traverse(ListDomainTrustCertificateResult.fromProtoV1).leftMap(_.toString) + } + + final case class ListParticipantDomainPermission( + query: BaseQueryX, + filterUid: String, + ) extends BaseCommand[ + v1.ListParticipantDomainPermissionRequest, + v1.ListParticipantDomainPermissionResult, + Seq[ListParticipantDomainPermissionResult], + ] { + + override def createRequest(): Either[String, v1.ListParticipantDomainPermissionRequest] = + Right( + new v1.ListParticipantDomainPermissionRequest( + baseQuery = Some(query.toProtoV1), + filterUid = filterUid, + ) + ) + + override def submitRequest( + service: TopologyManagerReadServiceXStub, + request: v1.ListParticipantDomainPermissionRequest, + ): Future[v1.ListParticipantDomainPermissionResult] = + service.listParticipantDomainPermission(request) + + override def handleResponse( + response: v1.ListParticipantDomainPermissionResult + ): Either[String, Seq[ListParticipantDomainPermissionResult]] = + response.results + .traverse(ListParticipantDomainPermissionResult.fromProtoV1) + .leftMap(_.toString) + } + + final case class ListPartyHostingLimits( + query: BaseQueryX, + filterUid: String, + ) extends BaseCommand[ + v1.ListPartyHostingLimitsRequest, + v1.ListPartyHostingLimitsResult, + Seq[ListPartyHostingLimitsResult], + ] { + + override def createRequest(): Either[String, v1.ListPartyHostingLimitsRequest] = + Right( + new v1.ListPartyHostingLimitsRequest( + baseQuery = Some(query.toProtoV1), + filterUid = filterUid, + ) + ) + + override def submitRequest( + service: TopologyManagerReadServiceXStub, + request: v1.ListPartyHostingLimitsRequest, + ): Future[v1.ListPartyHostingLimitsResult] = + service.listPartyHostingLimits(request) + + override def handleResponse( + response: v1.ListPartyHostingLimitsResult + ): Either[String, Seq[ListPartyHostingLimitsResult]] = + response.results + .traverse(ListPartyHostingLimitsResult.fromProtoV1) + .leftMap(_.toString) + } + + final case class ListVettedPackages( + 
query: BaseQueryX, + filterParticipant: String, + ) extends BaseCommand[ + v1.ListVettedPackagesRequest, + v1.ListVettedPackagesResult, + Seq[ListVettedPackagesResult], + ] { + + override def createRequest(): Either[String, v1.ListVettedPackagesRequest] = + Right( + new v1.ListVettedPackagesRequest( + baseQuery = Some(query.toProtoV1), + filterParticipant = filterParticipant, + ) + ) + + override def submitRequest( + service: TopologyManagerReadServiceXStub, + request: v1.ListVettedPackagesRequest, + ): Future[v1.ListVettedPackagesResult] = + service.listVettedPackages(request) + + override def handleResponse( + response: v1.ListVettedPackagesResult + ): Either[String, Seq[ListVettedPackagesResult]] = + response.results + .traverse(ListVettedPackagesResult.fromProtoV1) + .leftMap(_.toString) + } + + final case class ListPartyToParticipant( + query: BaseQueryX, + filterParty: String, + filterParticipant: String, + ) extends BaseCommand[ + v1.ListPartyToParticipantRequest, + v1.ListPartyToParticipantResult, + Seq[ListPartyToParticipantResult], + ] { + + override def createRequest(): Either[String, v1.ListPartyToParticipantRequest] = + Right( + new v1.ListPartyToParticipantRequest( + baseQuery = Some(query.toProtoV1), + filterParty = filterParty, + filterParticipant = filterParticipant, + ) + ) + + override def submitRequest( + service: TopologyManagerReadServiceXStub, + request: v1.ListPartyToParticipantRequest, + ): Future[v1.ListPartyToParticipantResult] = + service.listPartyToParticipant(request) + + override def handleResponse( + response: v1.ListPartyToParticipantResult + ): Either[String, Seq[ListPartyToParticipantResult]] = + response.results + .traverse(ListPartyToParticipantResult.fromProtoV1) + .leftMap(_.toString) + } + + final case class ListAuthorityOf( + query: BaseQueryX, + filterParty: String, + ) extends BaseCommand[ + v1.ListAuthorityOfRequest, + v1.ListAuthorityOfResult, + Seq[ListAuthorityOfResult], + ] { + + override def createRequest(): 
Either[String, v1.ListAuthorityOfRequest] = + Right( + new v1.ListAuthorityOfRequest( + baseQuery = Some(query.toProtoV1), + filterParty = filterParty, + ) + ) + + override def submitRequest( + service: TopologyManagerReadServiceXStub, + request: v1.ListAuthorityOfRequest, + ): Future[v1.ListAuthorityOfResult] = + service.listAuthorityOf(request) + + override def handleResponse( + response: v1.ListAuthorityOfResult + ): Either[String, Seq[ListAuthorityOfResult]] = + response.results + .traverse(ListAuthorityOfResult.fromProtoV1) + .leftMap(_.toString) + } + + final case class DomainParametersState( + query: BaseQueryX, + filterDomain: String, + ) extends BaseCommand[ + v1.ListDomainParametersStateRequest, + v1.ListDomainParametersStateResult, + Seq[ListDomainParametersStateResult], + ] { + + override def createRequest(): Either[String, v1.ListDomainParametersStateRequest] = + Right( + new v1.ListDomainParametersStateRequest( + baseQuery = Some(query.toProtoV1), + filterDomain = filterDomain, + ) + ) + + override def submitRequest( + service: TopologyManagerReadServiceXStub, + request: v1.ListDomainParametersStateRequest, + ): Future[v1.ListDomainParametersStateResult] = + service.listDomainParametersState(request) + + override def handleResponse( + response: v1.ListDomainParametersStateResult + ): Either[String, Seq[ListDomainParametersStateResult]] = + response.results + .traverse(ListDomainParametersStateResult.fromProtoV1) + .leftMap(_.toString) + } + + final case class MediatorDomainState( + query: BaseQueryX, + filterDomain: String, + ) extends BaseCommand[ + v1.ListMediatorDomainStateRequest, + v1.ListMediatorDomainStateResult, + Seq[ListMediatorDomainStateResult], + ] { + + override def createRequest(): Either[String, v1.ListMediatorDomainStateRequest] = + Right( + new v1.ListMediatorDomainStateRequest( + baseQuery = Some(query.toProtoV1), + filterDomain = filterDomain, + ) + ) + + override def submitRequest( + service: TopologyManagerReadServiceXStub, + 
request: v1.ListMediatorDomainStateRequest, + ): Future[v1.ListMediatorDomainStateResult] = + service.listMediatorDomainState(request) + + override def handleResponse( + response: v1.ListMediatorDomainStateResult + ): Either[String, Seq[ListMediatorDomainStateResult]] = + response.results + .traverse(ListMediatorDomainStateResult.fromProtoV1) + .leftMap(_.toString) + } + + final case class SequencerDomainState( + query: BaseQueryX, + filterDomain: String, + ) extends BaseCommand[ + v1.ListSequencerDomainStateRequest, + v1.ListSequencerDomainStateResult, + Seq[ListSequencerDomainStateResult], + ] { + + override def createRequest(): Either[String, v1.ListSequencerDomainStateRequest] = + Right( + new v1.ListSequencerDomainStateRequest( + baseQuery = Some(query.toProtoV1), + filterDomain = filterDomain, + ) + ) + + override def submitRequest( + service: TopologyManagerReadServiceXStub, + request: v1.ListSequencerDomainStateRequest, + ): Future[v1.ListSequencerDomainStateResult] = + service.listSequencerDomainState(request) + + override def handleResponse( + response: v1.ListSequencerDomainStateResult + ): Either[String, Seq[ListSequencerDomainStateResult]] = + response.results + .traverse(ListSequencerDomainStateResult.fromProtoV1) + .leftMap(_.toString) + } + + final case class PurgeTopologyTransactionX( + query: BaseQueryX, + filterDomain: String, + ) extends BaseCommand[ + v1.ListPurgeTopologyTransactionXRequest, + v1.ListPurgeTopologyTransactionXResult, + Seq[ListPurgeTopologyTransactionXResult], + ] { + + override def createRequest(): Either[String, v1.ListPurgeTopologyTransactionXRequest] = + Right( + new v1.ListPurgeTopologyTransactionXRequest( + baseQuery = Some(query.toProtoV1), + filterDomain = filterDomain, + ) + ) + + override def submitRequest( + service: TopologyManagerReadServiceXStub, + request: v1.ListPurgeTopologyTransactionXRequest, + ): Future[v1.ListPurgeTopologyTransactionXResult] = + service.listPurgeTopologyTransactionX(request) + + override def 
handleResponse( + response: v1.ListPurgeTopologyTransactionXResult + ): Either[String, Seq[ListPurgeTopologyTransactionXResult]] = + response.results + .traverse(ListPurgeTopologyTransactionXResult.fromProtoV1) + .leftMap(_.toString) + } + + final case class ListStores() + extends BaseCommand[v1.ListAvailableStoresRequest, v1.ListAvailableStoresResult, Seq[ + String + ]] { + + override def createRequest(): Either[String, v1.ListAvailableStoresRequest] = + Right(v1.ListAvailableStoresRequest()) + + override def submitRequest( + service: TopologyManagerReadServiceXStub, + request: v1.ListAvailableStoresRequest, + ): Future[v1.ListAvailableStoresResult] = + service.listAvailableStores(request) + + override def handleResponse( + response: v1.ListAvailableStoresResult + ): Either[String, Seq[String]] = + Right(response.storeIds) + } + + final case class ListAll(query: BaseQueryX) + extends BaseCommand[ + v1.ListAllRequest, + v1.ListAllResponse, + GenericStoredTopologyTransactionsX, + ] { + override def createRequest(): Either[String, v1.ListAllRequest] = + Right(new v1.ListAllRequest(Some(query.toProtoV1))) + + override def submitRequest( + service: TopologyManagerReadServiceXStub, + request: v1.ListAllRequest, + ): Future[v1.ListAllResponse] = service.listAll(request) + + override def handleResponse( + response: v1.ListAllResponse + ): Either[String, GenericStoredTopologyTransactionsX] = + response.result + .fold[Either[String, GenericStoredTopologyTransactionsX]]( + Right(StoredTopologyTransactionsX.empty) + ) { collection => + StoredTopologyTransactionsX.fromProtoV0(collection).leftMap(_.toString) + } + } + } + + object Write { + abstract class BaseWriteCommand[Req, Res, Ret] extends GrpcAdminCommand[Req, Res, Ret] { + override type Svc = TopologyManagerWriteServiceXStub + + override def createService(channel: ManagedChannel): TopologyManagerWriteServiceXStub = + v1.TopologyManagerWriteServiceXGrpc.stub(channel) + + // command will potentially take a long time + 
override def timeoutType: TimeoutType = DefaultUnboundedTimeout + } + + final case class AddTransactions( + transactions: Seq[GenericSignedTopologyTransactionX], + store: String, + ) extends BaseWriteCommand[AddTransactionsRequest, AddTransactionsResponse, Unit] { + override def createRequest(): Either[String, AddTransactionsRequest] = { + Right(AddTransactionsRequest(transactions.map(_.toProtoV2), forceChange = false, store)) + } + override def submitRequest( + service: TopologyManagerWriteServiceXStub, + request: AddTransactionsRequest, + ): Future[AddTransactionsResponse] = service.addTransactions(request) + override def handleResponse(response: AddTransactionsResponse): Either[String, Unit] = + Right(()) + } + + final case class SignTransactions( + transactions: Seq[GenericSignedTopologyTransactionX], + signedBy: Seq[Fingerprint], + ) extends BaseWriteCommand[SignTransactionsRequest, SignTransactionsResponse, Seq[ + GenericSignedTopologyTransactionX + ]] { + override def createRequest(): Either[String, SignTransactionsRequest] = { + Right( + SignTransactionsRequest(transactions.map(_.toProtoV2), signedBy.map(_.toProtoPrimitive)) + ) + } + + override def submitRequest( + service: TopologyManagerWriteServiceXStub, + request: SignTransactionsRequest, + ): Future[SignTransactionsResponse] = service.signTransactions(request) + + override def handleResponse( + response: SignTransactionsResponse + ): Either[String, Seq[GenericSignedTopologyTransactionX]] = + response.transactions.traverse(SignedTopologyTransactionX.fromProtoV2).leftMap(_.message) + } + + final case class Propose[M <: TopologyMappingX: ClassTag]( + mapping: Either[String, M], + signedBy: Seq[Fingerprint], + change: TopologyChangeOpX, + serial: Option[PositiveInt], + mustFullyAuthorize: Boolean, + forceChange: Boolean, + store: String, + ) extends BaseWriteCommand[ + AuthorizeRequest, + AuthorizeResponse, + SignedTopologyTransactionX[TopologyChangeOpX, M], + ] { + + override def createRequest(): 
Either[String, AuthorizeRequest] = mapping.map(m => + AuthorizeRequest( + Proposal( + AuthorizeRequest.Proposal( + change.toProto, + serial.map(_.value).getOrElse(0), + Some(m.toProtoV2), + ) + ), + mustFullyAuthorize = mustFullyAuthorize, + forceChange = false, + signedBy = signedBy.map(_.toProtoPrimitive), + store, + ) + ) + override def submitRequest( + service: TopologyManagerWriteServiceXStub, + request: AuthorizeRequest, + ): Future[AuthorizeResponse] = service.authorize(request) + + override def handleResponse( + response: AuthorizeResponse + ): Either[String, SignedTopologyTransactionX[TopologyChangeOpX, M]] = response.transaction + .toRight("no transaction in response") + .flatMap( + SignedTopologyTransactionX + .fromProtoV2(_) + .leftMap(_.message) + .flatMap(tx => + tx.selectMapping[M] + .toRight( + s"Expected mapping ${ClassTag[M].getClass.getSimpleName}, but received: ${tx.transaction.mapping.getClass.getSimpleName}" + ) + ) + ) + } + object Propose { + def apply[M <: TopologyMappingX: ClassTag]( + mapping: M, + signedBy: Seq[Fingerprint], + store: String, + serial: Option[PositiveInt] = None, + change: TopologyChangeOpX = TopologyChangeOpX.Replace, + mustFullyAuthorize: Boolean = false, + forceChange: Boolean = false, + ): Propose[M] = + Propose(Right(mapping), signedBy, change, serial, mustFullyAuthorize, forceChange, store) + + } + + final case class Authorize[M <: TopologyMappingX: ClassTag]( + transactionHash: String, + mustFullyAuthorize: Boolean, + signedBy: Seq[Fingerprint], + store: String, + ) extends BaseWriteCommand[ + AuthorizeRequest, + AuthorizeResponse, + SignedTopologyTransactionX[TopologyChangeOpX, M], + ] { + + override def createRequest(): Either[String, AuthorizeRequest] = Right( + AuthorizeRequest( + TransactionHash(transactionHash), + mustFullyAuthorize = mustFullyAuthorize, + forceChange = false, + signedBy = signedBy.map(_.toProtoPrimitive), + store = store, + ) + ) + + override def submitRequest( + service: 
TopologyManagerWriteServiceXStub, + request: AuthorizeRequest, + ): Future[AuthorizeResponse] = service.authorize(request) + + override def handleResponse( + response: AuthorizeResponse + ): Either[String, SignedTopologyTransactionX[TopologyChangeOpX, M]] = response.transaction + .toRight("no transaction in response") + .flatMap( + SignedTopologyTransactionX + .fromProtoV2(_) + .leftMap(_.message) + .flatMap(tx => + tx.selectMapping[M] + .toRight( + s"Expected mapping ${ClassTag[M].getClass.getSimpleName}, but received: ${tx.transaction.mapping.getClass.getSimpleName}" + ) + ) + ) + } + } + + object Init { + + abstract class BaseInitializationService[Req, Resp, Res] + extends GrpcAdminCommand[Req, Resp, Res] { + override type Svc = IdentityInitializationServiceXStub + override def createService(channel: ManagedChannel): IdentityInitializationServiceXStub = + v1.IdentityInitializationServiceXGrpc.stub(channel) + } + + final case class InitId(identifier: String) + extends BaseInitializationService[v1.InitIdRequest, v1.InitIdResponse, Unit] { + + override def createRequest(): Either[String, v1.InitIdRequest] = + Right(v1.InitIdRequest(identifier)) + + override def submitRequest( + service: IdentityInitializationServiceXStub, + request: v1.InitIdRequest, + ): Future[v1.InitIdResponse] = + service.initId(request) + + override def handleResponse(response: v1.InitIdResponse): Either[String, Unit] = + Right(()) + } + + final case class GetId() + extends BaseInitializationService[Empty, v1.GetIdResponse, UniqueIdentifier] { + override def createRequest(): Either[String, Empty] = + Right(Empty()) + + override def submitRequest( + service: IdentityInitializationServiceXStub, + request: Empty, + ): Future[v1.GetIdResponse] = + service.getId(request) + + override def handleResponse( + response: v1.GetIdResponse + ): Either[String, UniqueIdentifier] = { + if (response.uniqueIdentifier.nonEmpty) + UniqueIdentifier.fromProtoPrimitive_(response.uniqueIdentifier) + else + Left( + 
s"Node is not initialized and therefore does not have an Id assigned yet." + ) + } + } + } +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/VaultAdminCommands.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/VaultAdminCommands.scala new file mode 100644 index 0000000000..aca8cad816 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/VaultAdminCommands.scala @@ -0,0 +1,371 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.admin.api.client.commands + +import cats.syntax.either.* +import cats.syntax.traverse.* +import com.digitalasset.canton.admin.api.client.commands.GrpcAdminCommand.{ + DefaultUnboundedTimeout, + TimeoutType, +} +import com.digitalasset.canton.crypto.admin.grpc.PrivateKeyMetadata +import com.digitalasset.canton.crypto.admin.v0 +import com.digitalasset.canton.crypto.admin.v0.VaultServiceGrpc.VaultServiceStub +import com.digitalasset.canton.crypto.{PublicKeyWithName, v0 as cryptoproto, *} +import com.digitalasset.canton.util.{EitherUtil, OptionUtil} +import com.digitalasset.canton.version.ProtocolVersion +import com.google.protobuf.ByteString +import com.google.protobuf.empty.Empty +import io.grpc.ManagedChannel + +import scala.concurrent.Future + +object VaultAdminCommands { + + abstract class BaseVaultAdminCommand[Req, Res, Result] + extends GrpcAdminCommand[Req, Res, Result] { + override type Svc = VaultServiceStub + override def createService(channel: ManagedChannel): VaultServiceStub = + v0.VaultServiceGrpc.stub(channel) + } + + abstract class ListKeys[R, T]( + filterFingerprint: String, + filterName: String, + filterPurpose: Set[KeyPurpose] = Set.empty, + ) extends BaseVaultAdminCommand[v0.ListKeysRequest, R, Seq[T]] { + + override def 
createRequest(): Either[String, v0.ListKeysRequest] = + Right( + v0.ListKeysRequest( + filterFingerprint = filterFingerprint, + filterName = filterName, + filterPurpose = filterPurpose.map(_.toProtoEnum).toSeq, + ) + ) + } + + // list keys in my key vault + final case class ListMyKeys( + filterFingerprint: String, + filterName: String, + filterPurpose: Set[KeyPurpose] = Set.empty, + ) extends ListKeys[v0.ListMyKeysResponse, PrivateKeyMetadata]( + filterFingerprint, + filterName, + filterPurpose, + ) { + + override def submitRequest( + service: VaultServiceStub, + request: v0.ListKeysRequest, + ): Future[v0.ListMyKeysResponse] = + service.listMyKeys(request) + + override def handleResponse( + response: v0.ListMyKeysResponse + ): Either[String, Seq[PrivateKeyMetadata]] = + response.privateKeysMetadata.traverse(PrivateKeyMetadata.fromProtoV0).leftMap(_.toString) + } + + // list public keys in key registry + final case class ListPublicKeys( + filterFingerprint: String, + filterName: String, + filterPurpose: Set[KeyPurpose] = Set.empty, + ) extends ListKeys[v0.ListKeysResponse, PublicKeyWithName]( + filterFingerprint, + filterName, + filterPurpose, + ) { + + override def submitRequest( + service: VaultServiceStub, + request: v0.ListKeysRequest, + ): Future[v0.ListKeysResponse] = + service.listPublicKeys(request) + + override def handleResponse( + response: v0.ListKeysResponse + ): Either[String, Seq[PublicKeyWithName]] = + response.publicKeys.traverse(PublicKeyWithName.fromProtoV0).leftMap(_.toString) + } + + abstract class BaseImportPublicKey + extends BaseVaultAdminCommand[ + v0.ImportPublicKeyRequest, + v0.ImportPublicKeyResponse, + Fingerprint, + ] { + + override def submitRequest( + service: VaultServiceStub, + request: v0.ImportPublicKeyRequest, + ): Future[v0.ImportPublicKeyResponse] = + service.importPublicKey(request) + + override def handleResponse(response: v0.ImportPublicKeyResponse): Either[String, Fingerprint] = + 
Fingerprint.fromProtoPrimitive(response.fingerprint).leftMap(_.toString) + } + + // upload a public key into the key registry + final case class ImportPublicKey(publicKey: ByteString, name: Option[String]) + extends BaseImportPublicKey { + + override def createRequest(): Either[String, v0.ImportPublicKeyRequest] = + Right(v0.ImportPublicKeyRequest(publicKey = publicKey, name = name.getOrElse(""))) + } + + final case class GenerateSigningKey(name: String, scheme: Option[SigningKeyScheme]) + extends BaseVaultAdminCommand[ + v0.GenerateSigningKeyRequest, + v0.GenerateSigningKeyResponse, + SigningPublicKey, + ] { + + override def createRequest(): Either[String, v0.GenerateSigningKeyRequest] = + Right( + v0.GenerateSigningKeyRequest( + name = name, + keyScheme = scheme.fold[cryptoproto.SigningKeyScheme]( + cryptoproto.SigningKeyScheme.MissingSigningKeyScheme + )(_.toProtoEnum), + ) + ) + + override def submitRequest( + service: VaultServiceStub, + request: v0.GenerateSigningKeyRequest, + ): Future[v0.GenerateSigningKeyResponse] = { + service.generateSigningKey(request) + } + + override def handleResponse( + response: v0.GenerateSigningKeyResponse + ): Either[String, SigningPublicKey] = + response.publicKey + .toRight("No public key returned") + .flatMap(k => SigningPublicKey.fromProtoV0(k).leftMap(_.toString)) + + // may take some time if we need to wait for entropy + override def timeoutType: TimeoutType = DefaultUnboundedTimeout + + } + + final case class GenerateEncryptionKey(name: String, scheme: Option[EncryptionKeyScheme]) + extends BaseVaultAdminCommand[ + v0.GenerateEncryptionKeyRequest, + v0.GenerateEncryptionKeyResponse, + EncryptionPublicKey, + ] { + + override def createRequest(): Either[String, v0.GenerateEncryptionKeyRequest] = + Right( + v0.GenerateEncryptionKeyRequest( + name = name, + keyScheme = scheme.fold[cryptoproto.EncryptionKeyScheme]( + cryptoproto.EncryptionKeyScheme.MissingEncryptionKeyScheme + )(_.toProtoEnum), + ) + ) + + override def 
submitRequest( + service: VaultServiceStub, + request: v0.GenerateEncryptionKeyRequest, + ): Future[v0.GenerateEncryptionKeyResponse] = { + service.generateEncryptionKey(request) + } + + override def handleResponse( + response: v0.GenerateEncryptionKeyResponse + ): Either[String, EncryptionPublicKey] = + response.publicKey + .toRight("No public key returned") + .flatMap(k => EncryptionPublicKey.fromProtoV0(k).leftMap(_.toString)) + + // may time some time if we need to wait for entropy + override def timeoutType: TimeoutType = DefaultUnboundedTimeout + + } + + final case class RegisterKmsSigningKey(kmsKeyId: String, name: String) + extends BaseVaultAdminCommand[ + v0.RegisterKmsSigningKeyRequest, + v0.RegisterKmsSigningKeyResponse, + SigningPublicKey, + ] { + + override def createRequest(): Either[String, v0.RegisterKmsSigningKeyRequest] = + Right( + v0.RegisterKmsSigningKeyRequest( + kmsKeyId = kmsKeyId, + name = name, + ) + ) + + override def submitRequest( + service: VaultServiceStub, + request: v0.RegisterKmsSigningKeyRequest, + ): Future[v0.RegisterKmsSigningKeyResponse] = { + service.registerKmsSigningKey(request) + } + + override def handleResponse( + response: v0.RegisterKmsSigningKeyResponse + ): Either[String, SigningPublicKey] = + response.publicKey + .toRight("No public key returned") + .flatMap(k => SigningPublicKey.fromProtoV0(k).leftMap(_.toString)) + + } + + final case class RegisterKmsEncryptionKey(kmsKeyId: String, name: String) + extends BaseVaultAdminCommand[ + v0.RegisterKmsEncryptionKeyRequest, + v0.RegisterKmsEncryptionKeyResponse, + EncryptionPublicKey, + ] { + + override def createRequest(): Either[String, v0.RegisterKmsEncryptionKeyRequest] = + Right( + v0.RegisterKmsEncryptionKeyRequest( + kmsKeyId = kmsKeyId, + name = name, + ) + ) + + override def submitRequest( + service: VaultServiceStub, + request: v0.RegisterKmsEncryptionKeyRequest, + ): Future[v0.RegisterKmsEncryptionKeyResponse] = { + service.registerKmsEncryptionKey(request) + } 
+ + override def handleResponse( + response: v0.RegisterKmsEncryptionKeyResponse + ): Either[String, EncryptionPublicKey] = + response.publicKey + .toRight("No public key returned") + .flatMap(k => EncryptionPublicKey.fromProtoV0(k).leftMap(_.toString)) + + } + + final case class RotateWrapperKey(newWrapperKeyId: String) + extends BaseVaultAdminCommand[ + v0.RotateWrapperKeyRequest, + Empty, + Unit, + ] { + + override def createRequest(): Either[String, v0.RotateWrapperKeyRequest] = + Right( + v0.RotateWrapperKeyRequest( + newWrapperKeyId = newWrapperKeyId + ) + ) + + override def submitRequest( + service: VaultServiceStub, + request: v0.RotateWrapperKeyRequest, + ): Future[Empty] = { + service.rotateWrapperKey(request) + } + + override def handleResponse(response: Empty): Either[String, Unit] = Right(()) + + } + + final case class GetWrapperKeyId() + extends BaseVaultAdminCommand[ + v0.GetWrapperKeyIdRequest, + v0.GetWrapperKeyIdResponse, + String, + ] { + + override def createRequest(): Either[String, v0.GetWrapperKeyIdRequest] = + Right( + v0.GetWrapperKeyIdRequest() + ) + + override def submitRequest( + service: VaultServiceStub, + request: v0.GetWrapperKeyIdRequest, + ): Future[v0.GetWrapperKeyIdResponse] = { + service.getWrapperKeyId(request) + } + + override def handleResponse( + response: v0.GetWrapperKeyIdResponse + ): Either[String, String] = + Right(response.wrapperKeyId) + + } + + final case class ImportKeyPair(keyPair: ByteString, name: Option[String]) + extends BaseVaultAdminCommand[ + v0.ImportKeyPairRequest, + v0.ImportKeyPairResponse, + Unit, + ] { + + override def createRequest(): Either[String, v0.ImportKeyPairRequest] = + Right(v0.ImportKeyPairRequest(keyPair = keyPair, name = OptionUtil.noneAsEmptyString(name))) + + override def submitRequest( + service: VaultServiceStub, + request: v0.ImportKeyPairRequest, + ): Future[v0.ImportKeyPairResponse] = + service.importKeyPair(request) + + override def handleResponse(response: 
v0.ImportKeyPairResponse): Either[String, Unit] = + EitherUtil.unit + } + + final case class ExportKeyPair(fingerprint: Fingerprint, protocolVersion: ProtocolVersion) + extends BaseVaultAdminCommand[ + v0.ExportKeyPairRequest, + v0.ExportKeyPairResponse, + ByteString, + ] { + + override def createRequest(): Either[String, v0.ExportKeyPairRequest] = { + Right( + v0.ExportKeyPairRequest( + fingerprint = fingerprint.toProtoPrimitive, + protocolVersion = protocolVersion.toProtoPrimitive, + ) + ) + } + + override def submitRequest( + service: VaultServiceStub, + request: v0.ExportKeyPairRequest, + ): Future[v0.ExportKeyPairResponse] = + service.exportKeyPair(request) + + override def handleResponse(response: v0.ExportKeyPairResponse): Either[String, ByteString] = + Right(response.keyPair) + } + + final case class DeleteKeyPair(fingerprint: Fingerprint) + extends BaseVaultAdminCommand[ + v0.DeleteKeyPairRequest, + v0.DeleteKeyPairResponse, + Unit, + ] { + + override def createRequest(): Either[String, v0.DeleteKeyPairRequest] = { + Right(v0.DeleteKeyPairRequest(fingerprint = fingerprint.toProtoPrimitive)) + } + + override def submitRequest( + service: VaultServiceStub, + request: v0.DeleteKeyPairRequest, + ): Future[v0.DeleteKeyPairResponse] = + service.deleteKeyPair(request) + + override def handleResponse(response: v0.DeleteKeyPairResponse): Either[String, Unit] = + EitherUtil.unit + } +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/CommunityCantonStatus.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/CommunityCantonStatus.scala new file mode 100644 index 0000000000..4712517663 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/CommunityCantonStatus.scala @@ -0,0 +1,104 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.admin.api.client.data + +import cats.Show +import com.digitalasset.canton.admin.api.client.data.CantonStatus.splitSuccessfulAndFailedStatus +import com.digitalasset.canton.console.{DomainReference, ParticipantReference} +import com.digitalasset.canton.health.admin.data.{DomainStatus, NodeStatus, ParticipantStatus} +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.util.ShowUtil.* + +trait CantonStatus extends PrettyPrinting { + protected def descriptions[Status <: NodeStatus.Status]( + statusMap: Map[String, Status], + failureMap: Map[String, NodeStatus.Failure], + instanceType: String, + ): Seq[String] = { + + val success = sort(statusMap) + .map { case (d, status) => + show"Status for ${instanceType.unquoted} ${d.singleQuoted}:\n$status" + } + + val failure = sort(failureMap) + .map { case (d, status) => + show"${instanceType.unquoted} ${d.singleQuoted} cannot be reached: ${status.msg}" + } + + success ++ failure + } + + private def sort[K: Ordering, V](status: Map[K, V]): Seq[(K, V)] = + status.toSeq.sortBy(_._1) +} + +object CantonStatus { + def splitSuccessfulAndFailedStatus[K: Show, S <: NodeStatus.Status]( + nodes: Map[K, () => NodeStatus[S]], + instanceType: String, + ): (Map[K, S], Map[K, NodeStatus.Failure]) = { + val map: Map[K, NodeStatus[S]] = + nodes.map { case (node, getStatus) => + node -> getStatus() + } + val status: Map[K, S] = + map.collect { case (n, NodeStatus.Success(status)) => + n -> status + } + val unreachable: Map[K, NodeStatus.Failure] = + map.collect { + case (s, entry: NodeStatus.Failure) => s -> entry + case (s, _: NodeStatus.NotInitialized) => + s -> NodeStatus.Failure( + s"${instanceType.unquoted} ${s.show.singleQuoted} has not been initialized" + ) + } + (status, unreachable) + } +} + +object CommunityCantonStatus { + def getStatus( + domains: Map[String, () => NodeStatus[DomainStatus]], + participants: 
Map[String, () => NodeStatus[ParticipantStatus]], + ): CommunityCantonStatus = { + val (domainStatus, unreachableDomains) = + splitSuccessfulAndFailedStatus(domains, DomainReference.InstanceType) + val (participantStatus, unreachableParticipants) = + splitSuccessfulAndFailedStatus(participants, ParticipantReference.InstanceType) + + CommunityCantonStatus( + domainStatus, + unreachableDomains, + participantStatus, + unreachableParticipants, + ) + } +} + +final case class CommunityCantonStatus( + domainStatus: Map[String, DomainStatus], + unreachableDomains: Map[String, NodeStatus.Failure], + participantStatus: Map[String, ParticipantStatus], + unreachableParticipants: Map[String, NodeStatus.Failure], +) extends CantonStatus { + def tupled: (Map[String, DomainStatus], Map[String, ParticipantStatus]) = + (domainStatus, participantStatus) + + override def pretty: Pretty[CommunityCantonStatus] = prettyOfString { _ => + val domains = descriptions( + domainStatus, + unreachableDomains, + DomainReference.InstanceType, + ) + val participants = + descriptions( + participantStatus, + unreachableParticipants, + ParticipantReference.InstanceType, + ) + (domains ++ participants).mkString(System.lineSeparator() * 2) + } +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/ConsoleApiDataObjects.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/ConsoleApiDataObjects.scala new file mode 100644 index 0000000000..c086344213 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/ConsoleApiDataObjects.scala @@ -0,0 +1,52 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.admin.api.client.data + +import com.digitalasset.canton.DomainAlias +import com.digitalasset.canton.participant.admin.{v0 as participantAdminV0} +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.topology.* + +final case class ListConnectedDomainsResult( + domainAlias: DomainAlias, + domainId: DomainId, + healthy: Boolean, +) + +object ListConnectedDomainsResult { + def fromProtoV0( + value: participantAdminV0.ListConnectedDomainsResponse.Result + ): ParsingResult[ListConnectedDomainsResult] = { + val participantAdminV0.ListConnectedDomainsResponse.Result(domainAlias, domainId, healthy) = + value + for { + domainId <- DomainId.fromProtoPrimitive(domainId, "domainId") + domainAlias <- DomainAlias.fromProtoPrimitive(domainAlias) + + } yield ListConnectedDomainsResult( + domainAlias = domainAlias, + domainId = domainId, + healthy = healthy, + ) + + } +} + +final case class DarMetadata( + name: String, + main: String, + packages: Seq[String], + dependencies: Seq[String], +) + +object DarMetadata { + def fromProtoV0( + value: participantAdminV0.ListDarContentsResponse + ): ParsingResult[DarMetadata] = { + val participantAdminV0.ListDarContentsResponse(description, main, packages, dependencies) = + value + Right(DarMetadata(description, main, packages, dependencies)) + } + +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/DomainParameters.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/DomainParameters.scala new file mode 100644 index 0000000000..2c3b3b34f1 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/DomainParameters.scala @@ -0,0 +1,180 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.admin.api.client.data + +import cats.syntax.either.* +import com.daml.nonempty.NonEmptyUtil +import com.digitalasset.canton.admin.api.client.data.crypto.* +import com.digitalasset.canton.config.RequireTypes.NonNegativeInt +import com.digitalasset.canton.config.{NonNegativeFiniteDuration, PositiveDurationSeconds} +import com.digitalasset.canton.protocol.DynamicDomainParameters.InvalidDynamicDomainParameters +import com.digitalasset.canton.protocol.{ + DynamicDomainParameters as DynamicDomainParametersInternal, + StaticDomainParameters as StaticDomainParametersInternal, + v2 as protocolV2, +} +import com.digitalasset.canton.topology.admin.v0.DomainParametersChangeAuthorization +import com.digitalasset.canton.util.BinaryFileUtil +import com.digitalasset.canton.version.ProtocolVersion +import com.digitalasset.canton.{crypto as DomainCrypto} +import com.google.common.annotations.VisibleForTesting +import io.scalaland.chimney.dsl.* + +import scala.Ordering.Implicits.* + +final case class StaticDomainParameters( + uniqueContractKeys: Boolean, + requiredSigningKeySchemes: Set[SigningKeyScheme], + requiredEncryptionKeySchemes: Set[EncryptionKeyScheme], + requiredSymmetricKeySchemes: Set[SymmetricKeyScheme], + requiredHashAlgorithms: Set[HashAlgorithm], + requiredCryptoKeyFormats: Set[CryptoKeyFormat], + protocolVersion: ProtocolVersion, +) { + def writeToFile(outputFile: String): Unit = + BinaryFileUtil.writeByteStringToFile(outputFile, toInternal.toByteString) + + private[canton] def toInternal: StaticDomainParametersInternal = + StaticDomainParametersInternal.create( + uniqueContractKeys = uniqueContractKeys, + requiredSigningKeySchemes = NonEmptyUtil.fromUnsafe( + requiredSigningKeySchemes.map(_.transformInto[DomainCrypto.SigningKeyScheme]) + ), + requiredEncryptionKeySchemes = NonEmptyUtil.fromUnsafe( + requiredEncryptionKeySchemes.map(_.transformInto[DomainCrypto.EncryptionKeyScheme]) + ), + 
requiredSymmetricKeySchemes = NonEmptyUtil.fromUnsafe( + requiredSymmetricKeySchemes.map(_.transformInto[DomainCrypto.SymmetricKeyScheme]) + ), + requiredHashAlgorithms = NonEmptyUtil.fromUnsafe( + requiredHashAlgorithms.map(_.transformInto[DomainCrypto.HashAlgorithm]) + ), + requiredCryptoKeyFormats = NonEmptyUtil.fromUnsafe( + requiredCryptoKeyFormats.map(_.transformInto[DomainCrypto.CryptoKeyFormat]) + ), + protocolVersion = protocolVersion, + ) +} + +object StaticDomainParameters { + + def apply( + domain: StaticDomainParametersInternal + ): StaticDomainParameters = + StaticDomainParameters( + uniqueContractKeys = domain.uniqueContractKeys, + requiredSigningKeySchemes = + domain.requiredSigningKeySchemes.forgetNE.map(_.transformInto[SigningKeyScheme]), + requiredEncryptionKeySchemes = + domain.requiredEncryptionKeySchemes.forgetNE.map(_.transformInto[EncryptionKeyScheme]), + requiredSymmetricKeySchemes = + domain.requiredSymmetricKeySchemes.forgetNE.map(_.transformInto[SymmetricKeyScheme]), + requiredHashAlgorithms = + domain.requiredHashAlgorithms.forgetNE.map(_.transformInto[HashAlgorithm]), + requiredCryptoKeyFormats = + domain.requiredCryptoKeyFormats.forgetNE.map(_.transformInto[CryptoKeyFormat]), + protocolVersion = domain.protocolVersion, + ) + + def tryReadFromFile(inputFile: String): StaticDomainParameters = { + val staticDomainParametersInternal = StaticDomainParametersInternal + .readFromFile(inputFile) + .valueOr(err => + throw new IllegalArgumentException( + s"Reading static domain parameters from file $inputFile failed: $err" + ) + ) + + StaticDomainParameters(staticDomainParametersInternal) + } +} + +// TODO(#15650) Properly expose new BFT parameters and domain limits +final case class DynamicDomainParameters( + participantResponseTimeout: NonNegativeFiniteDuration, + mediatorReactionTimeout: NonNegativeFiniteDuration, + transferExclusivityTimeout: NonNegativeFiniteDuration, + topologyChangeDelay: NonNegativeFiniteDuration, + 
ledgerTimeRecordTimeTolerance: NonNegativeFiniteDuration, + mediatorDeduplicationTimeout: NonNegativeFiniteDuration, + reconciliationInterval: PositiveDurationSeconds, + maxRatePerParticipant: NonNegativeInt, + maxRequestSize: NonNegativeInt, + sequencerAggregateSubmissionTimeout: NonNegativeFiniteDuration, +) { + + if (ledgerTimeRecordTimeTolerance * 2 > mediatorDeduplicationTimeout) + throw new InvalidDynamicDomainParameters( + s"The ledgerTimeRecordTimeTolerance ($ledgerTimeRecordTimeTolerance) must be at most half of the " + + s"mediatorDeduplicationTimeout ($mediatorDeduplicationTimeout)." + ) + + // https://docs.google.com/document/d/1tpPbzv2s6bjbekVGBn6X5VZuw0oOTHek5c30CBo4UkI/edit#bookmark=id.1dzc6dxxlpca + private[canton] def compatibleWithNewLedgerTimeRecordTimeTolerance( + newLedgerTimeRecordTimeTolerance: NonNegativeFiniteDuration + ): Boolean = { + // If false, a new request may receive the same ledger time as a previous request and the previous + // request may be evicted too early from the mediator's deduplication store. + // Thus, an attacker may assign the same UUID to both requests. + // See i9028 for a detailed design. (This is the second clause of item 2 of Lemma 2). 
+ ledgerTimeRecordTimeTolerance + newLedgerTimeRecordTimeTolerance <= mediatorDeduplicationTimeout + } + + def update( + participantResponseTimeout: NonNegativeFiniteDuration = participantResponseTimeout, + mediatorReactionTimeout: NonNegativeFiniteDuration = mediatorReactionTimeout, + transferExclusivityTimeout: NonNegativeFiniteDuration = transferExclusivityTimeout, + topologyChangeDelay: NonNegativeFiniteDuration = topologyChangeDelay, + ledgerTimeRecordTimeTolerance: NonNegativeFiniteDuration = ledgerTimeRecordTimeTolerance, + ): DynamicDomainParameters = this.copy( + participantResponseTimeout = participantResponseTimeout, + mediatorReactionTimeout = mediatorReactionTimeout, + transferExclusivityTimeout = transferExclusivityTimeout, + topologyChangeDelay = topologyChangeDelay, + ledgerTimeRecordTimeTolerance = ledgerTimeRecordTimeTolerance, + ) + + def toProto: DomainParametersChangeAuthorization.Parameters = + DomainParametersChangeAuthorization.Parameters.ParametersV1( + protocolV2.DynamicDomainParameters( + participantResponseTimeout = Some(participantResponseTimeout.toProtoPrimitive), + mediatorReactionTimeout = Some(mediatorReactionTimeout.toProtoPrimitive), + transferExclusivityTimeout = Some(transferExclusivityTimeout.toProtoPrimitive), + topologyChangeDelay = Some(topologyChangeDelay.toProtoPrimitive), + ledgerTimeRecordTimeTolerance = Some(ledgerTimeRecordTimeTolerance.toProtoPrimitive), + mediatorDeduplicationTimeout = Some(mediatorDeduplicationTimeout.toProtoPrimitive), + reconciliationInterval = Some(reconciliationInterval.toProtoPrimitive), + defaultParticipantLimits = Some( + protocolV2.ParticipantDomainLimits( + maxRate = maxRatePerParticipant.unwrap, + maxNumParties = 0, + maxNumPackages = 0, + ) + ), + maxRequestSize = maxRequestSize.unwrap, + permissionedDomain = false, + requiredPackages = Nil, + onlyRequiredPackagesPermitted = false, + defaultMaxHostingParticipantsPerParty = 0, + sequencerAggregateSubmissionTimeout = + 
Some(sequencerAggregateSubmissionTimeout.toProtoPrimitive), + trafficControlParameters = None, + ) + ) +} + +object DynamicDomainParameters { + + /** Default dynamic domain parameters for non-static clocks */ + @VisibleForTesting + def defaultValues(protocolVersion: ProtocolVersion): DynamicDomainParameters = + DynamicDomainParameters( + DynamicDomainParametersInternal.defaultValues(protocolVersion) + ) + + def apply( + domain: DynamicDomainParametersInternal + ): DynamicDomainParameters = + domain.transformInto[DynamicDomainParameters] +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/LedgerApiObjectMeta.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/LedgerApiObjectMeta.scala new file mode 100644 index 0000000000..018f38990f --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/LedgerApiObjectMeta.scala @@ -0,0 +1,14 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.admin.api.client.data + +final case class LedgerApiObjectMeta( + resourceVersion: String, + annotations: Map[String, String], +) + +object LedgerApiObjectMeta { + def empty: LedgerApiObjectMeta = + LedgerApiObjectMeta(resourceVersion = "", annotations = Map.empty) +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/Metering.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/Metering.scala new file mode 100644 index 0000000000..4e7a2d8158 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/Metering.scala @@ -0,0 +1,66 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.admin.api.client.data + +import com.daml.ledger.api.v1.admin.metering_report_service.GetMeteringReportResponse +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.google.protobuf.struct +import com.google.protobuf.struct.Value.Kind +import com.google.protobuf.struct.{ListValue, Struct} +import io.circe.Decoder.Result +import io.circe.Json.* +import io.circe.* + +object LedgerMeteringReport { + + def fromProtoV0( + value: GetMeteringReportResponse + ): ParsingResult[String] = { + + for { + s <- ProtoConverter.required("meteringReportJson", value.meteringReportJson) + } yield { + StructEncoderDecoder(s).spaces2 + } + + } +} + +object StructEncoderDecoder extends Encoder[struct.Struct] with Decoder[struct.Struct] { + + override def apply(s: struct.Struct): Json = { + write(struct.Value.of(Kind.StructValue(s))) + } + + override def apply(c: HCursor): Result[struct.Struct] = { + val value = read(c.value) + if (value.kind.isStructValue) Right(value.getStructValue) + else Left(DecodingFailure(s"Expected struct, not $value", Nil)) + } + + private def write(value: struct.Value): Json = { + value.kind match { + case Kind.BoolValue(v) => Json.fromBoolean(v) + case Kind.ListValue(v) => Json.fromValues(v.values.map(write)) + case Kind.NumberValue(v) => Json.fromDoubleOrNull(v) + case Kind.StringValue(v) => Json.fromString(v) + case Kind.StructValue(v) => Json.fromFields(v.fields.view.mapValues(write)) + case Kind.Empty | Kind.NullValue(_) => Json.Null + } + } + + object StructFolder extends Folder[Kind] { + def onNull = Kind.NullValue(struct.NullValue.NULL_VALUE) + def onBoolean(value: Boolean) = Kind.BoolValue(value) + def onNumber(value: JsonNumber) = Kind.NumberValue(value.toDouble) + def onString(value: String) = Kind.StringValue(value) + def onArray(value: Vector[Json]) = 
Kind.ListValue(ListValue(value.map(read))) + def onObject(value: JsonObject) = + Kind.StructValue(Struct.of(value.toMap.view.mapValues(read).toMap)) + } + + private def read(c: Json): struct.Value = struct.Value.of(c.foldWith(StructFolder)) + +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/PartyDetails.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/PartyDetails.scala new file mode 100644 index 0000000000..9d26fca780 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/PartyDetails.scala @@ -0,0 +1,49 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.admin.api.client.data + +import com.daml.ledger.api.v1.admin.object_meta.ObjectMeta as ProtoObjectMeta +import com.daml.ledger.api.v1.admin.party_management_service.PartyDetails as ProtoPartyDetails +import com.digitalasset.canton.topology.PartyId + +import scala.util.control.NoStackTrace + +/** Represents a party details value exposed in the Canton console + */ +final case class PartyDetails( + party: PartyId, + displayName: String, + isLocal: Boolean, + annotations: Map[String, String], + identityProviderId: String, +) + +object PartyDetails { + def fromProtoPartyDetails(details: ProtoPartyDetails): PartyDetails = PartyDetails( + party = PartyId.tryFromProtoPrimitive(details.party), + displayName = details.displayName, + isLocal = details.isLocal, + annotations = details.localMetadata.fold(Map.empty[String, String])(_.annotations), + identityProviderId = details.identityProviderId, + ) + def toProtoPartyDetails( + details: PartyDetails, + resourceVersionO: Option[String], + ): ProtoPartyDetails = ProtoPartyDetails( + party = details.party.toString, + displayName = details.displayName, + isLocal = details.isLocal, + localMetadata 
= Some( + ProtoObjectMeta( + resourceVersion = resourceVersionO.getOrElse(""), + annotations = details.annotations, + ) + ), + identityProviderId = details.identityProviderId, + ) +} + +final case class ModifyingNonModifiablePartyDetailsPropertiesError() + extends RuntimeException("MODIFYING_AN_UNMODIFIABLE_PARTY_DETAILS_PROPERTY_ERROR") + with NoStackTrace diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/PruningSchedule.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/PruningSchedule.scala new file mode 100644 index 0000000000..ea390346fd --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/PruningSchedule.scala @@ -0,0 +1,54 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.admin.api.client.data + +import com.digitalasset.canton.pruning.admin.v0 +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.{config, participant, scheduler} + +final case class PruningSchedule( + cron: String, + maxDuration: config.PositiveDurationSeconds, + retention: config.PositiveDurationSeconds, +) + +object PruningSchedule { + private[admin] def fromProtoV0(scheduleP: v0.PruningSchedule): ParsingResult[PruningSchedule] = + for { + maxDuration <- config.PositiveDurationSeconds.fromProtoPrimitiveO("max_duration")( + scheduleP.maxDuration + ) + retention <- config.PositiveDurationSeconds.fromProtoPrimitiveO("retention")( + scheduleP.retention + ) + } yield PruningSchedule(scheduleP.cron, maxDuration, retention) + + private[data] def fromInternal( + internalSchedule: scheduler.PruningSchedule + ): PruningSchedule = + PruningSchedule( + internalSchedule.cron.toProtoPrimitive, + config.PositiveDurationSeconds(internalSchedule.maxDuration.toScala), + 
config.PositiveDurationSeconds(internalSchedule.retention.toScala), + ) +} + +final case class ParticipantPruningSchedule( + schedule: PruningSchedule, + pruneInternallyOnly: Boolean, +) + +object ParticipantPruningSchedule { + private[admin] def fromProtoV0( + participantSchedule: v0.ParticipantPruningSchedule + ): ParsingResult[ParticipantPruningSchedule] = + for { + internalSchedule <- participant.scheduler.ParticipantPruningSchedule.fromProtoV0( + participantSchedule + ) + } yield ParticipantPruningSchedule( + PruningSchedule.fromInternal(internalSchedule.schedule), + participantSchedule.pruneInternallyOnly, + ) +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/TemplateId.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/TemplateId.scala new file mode 100644 index 0000000000..c6624c18b4 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/TemplateId.scala @@ -0,0 +1,53 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.admin.api.client.data + +import com.daml.ledger.api.v1.ValueOuterClass +import com.daml.ledger.api.v1.value.Identifier +import com.daml.ledger.javaapi + +final case class TemplateId( + packageId: String, + moduleName: String, + entityName: String, +) { + def toIdentifier: Identifier = Identifier( + packageId = packageId, + moduleName = moduleName, + entityName = entityName, + ) + + def toJavaIdentifier: javaapi.data.Identifier = new javaapi.data.Identifier( + packageId, + moduleName, + entityName, + ) + + def isModuleEntity(moduleName: String, entityName: String) = + this.moduleName == moduleName && this.entityName == entityName +} + +object TemplateId { + + def fromIdentifier(identifier: Identifier): TemplateId = { + TemplateId( + packageId = identifier.packageId, + moduleName = identifier.moduleName, + entityName = identifier.entityName, + ) + } + + def templateIdsFromJava(identifiers: javaapi.data.Identifier*): Seq[TemplateId] = { + identifiers.map(fromJavaIdentifier) + } + + def fromJavaProtoIdentifier(templateId: ValueOuterClass.Identifier): TemplateId = { + fromIdentifier(Identifier.fromJavaProto(templateId)) + } + + def fromJavaIdentifier(templateId: javaapi.data.Identifier): TemplateId = { + fromJavaProtoIdentifier(templateId.toProto) + } + +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/Topology.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/Topology.scala new file mode 100644 index 0000000000..2159990797 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/Topology.scala @@ -0,0 +1,247 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.admin.api.client.data + +import cats.syntax.traverse.* +import com.digitalasset.canton.ProtoDeserializationError +import com.digitalasset.canton.admin.api.client.data.ListPartiesResult.ParticipantDomains +import com.digitalasset.canton.crypto.* +import com.digitalasset.canton.protocol.{DynamicDomainParameters as DynamicDomainParametersInternal} +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.topology.* +import com.digitalasset.canton.topology.admin.v0 +import com.digitalasset.canton.topology.admin.v0.ListDomainParametersChangesResult.Result.Parameters +import com.digitalasset.canton.topology.transaction.* +import com.google.protobuf.ByteString + +import java.time.Instant + +final case class ListPartiesResult(party: PartyId, participants: Seq[ParticipantDomains]) + +object ListPartiesResult { + final case class DomainPermission(domain: DomainId, permission: ParticipantPermission) + final case class ParticipantDomains(participant: ParticipantId, domains: Seq[DomainPermission]) + + private def fromProtoV0( + value: v0.ListPartiesResponse.Result.ParticipantDomains.DomainPermissions + ): ParsingResult[DomainPermission] = + for { + domainId <- DomainId.fromProtoPrimitive(value.domain, "domain") + permission <- ParticipantPermission.fromProtoEnum(value.permission) + } yield DomainPermission(domainId, permission) + + private def fromProtoV0( + value: v0.ListPartiesResponse.Result.ParticipantDomains + ): ParsingResult[ParticipantDomains] = + for { + participantId <- ParticipantId.fromProtoPrimitive(value.participant, "participant") + domains <- value.domains.traverse(fromProtoV0) + } yield ParticipantDomains(participantId, domains) + + def fromProtoV0( + value: v0.ListPartiesResponse.Result + ): ParsingResult[ListPartiesResult] = + for { + partyUid <- 
UniqueIdentifier.fromProtoPrimitive(value.party, "party") + participants <- value.participants.traverse(fromProtoV0) + } yield ListPartiesResult(PartyId(partyUid), participants) +} + +final case class ListKeyOwnersResult( + store: DomainId, + owner: Member, + signingKeys: Seq[SigningPublicKey], + encryptionKeys: Seq[EncryptionPublicKey], +) { + def keys(purpose: KeyPurpose): Seq[PublicKey] = purpose match { + case KeyPurpose.Signing => signingKeys + case KeyPurpose.Encryption => encryptionKeys + } +} + +object ListKeyOwnersResult { + def fromProtoV0( + value: v0.ListKeyOwnersResponse.Result + ): ParsingResult[ListKeyOwnersResult] = + for { + domain <- DomainId.fromProtoPrimitive(value.domain, "domain") + owner <- Member.fromProtoPrimitive(value.keyOwner, "keyOwner") + signingKeys <- value.signingKeys.traverse(SigningPublicKey.fromProtoV0) + encryptionKeys <- value.encryptionKeys.traverse(EncryptionPublicKey.fromProtoV0) + } yield ListKeyOwnersResult(domain, owner, signingKeys, encryptionKeys) +} + +final case class BaseResult( + domain: String, + validFrom: Instant, + validUntil: Option[Instant], + operation: TopologyChangeOp, + serialized: ByteString, + signedBy: Fingerprint, +) + +object BaseResult { + def fromProtoV0(value: v0.BaseResult): ParsingResult[BaseResult] = + for { + protoValidFrom <- ProtoConverter.required("valid_from", value.validFrom) + validFrom <- ProtoConverter.InstantConverter.fromProtoPrimitive(protoValidFrom) + validUntil <- value.validUntil.traverse(ProtoConverter.InstantConverter.fromProtoPrimitive) + operation <- TopologyChangeOp.fromProtoV0(value.operation) + signedBy <- Fingerprint.fromProtoPrimitive(value.signedByFingerprint) + + } yield BaseResult(value.store, validFrom, validUntil, operation, value.serialized, signedBy) +} + +final case class ListPartyToParticipantResult(context: BaseResult, item: PartyToParticipant) + +object ListPartyToParticipantResult { + def fromProtoV0( + value: v0.ListPartyToParticipantResult.Result + ): 
ParsingResult[ListPartyToParticipantResult] = + for { + contextProto <- ProtoConverter.required("context", value.context) + context <- BaseResult.fromProtoV0(contextProto) + itemProto <- ProtoConverter.required("item", value.item) + item <- PartyToParticipant.fromProtoV0(itemProto) + } yield ListPartyToParticipantResult(context, item) +} + +final case class ListOwnerToKeyMappingResult( + context: BaseResult, + item: OwnerToKeyMapping, + key: Fingerprint, +) + +object ListOwnerToKeyMappingResult { + def fromProtoV0( + value: v0.ListOwnerToKeyMappingResult.Result + ): ParsingResult[ListOwnerToKeyMappingResult] = + for { + contextProto <- ProtoConverter.required("context", value.context) + context <- BaseResult.fromProtoV0(contextProto) + itemProto <- ProtoConverter.required("item", value.item) + item <- OwnerToKeyMapping.fromProtoV0(itemProto) + key <- Fingerprint.fromProtoPrimitive(value.keyFingerprint) + } yield ListOwnerToKeyMappingResult(context, item, key) +} + +final case class ListNamespaceDelegationResult( + context: BaseResult, + item: NamespaceDelegation, + targetKey: Fingerprint, +) + +object ListNamespaceDelegationResult { + def fromProtoV0( + value: v0.ListNamespaceDelegationResult.Result + ): ParsingResult[ListNamespaceDelegationResult] = + for { + contextProto <- ProtoConverter.required("context", value.context) + context <- BaseResult.fromProtoV0(contextProto) + itemProto <- ProtoConverter.required("item", value.item) + item <- NamespaceDelegation.fromProtoV0(itemProto) + targetKey <- Fingerprint.fromProtoPrimitive(value.targetKeyFingerprint) + } yield ListNamespaceDelegationResult(context, item, targetKey) +} + +final case class ListIdentifierDelegationResult( + context: BaseResult, + item: IdentifierDelegation, + targetKey: Fingerprint, +) + +object ListIdentifierDelegationResult { + def fromProtoV0( + value: v0.ListIdentifierDelegationResult.Result + ): ParsingResult[ListIdentifierDelegationResult] = + for { + contextProto <- 
ProtoConverter.required("context", value.context) + context <- BaseResult.fromProtoV0(contextProto) + itemProto <- ProtoConverter.required("item", value.item) + item <- IdentifierDelegation.fromProtoV0(itemProto) + targetKey <- Fingerprint.fromProtoPrimitive(value.targetKeyFingerprint) + } yield ListIdentifierDelegationResult(context, item, targetKey) +} + +final case class ListSignedLegalIdentityClaimResult(context: BaseResult, item: LegalIdentityClaim) + +object ListSignedLegalIdentityClaimResult { + def fromProtoV0( + value: v0.ListSignedLegalIdentityClaimResult.Result + ): ParsingResult[ListSignedLegalIdentityClaimResult] = { + for { + contextProto <- ProtoConverter.required("context", value.context) + context <- BaseResult.fromProtoV0(contextProto) + itemProto <- ProtoConverter.required("item", value.item) + item <- SignedLegalIdentityClaim.fromProtoV0(itemProto) + claim <- LegalIdentityClaim.fromByteString(item.claim) + } yield ListSignedLegalIdentityClaimResult(context, claim) + } +} + +final case class ListParticipantDomainStateResult(context: BaseResult, item: ParticipantState) + +object ListParticipantDomainStateResult { + def fromProtoV0( + value: v0.ListParticipantDomainStateResult.Result + ): ParsingResult[ListParticipantDomainStateResult] = + for { + contextProto <- ProtoConverter.required("context", value.context) + context <- BaseResult.fromProtoV0(contextProto) + itemProto <- ProtoConverter.required("item", value.item) + item <- ParticipantState.fromProtoV0(itemProto) + } yield ListParticipantDomainStateResult(context, item) + +} + +final case class ListMediatorDomainStateResult(context: BaseResult, item: MediatorDomainState) + +object ListMediatorDomainStateResult { + def fromProtoV0( + value: v0.ListMediatorDomainStateResult.Result + ): ParsingResult[ListMediatorDomainStateResult] = + for { + contextProto <- ProtoConverter.required("context", value.context) + context <- BaseResult.fromProtoV0(contextProto) + itemProto <- 
ProtoConverter.required("item", value.item) + item <- MediatorDomainState.fromProtoV0(itemProto) + } yield ListMediatorDomainStateResult(context, item) + +} + +final case class ListVettedPackagesResult(context: BaseResult, item: VettedPackages) + +object ListVettedPackagesResult { + def fromProtoV0( + value: v0.ListVettedPackagesResult.Result + ): ParsingResult[ListVettedPackagesResult] = { + val v0.ListVettedPackagesResult.Result(contextPO, itemPO) = value + for { + contextProto <- ProtoConverter.required("context", contextPO) + context <- BaseResult.fromProtoV0(contextProto) + itemProto <- ProtoConverter.required("item", itemPO) + item <- VettedPackages.fromProtoV0(itemProto) + } yield ListVettedPackagesResult(context, item) + } +} + +final case class ListDomainParametersChangeResult( + context: BaseResult, + item: DynamicDomainParameters, +) + +object ListDomainParametersChangeResult { + def fromProtoV0( + value: v0.ListDomainParametersChangesResult.Result + ): ParsingResult[ListDomainParametersChangeResult] = for { + contextP <- value.context.toRight(ProtoDeserializationError.FieldNotSet("context")) + context <- BaseResult.fromProtoV0(contextP) + dynamicDomainParametersInternal <- value.parameters match { + case Parameters.Empty => Left(ProtoDeserializationError.FieldNotSet("parameters")) + case Parameters.V1(ddpX) => DynamicDomainParametersInternal.fromProtoV2(ddpX) + } + item = DynamicDomainParameters(dynamicDomainParametersInternal) + } yield ListDomainParametersChangeResult(context, item) +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/UserManagement.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/UserManagement.scala new file mode 100644 index 0000000000..7622139dd2 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/UserManagement.scala @@ -0,0 +1,125 @@ +// Copyright (c) 2023 Digital Asset 
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.admin.api.client.data

import cats.syntax.either.*
import cats.syntax.traverse.*
import com.daml.ledger.api.v1.admin.user_management_service.Right.Kind
import com.daml.ledger.api.v1.admin.user_management_service.{
  ListUsersResponse as ProtoListUsersResponse,
  Right as ProtoUserRight,
  User as ProtoLedgerApiUser,
}
import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.digitalasset.canton.topology.PartyId
import com.digitalasset.canton.{LfPartyId, ProtoDeserializationError}

import scala.util.control.NoStackTrace

/** Ledger API user as returned by the user management service.
  *
  * @param id                 unique user id
  * @param primaryParty       the user's primary party, if one is set
  * @param isDeactivated      whether the user has been deactivated
  * @param metadata           resource version and annotations of the user
  * @param identityProviderId the identity provider the user belongs to
  */
final case class LedgerApiUser(
    id: String,
    primaryParty: Option[PartyId],
    isDeactivated: Boolean,
    metadata: LedgerApiObjectMeta,
    identityProviderId: String,
)

object LedgerApiUser {

  /** Parses the proto user; an empty primary-party string is mapped to `None`.
    * Fails with a `ValueConversionError` if the primary party is not a valid party id.
    */
  def fromProtoV0(
      value: ProtoLedgerApiUser
  ): ParsingResult[LedgerApiUser] = {
    val ProtoLedgerApiUser(id, primaryParty, isDeactivated, metadataO, identityProviderId) = value
    Option
      .when(primaryParty.nonEmpty)(primaryParty)
      .traverse(LfPartyId.fromString(_).flatMap(PartyId.fromLfParty(_)))
      .leftMap { err =>
        ProtoDeserializationError.ValueConversionError("primaryParty", err)
      }
      .map { primaryPartyO =>
        LedgerApiUser(
          id = id,
          primaryParty = primaryPartyO,
          isDeactivated = isDeactivated,
          metadata = LedgerApiObjectMeta(
            // Absent metadata collapses to empty resource version / annotations.
            resourceVersion = metadataO.fold("")(_.resourceVersion),
            annotations = metadataO.fold(Map.empty[String, String])(_.annotations),
          ),
          identityProviderId = identityProviderId,
        )
      }
  }
}

/** Aggregated rights of a Ledger API user.
  *
  * @param actAs                 parties the user may act as
  * @param readAs                parties the user may read as
  * @param participantAdmin      whether the user is a participant admin
  * @param identityProviderAdmin whether the user is an identity provider admin
  */
final case class UserRights(
    actAs: Set[PartyId],
    readAs: Set[PartyId],
    participantAdmin: Boolean,
    identityProviderAdmin: Boolean,
)
object UserRights {

  /** Folds the individual proto rights into a single [[UserRights]] aggregate. */
  def fromProtoV0(
      values: Seq[ProtoUserRight]
  ): ParsingResult[UserRights] = {
    Right(values.map(_.kind).foldLeft(UserRights(Set(), Set(), false, false)) {
      case (acc, Kind.Empty) => acc
      // `_`: the admin payloads carry no information beyond their presence
      // (the original bound them to unused `value` binders, causing warnings).
      case (acc, Kind.ParticipantAdmin(_)) => acc.copy(participantAdmin = true)
      case (acc, Kind.CanActAs(value)) =>
        acc.copy(actAs = acc.actAs + PartyId.tryFromProtoPrimitive(value.party))
      case (acc, Kind.CanReadAs(value)) =>
        acc.copy(readAs = acc.readAs + PartyId.tryFromProtoPrimitive(value.party))
      case (acc, Kind.IdentityProviderAdmin(_)) =>
        acc.copy(identityProviderAdmin = true)
    })
  }
}

/** One page of users together with the token for fetching the next page. */
final case class ListLedgerApiUsersResult(users: Seq[LedgerApiUser], nextPageToken: String)

object ListLedgerApiUsersResult {

  /** Parses a proto list response and keeps only users whose id starts with `filterUser`. */
  def fromProtoV0(
      value: ProtoListUsersResponse,
      filterUser: String,
  ): ParsingResult[ListLedgerApiUsersResult] = {
    val ProtoListUsersResponse(protoUsers, nextPageToken) = value
    protoUsers.traverse(LedgerApiUser.fromProtoV0).map { users =>
      ListLedgerApiUsersResult(users.filter(_.id.startsWith(filterUser)), nextPageToken)
    }
  }
}

/** Represents a user value exposed in the Canton console
  */
final case class User(
    id: String,
    primaryParty: Option[PartyId],
    isActive: Boolean,
    annotations: Map[String, String],
    identityProviderId: String,
)

object User {

  /** Converts the wire-level user to the console view (`isActive` = not deactivated). */
  def fromLapiUser(u: LedgerApiUser): User = User(
    id = u.id,
    primaryParty = u.primaryParty,
    isActive = !u.isDeactivated,
    annotations = u.metadata.annotations,
    identityProviderId = u.identityProviderId,
  )

  /** Converts back to the wire-level user; a missing resource version becomes "". */
  def toLapiUser(u: User, resourceVersion: Option[String]): LedgerApiUser = LedgerApiUser(
    id = u.id,
    primaryParty = u.primaryParty,
    isDeactivated = !u.isActive,
    metadata = LedgerApiObjectMeta(
      resourceVersion = resourceVersion.getOrElse(""),
      annotations = u.annotations,
    ),
    identityProviderId = u.identityProviderId,
  )
}

/** Raised when a caller attempts to modify a user property that cannot be changed. */
final case class ModifyingNonModifiableUserPropertiesError()
    extends RuntimeException("MODIFYING_AN_UNMODIFIABLE_USER_PROPERTY_ERROR")
    with NoStackTrace

/** One console page of users plus the continuation token. */
final case class UsersPage(users: Seq[User], nextPageToken: String)
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.admin.api.client.data.crypto

/** Serialization formats for cryptographic keys as shown in the console.
  * `toString` renders the human-readable format name.
  */
sealed trait CryptoKeyFormat extends Product with Serializable {
  def name: String
  override def toString: String = name
}

object CryptoKeyFormat {
  /** Google Tink's key serialization format. */
  case object Tink extends CryptoKeyFormat { override val name: String = "Tink" }

  /** ASN.1 DER encoding. */
  case object Der extends CryptoKeyFormat { override val name: String = "DER" }

  /** Raw byte encoding without wrapping. */
  case object Raw extends CryptoKeyFormat { override val name: String = "Raw" }

  /** Placeholder format used by symbolic (test) crypto. */
  case object Symbolic extends CryptoKeyFormat { override val name: String = "Symbolic" }
}
/** Key schemes for asymmetric/hybrid encryption.
  * `toString` renders the human-readable scheme name.
  */
sealed trait EncryptionKeyScheme extends Product with Serializable {
  def name: String
  override def toString: String = name
}

object EncryptionKeyScheme {
  /** ECIES on P-256 with HKDF-HMAC-SHA256 and AES-128-GCM. */
  case object EciesP256HkdfHmacSha256Aes128Gcm extends EncryptionKeyScheme {
    override val name: String = "ECIES-P256_HMAC256_AES128-GCM"
  }

  /** ECIES on P-256 with HMAC-SHA256 and AES-128-CBC. */
  case object EciesP256HmacSha256Aes128Cbc extends EncryptionKeyScheme {
    override val name: String = "ECIES-P256_HMAC256_AES128-CBC"
  }

  /** RSA-2048 with OAEP-SHA256 padding. */
  case object Rsa2048OaepSha256 extends EncryptionKeyScheme {
    override val name: String = "RSA2048-OAEP-SHA256"
  }
}

/** Key schemes for symmetric encryption. */
sealed trait SymmetricKeyScheme extends Product with Serializable {
  def name: String
  override def toString: String = name

  /** Size of the symmetric key in bytes. */
  def keySizeInBytes: Int
}

object SymmetricKeyScheme {

  /** AES with 128bit key in GCM */
  case object Aes128Gcm extends SymmetricKeyScheme {
    // `val` rather than `def` for consistency with the other scheme objects
    // (constants need not be re-evaluated on each access).
    override val name: String = "AES128-GCM"
    override val keySizeInBytes: Int = 16
  }
}
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.admin.api.client.data.crypto

/** Hash algorithms supported in the console crypto data model.
  * `name` is the canonical algorithm label.
  */
sealed abstract class HashAlgorithm(val name: String)

object HashAlgorithm {
  case object Sha256 extends HashAlgorithm("SHA-256")
}

// --- Signing.scala ---
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.admin.api.client.data.crypto

/** Scheme of a signature key. `toString` renders the human-readable scheme name. */
sealed trait SigningKeyScheme extends Product with Serializable {
  def name: String

  override def toString: String = name
}

/** Schemes for signature keys.
  *
  * Ed25519 is the best performing curve and should be the default.
  * EC-DSA is slower than Ed25519 but has better compatibility with other systems (such as CCF).
  */
object SigningKeyScheme {
  case object Ed25519 extends SigningKeyScheme {
    override val name: String = "Ed25519"
  }

  // `val` rather than `def` for consistency with Ed25519 above
  // (constant names need not be re-evaluated on each access).
  case object EcDsaP256 extends SigningKeyScheme {
    override val name: String = "ECDSA-P256"
  }

  case object EcDsaP384 extends SigningKeyScheme {
    override val name: String = "ECDSA-P384"
  }
}

// --- TopologyX.scala (header and imports, continued on the following lines) ---
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.admin.api.client.data.topologyx

import cats.syntax.either.*
import cats.syntax.traverse.*
import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.ProtoDeserializationError.RefinedDurationConversionError
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.crypto.Fingerprint
import com.digitalasset.canton.protocol.DynamicDomainParameters
import com.digitalasset.canton.serialization.ProtoConverter
import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.digitalasset.canton.topology.admin.v1
import com.digitalasset.canton.topology.transaction.{
  AuthorityOfX,
  DomainTrustCertificateX,
  IdentifierDelegationX,
  MediatorDomainStateX,
  NamespaceDelegationX,
  OwnerToKeyMappingX,
  ParticipantDomainPermissionX,
  PartyHostingLimitsX,
  PartyToParticipantX,
  PurgeTopologyTransactionX,
  SequencerDomainStateX,
  TopologyChangeOpX,
  TrafficControlStateX,
  UnionspaceDefinitionX,
  VettedPackagesX,
}
import com.google.protobuf.ByteString
+import java.time.Instant + +final case class BaseResult( + domain: String, + validFrom: Instant, + validUntil: Option[Instant], + operation: TopologyChangeOpX, + transactionHash: ByteString, + serial: PositiveInt, + signedBy: NonEmpty[Seq[Fingerprint]], +) + +object BaseResult { + def fromProtoV1(value: v1.BaseResult): ParsingResult[BaseResult] = + for { + protoValidFrom <- ProtoConverter.required("valid_from", value.validFrom) + validFrom <- ProtoConverter.InstantConverter.fromProtoPrimitive(protoValidFrom) + validUntil <- value.validUntil.traverse(ProtoConverter.InstantConverter.fromProtoPrimitive) + operation <- TopologyChangeOpX.fromProtoV2(value.operation) + serial <- PositiveInt + .create(value.serial) + .leftMap(e => RefinedDurationConversionError("serial", e.message)) + signedBy <- + ProtoConverter.parseRequiredNonEmpty( + Fingerprint.fromProtoPrimitive, + "signed_by_fingerprints", + value.signedByFingerprints, + ) + } yield BaseResult( + value.store, + validFrom, + validUntil, + operation, + value.transactionHash, + serial, + signedBy, + ) +} + +final case class ListNamespaceDelegationResult( + context: BaseResult, + item: NamespaceDelegationX, +) + +object ListNamespaceDelegationResult { + def fromProtoV1( + value: v1.ListNamespaceDelegationResult.Result + ): ParsingResult[ListNamespaceDelegationResult] = + for { + contextProto <- ProtoConverter.required("context", value.context) + context <- BaseResult.fromProtoV1(contextProto) + itemProto <- ProtoConverter.required("item", value.item) + item <- NamespaceDelegationX.fromProtoV2(itemProto) + } yield ListNamespaceDelegationResult(context, item) +} + +final case class ListUnionspaceDefinitionResult( + context: BaseResult, + item: UnionspaceDefinitionX, +) + +object ListUnionspaceDefinitionResult { + def fromProtoV1( + value: v1.ListUnionspaceDefinitionResult.Result + ): ParsingResult[ListUnionspaceDefinitionResult] = + for { + contextProto <- ProtoConverter.required("context", value.context) + context <- 
BaseResult.fromProtoV1(contextProto) + itemProto <- ProtoConverter.required("item", value.item) + item <- UnionspaceDefinitionX.fromProtoV2(itemProto) + } yield ListUnionspaceDefinitionResult(context, item) +} + +final case class ListIdentifierDelegationResult( + context: BaseResult, + item: IdentifierDelegationX, +) + +object ListIdentifierDelegationResult { + def fromProtoV1( + value: v1.ListIdentifierDelegationResult.Result + ): ParsingResult[ListIdentifierDelegationResult] = + for { + contextProto <- ProtoConverter.required("context", value.context) + context <- BaseResult.fromProtoV1(contextProto) + itemProto <- ProtoConverter.required("item", value.item) + item <- IdentifierDelegationX.fromProtoV2(itemProto) + } yield ListIdentifierDelegationResult(context, item) +} + +final case class ListOwnerToKeyMappingResult( + context: BaseResult, + item: OwnerToKeyMappingX, +) + +object ListOwnerToKeyMappingResult { + def fromProtoV1( + value: v1.ListOwnerToKeyMappingResult.Result + ): ParsingResult[ListOwnerToKeyMappingResult] = + for { + contextProto <- ProtoConverter.required("context", value.context) + context <- BaseResult.fromProtoV1(contextProto) + itemProto <- ProtoConverter.required("item", value.item) + item <- OwnerToKeyMappingX.fromProtoV2(itemProto) + } yield ListOwnerToKeyMappingResult(context, item) +} + +final case class ListDomainTrustCertificateResult( + context: BaseResult, + item: DomainTrustCertificateX, +) + +object ListDomainTrustCertificateResult { + def fromProtoV1( + value: v1.ListDomainTrustCertificateResult.Result + ): ParsingResult[ListDomainTrustCertificateResult] = + for { + contextProto <- ProtoConverter.required("context", value.context) + context <- BaseResult.fromProtoV1(contextProto) + itemProto <- ProtoConverter.required("item", value.item) + item <- DomainTrustCertificateX.fromProtoV2(itemProto) + } yield ListDomainTrustCertificateResult(context, item) +} + +final case class ListParticipantDomainPermissionResult( + context: 
BaseResult, + item: ParticipantDomainPermissionX, +) + +object ListParticipantDomainPermissionResult { + def fromProtoV1( + value: v1.ListParticipantDomainPermissionResult.Result + ): ParsingResult[ListParticipantDomainPermissionResult] = + for { + contextProto <- ProtoConverter.required("context", value.context) + context <- BaseResult.fromProtoV1(contextProto) + itemProto <- ProtoConverter.required("item", value.item) + item <- ParticipantDomainPermissionX.fromProtoV2(itemProto) + } yield ListParticipantDomainPermissionResult(context, item) +} + +final case class ListPartyHostingLimitsResult( + context: BaseResult, + item: PartyHostingLimitsX, +) + +object ListPartyHostingLimitsResult { + def fromProtoV1( + value: v1.ListPartyHostingLimitsResult.Result + ): ParsingResult[ListPartyHostingLimitsResult] = + for { + contextProto <- ProtoConverter.required("context", value.context) + context <- BaseResult.fromProtoV1(contextProto) + itemProto <- ProtoConverter.required("item", value.item) + item <- PartyHostingLimitsX.fromProtoV2(itemProto) + } yield ListPartyHostingLimitsResult(context, item) +} + +final case class ListVettedPackagesResult( + context: BaseResult, + item: VettedPackagesX, +) + +object ListVettedPackagesResult { + def fromProtoV1( + value: v1.ListVettedPackagesResult.Result + ): ParsingResult[ListVettedPackagesResult] = + for { + contextProto <- ProtoConverter.required("context", value.context) + context <- BaseResult.fromProtoV1(contextProto) + itemProto <- ProtoConverter.required("item", value.item) + item <- VettedPackagesX.fromProtoV2(itemProto) + } yield ListVettedPackagesResult(context, item) +} + +final case class ListPartyToParticipantResult( + context: BaseResult, + item: PartyToParticipantX, +) + +object ListPartyToParticipantResult { + def fromProtoV1( + value: v1.ListPartyToParticipantResult.Result + ): ParsingResult[ListPartyToParticipantResult] = + for { + contextProto <- ProtoConverter.required("context", value.context) + context <- 
BaseResult.fromProtoV1(contextProto) + itemProto <- ProtoConverter.required("item", value.item) + item <- PartyToParticipantX.fromProtoV2(itemProto) + } yield ListPartyToParticipantResult(context, item) +} + +final case class ListAuthorityOfResult( + context: BaseResult, + item: AuthorityOfX, +) + +object ListAuthorityOfResult { + def fromProtoV1( + value: v1.ListAuthorityOfResult.Result + ): ParsingResult[ListAuthorityOfResult] = + for { + contextProto <- ProtoConverter.required("context", value.context) + context <- BaseResult.fromProtoV1(contextProto) + itemProto <- ProtoConverter.required("item", value.item) + item <- AuthorityOfX.fromProtoV2(itemProto) + } yield ListAuthorityOfResult(context, item) +} + +final case class ListDomainParametersStateResult( + context: BaseResult, + item: DynamicDomainParameters, +) + +object ListDomainParametersStateResult { + def fromProtoV1( + value: v1.ListDomainParametersStateResult.Result + ): ParsingResult[ListDomainParametersStateResult] = + for { + contextProto <- ProtoConverter.required("context", value.context) + context <- BaseResult.fromProtoV1(contextProto) + itemProto <- ProtoConverter.required("item", value.item) + item <- DynamicDomainParameters.fromProtoV2(itemProto) + } yield ListDomainParametersStateResult(context, item) +} + +final case class ListMediatorDomainStateResult( + context: BaseResult, + item: MediatorDomainStateX, +) + +object ListMediatorDomainStateResult { + def fromProtoV1( + value: v1.ListMediatorDomainStateResult.Result + ): ParsingResult[ListMediatorDomainStateResult] = + for { + contextProto <- ProtoConverter.required("context", value.context) + context <- BaseResult.fromProtoV1(contextProto) + itemProto <- ProtoConverter.required("item", value.item) + item <- MediatorDomainStateX.fromProtoV2(itemProto) + } yield ListMediatorDomainStateResult(context, item) +} + +final case class ListSequencerDomainStateResult( + context: BaseResult, + item: SequencerDomainStateX, +) + +object 
ListSequencerDomainStateResult { + def fromProtoV1( + value: v1.ListSequencerDomainStateResult.Result + ): ParsingResult[ListSequencerDomainStateResult] = + for { + contextProto <- ProtoConverter.required("context", value.context) + context <- BaseResult.fromProtoV1(contextProto) + itemProto <- ProtoConverter.required("item", value.item) + item <- SequencerDomainStateX.fromProtoV2(itemProto) + } yield ListSequencerDomainStateResult(context, item) +} + +final case class ListPurgeTopologyTransactionXResult( + context: BaseResult, + item: PurgeTopologyTransactionX, +) + +object ListPurgeTopologyTransactionXResult { + def fromProtoV1( + value: v1.ListPurgeTopologyTransactionXResult.Result + ): ParsingResult[ListPurgeTopologyTransactionXResult] = + for { + contextProto <- ProtoConverter.required("context", value.context) + context <- BaseResult.fromProtoV1(contextProto) + itemProto <- ProtoConverter.required("item", value.item) + item <- PurgeTopologyTransactionX.fromProtoV2(itemProto) + } yield ListPurgeTopologyTransactionXResult(context, item) +} + +final case class ListTrafficStateResult( + context: BaseResult, + item: TrafficControlStateX, +) + +object ListTrafficStateResult { + def fromProtoV1( + value: v1.ListTrafficStateResult.Result + ): ParsingResult[ListTrafficStateResult] = + for { + context <- ProtoConverter.parseRequired(BaseResult.fromProtoV1, "context", value.context) + item <- ProtoConverter.parseRequired(TrafficControlStateX.fromProtoV2, "item", value.item) + } yield ListTrafficStateResult(context, item) +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/config/CantonCommunityConfig.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/config/CantonCommunityConfig.scala new file mode 100644 index 0000000000..0f3bc5fcfd --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/config/CantonCommunityConfig.scala @@ -0,0 +1,140 @@ +// Copyright (c) 2023 Digital Asset 
(Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.config + +import cats.data.Validated +import cats.syntax.functor.* +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.config.CantonRequireTypes.InstanceName +import com.digitalasset.canton.config.ConfigErrors.CantonConfigError +import com.digitalasset.canton.domain.config.{ + CommunityDomainConfig, + DomainBaseConfig, + RemoteDomainConfig, +} +import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, TracedLogger} +import com.digitalasset.canton.participant.config.{ + CommunityParticipantConfig, + LocalParticipantConfig, + RemoteParticipantConfig, +} +import com.digitalasset.canton.tracing.TraceContext +import com.typesafe.config.Config +import monocle.macros.syntax.lens.* +import org.slf4j.{Logger, LoggerFactory} +import pureconfig.{ConfigReader, ConfigWriter} + +import java.io.File +import scala.annotation.nowarn + +final case class CantonCommunityConfig( + domains: Map[InstanceName, CommunityDomainConfig] = Map.empty, + participants: Map[InstanceName, CommunityParticipantConfig] = Map.empty, + participantsX: Map[InstanceName, CommunityParticipantConfig] = Map.empty, + remoteDomains: Map[InstanceName, RemoteDomainConfig] = Map.empty, + remoteParticipants: Map[InstanceName, RemoteParticipantConfig] = Map.empty, + remoteParticipantsX: Map[InstanceName, RemoteParticipantConfig] = Map.empty, + monitoring: MonitoringConfig = MonitoringConfig(), + parameters: CantonParameters = CantonParameters(), + features: CantonFeatures = CantonFeatures(), +) extends CantonConfig + with ConfigDefaults[DefaultPorts, CantonCommunityConfig] { + + override type DomainConfigType = CommunityDomainConfig + override type ParticipantConfigType = CommunityParticipantConfig + + /** renders the config as json (used for dumping config for diagnostic purposes) */ + override def dumpString: String = 
CantonCommunityConfig.makeConfidentialString(this) + + override def validate: Validated[NonEmpty[Seq[String]], Unit] = + CommunityConfigValidations.validate(this) + + override def withDefaults(ports: DefaultPorts): CantonCommunityConfig = + this + .focus(_.domains) + .modify(_.fmap(_.withDefaults(ports))) + .focus(_.participants) + .modify(_.fmap(_.withDefaults(ports))) + .focus(_.participantsX) + .modify(_.fmap(_.withDefaults(ports))) +} + +@nowarn("cat=lint-byname-implicit") // https://github.com/scala/bug/issues/12072 +object CantonCommunityConfig { + + /** Combine together deprecated implicits for types that define them + * This setup allows the compiler to pick the implicit for the most specific type when applying deprecations. + * For instance, + * ConfigReader[LocalParticipantConfig].applyDeprecations will pick up the deprecations implicit defined in + * LocalParticipantConfig instead of LocalNodeConfig + * despite LocalParticipantConfig being a subtype of LocalNodeConfig. + */ + object CantonDeprecationImplicits + extends LocalNodeConfig.LocalNodeConfigDeprecationImplicits + with LocalParticipantConfig.LocalParticipantDeprecationsImplicits + with DomainBaseConfig.DomainBaseConfigDeprecationImplicits + + private val logger: Logger = LoggerFactory.getLogger(classOf[CantonCommunityConfig]) + private val elc = ErrorLoggingContext( + TracedLogger(logger), + NamedLoggerFactory.root.properties, + TraceContext.empty, + ) + import pureconfig.generic.semiauto.* + import CantonConfig.* + + // Implemented as a def so we can pass the ErrorLoggingContext to be used during parsing + @nowarn("cat=unused") + private implicit def cantonCommunityConfigReader(implicit + elc: ErrorLoggingContext + ): ConfigReader[CantonCommunityConfig] = { // memoize it so we get the same instance every time + val configReaders: ConfigReaders = new ConfigReaders() + import configReaders.* + import DeprecatedConfigUtils.* + import CantonDeprecationImplicits.* + + implicit val 
communityDomainConfigReader: ConfigReader[CommunityDomainConfig] = + deriveReader[CommunityDomainConfig].applyDeprecations + implicit val communityParticipantConfigReader: ConfigReader[CommunityParticipantConfig] = + deriveReader[CommunityParticipantConfig].applyDeprecations + + deriveReader[CantonCommunityConfig] + } + + @nowarn("cat=unused") + private lazy implicit val cantonCommunityConfigWriter: ConfigWriter[CantonCommunityConfig] = { + val writers = new CantonConfig.ConfigWriters(confidential = true) + import writers.* + implicit val communityDomainConfigWriter: ConfigWriter[CommunityDomainConfig] = + deriveWriter[CommunityDomainConfig] + implicit val communityParticipantConfigWriter: ConfigWriter[CommunityParticipantConfig] = + deriveWriter[CommunityParticipantConfig] + + deriveWriter[CantonCommunityConfig] + } + + def load(config: Config)(implicit + elc: ErrorLoggingContext = elc + ): Either[CantonConfigError, CantonCommunityConfig] = + CantonConfig.loadAndValidate[CantonCommunityConfig](config) + + def loadOrExit(config: Config)(implicit elc: ErrorLoggingContext = elc): CantonCommunityConfig = + CantonConfig.loadOrExit[CantonCommunityConfig](config) + + def parseAndLoad(files: Seq[File])(implicit + elc: ErrorLoggingContext = elc + ): Either[CantonConfigError, CantonCommunityConfig] = + CantonConfig.parseAndLoad[CantonCommunityConfig](files) + + def parseAndLoadOrExit(files: Seq[File])(implicit + elc: ErrorLoggingContext = elc + ): CantonCommunityConfig = + CantonConfig.parseAndLoadOrExit[CantonCommunityConfig](files) + + def makeConfidentialString(config: CantonCommunityConfig): String = + "canton " + ConfigWriter[CantonCommunityConfig] + .to(config) + .render(CantonConfig.defaultConfigRenderer) + +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/config/CantonConfig.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/config/CantonConfig.scala new file mode 100644 index 0000000000..8b9dec1ebd --- 
/dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/config/CantonConfig.scala @@ -0,0 +1,1600 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.config + +//////////////////////////////////////////////////////// +// DO NOT USE INTELLIJ OPTIMIZE-IMPORT AS IT WILL REMOVE +// SOME OF THE IMPLICIT IMPORTS NECESSARY TO COMPILE +//////////////////////////////////////////////////////// + +import cats.Order +import cats.data.Validated +import cats.syntax.either.* +import cats.syntax.functor.* +import com.daml.jwt.JwtTimestampLeeway +import com.daml.metrics.HistogramDefinition +import com.daml.nonempty.NonEmpty +import com.daml.nonempty.catsinstances.* +import com.digitalasset.canton.config.CantonRequireTypes.LengthLimitedString.{ + InvalidLengthString, + defaultMaxLength, +} +import com.digitalasset.canton.config.CantonRequireTypes.* +import com.digitalasset.canton.config.ConfigErrors.{ + CannotParseFilesError, + CannotReadFilesError, + CantonConfigError, + GenericConfigError, + NoConfigFiles, + SubstitutionError, +} +import com.digitalasset.canton.config.DeprecatedConfigUtils.DeprecatedFieldsFor +import com.digitalasset.canton.config.InitConfigBase.NodeIdentifierConfig +import com.digitalasset.canton.config.RequireTypes.* +import com.digitalasset.canton.console.{AmmoniteConsoleConfig, FeatureFlag} +import com.digitalasset.canton.crypto.* +import com.digitalasset.canton.domain.DomainNodeParameters +import com.digitalasset.canton.domain.config.* +import com.digitalasset.canton.domain.sequencing.sequencer.* +import com.digitalasset.canton.environment.CantonNodeParameters +import com.digitalasset.canton.http.{HttpApiConfig, StaticContentConfig, WebsocketConfig} +import com.digitalasset.canton.ledger.runner.common.PureConfigReaderWriter.Secure.{ + commandConfigurationConvert, + dbConfigPostgresDataSourceConfigConvert, + 
identityProviderManagementConfigConvert, + indexServiceConfigConvert, + indexerConfigConvert, + userManagementServiceConfigConvert, +} +import com.digitalasset.canton.logging.ErrorLoggingContext +import com.digitalasset.canton.metrics.{MetricsConfig, MetricsPrefix, MetricsReporterConfig} +import com.digitalasset.canton.participant.ParticipantNodeParameters +import com.digitalasset.canton.participant.admin.AdminWorkflowConfig +import com.digitalasset.canton.participant.config.ParticipantInitConfig.{ + ParticipantLedgerApiInitConfig, + ParticipantParametersInitConfig, +} +import com.digitalasset.canton.participant.config.* +import com.digitalasset.canton.platform.apiserver.SeedService.Seeding +import com.digitalasset.canton.platform.apiserver.configuration.RateLimitingConfig +import com.digitalasset.canton.platform.config.ActiveContractsServiceStreamsConfig +import com.digitalasset.canton.platform.indexer.PackageMetadataViewConfig +import com.digitalasset.canton.protocol.DomainParameters.MaxRequestSize +import com.digitalasset.canton.pureconfigutils.HttpServerConfig +import com.digitalasset.canton.pureconfigutils.SharedConfigReaders.catchConvertError +import com.digitalasset.canton.sequencing.authentication.AuthenticationTokenManagerConfig +import com.digitalasset.canton.sequencing.client.SequencerClientConfig +import com.digitalasset.canton.tracing.TracingConfig +import com.typesafe.config.ConfigException.UnresolvedSubstitution +import com.typesafe.config.{ + Config, + ConfigException, + ConfigFactory, + ConfigList, + ConfigObject, + ConfigRenderOptions, + ConfigValue, + ConfigValueFactory, +} +import com.typesafe.scalalogging.LazyLogging +import monocle.macros.syntax.lens.* +import org.apache.pekko.stream.ThrottleMode +import pureconfig.* +import pureconfig.error.CannotConvert +import pureconfig.generic.{FieldCoproductHint, ProductHint} + +import java.io.File +import java.nio.file.{Path, Paths} +import scala.annotation.nowarn +import scala.concurrent.duration.* 
+import scala.reflect.ClassTag +import scala.util.Try + +/** Configuration for a check */ +sealed trait CheckConfig +object CheckConfig { + + /** Always return a healthy result - useful for testing and where there may be no suitable domain configured to use the ping health check */ + case object AlwaysHealthy extends CheckConfig + + /** Attempt to ping the given participant to determine health + * + * @param participant Alias of a locally configured participant (will ping itself) + * @param interval The duration to wait between pings + * @param timeout Duration to allow for the ping to complete + */ + final case class Ping( + participant: String, + interval: NonNegativeFiniteDuration, + timeout: NonNegativeFiniteDuration = NonNegativeFiniteDuration.ofSeconds(10), + ) extends CheckConfig + + object IsActive { + trait IsActiveConfigDeprecationsImplicits { + implicit def deprecatedHealthConfig[X <: IsActive]: DeprecatedFieldsFor[X] = + new DeprecatedFieldsFor[IsActive] { + override def movedFields: List[DeprecatedConfigUtils.MovedConfigPath] = List( + DeprecatedConfigUtils.MovedConfigPath( + "participant", + "node", + ) + ) + } + } + object DeprecatedImplicits extends IsActiveConfigDeprecationsImplicits + } + + /** Returns the isActive state of a node. + * Intended for a HA node where only one of potentially many replicas will be active concurrently. + * @param node If unset will default to picking the only configured node as this is the likely usage of this check. + * If many nodes are available within the process it will pick the first participant node. + * If using many nodes in process then set to the configured name of the node to return + * the active status of. + */ + final case class IsActive(node: Option[String] = None) extends CheckConfig +} + +/** Configuration of health server backend. 
*/ +final case class HealthServerConfig(address: String = "0.0.0.0", port: Port) + +/** Configuration to expose a health endpoint on the given `server` running the configured check + * @param server Server details for hosting the health endpoint + * @param check Check for determining whether this canton process is healthy + */ +final case class HealthConfig(server: HealthServerConfig, check: CheckConfig) + +/** Deadlock detection configuration + * + * A simple deadlock detection method. Using a background scheduler, we schedule a trivial future on the EC. + * If the Future is not executed until we check again, we alert. + * + * @param enabled if true, we'll monitor the EC for deadlocks (or slow processings) + * @param interval how often we check the EC + * @param warnInterval how often we report a deadlock as still being active + */ +final case class DeadlockDetectionConfig( + enabled: Boolean = true, + interval: NonNegativeFiniteDuration = NonNegativeFiniteDuration.ofSeconds(3), + warnInterval: NonNegativeFiniteDuration = NonNegativeFiniteDuration.ofSeconds(10), +) + +/** Configuration for metrics and tracing + * + * @param deadlockDetection Should we regularly check our environment EC for deadlocks? + * @param health Optional Health check to expose an http server to monitor whether the canton process is healthy + * @param metrics Optional Metrics Reporter used to expose internally captured metrics + * @param delayLoggingThreshold Logs a warning message once the sequencer client falls behind in processing messages from the sequencer (based on the sequencing timestamp). + * + * @param tracing Tracing configuration + * @param logMessagePayloads Determines whether message payloads (as well as metadata) sent through GRPC are logged. 
+ * @param logQueryCost Determines whether to log the 15 most expensive db queries + * @param logSlowFutures Whether we should actively log slow futures (where instructed) + * @param dumpNumRollingLogFiles How many of the rolling log files should be included in the remote dump. Default is 0. + */ +final case class MonitoringConfig( + deadlockDetection: DeadlockDetectionConfig = DeadlockDetectionConfig(), + health: Option[HealthConfig] = None, + metrics: MetricsConfig = MetricsConfig(), + // TODO(i9014) move into logging + delayLoggingThreshold: NonNegativeFiniteDuration = + MonitoringConfig.defaultDelayLoggingThreshold, + tracing: TracingConfig = TracingConfig(), + // TODO(i9014) rename to queries + logQueryCost: Option[QueryCostMonitoringConfig] = None, + // TODO(i9014) move into logging + logSlowFutures: Boolean = false, + logging: LoggingConfig = LoggingConfig(), + dumpNumRollingLogFiles: NonNegativeInt = MonitoringConfig.defaultDumpNumRollingLogFiles, +) extends LazyLogging { + + // merge in backwards compatible config options + def getLoggingConfig: LoggingConfig = + (logging.api.messagePayloads, logging.api.messagePayloads) match { + case (Some(fst), _) => + if (!logging.api.messagePayloads.forall(_ == fst)) + logger.error( + "Broken config validation: logging.api.message-payloads differs from logMessagePayloads" + ) + logging.focus(_.api.messagePayloads).replace(Some(fst)) + case _ => logging + } + +} + +object MonitoringConfig { + private val defaultDelayLoggingThreshold = NonNegativeFiniteDuration.ofSeconds(20) + private val defaultDumpNumRollingLogFiles = NonNegativeInt.tryCreate(0) +} + +/** Configuration for console command timeouts + * + * @param bounded timeout on how long "bounded" operations, i.e. operations which normally are supposed to conclude + * in a fixed timeframe can run before the console considers them as failed. + * @param unbounded timeout on how long "unbounded" operations can run, potentially infinite. 
+ * @param ledgerCommand default timeout used for ledger commands + * @param ping default ping timeout + * @param testingBong default bong timeout + */ +final case class ConsoleCommandTimeout( + bounded: NonNegativeDuration = ConsoleCommandTimeout.defaultBoundedTimeout, + unbounded: NonNegativeDuration = ConsoleCommandTimeout.defaultUnboundedTimeout, + ledgerCommand: NonNegativeDuration = ConsoleCommandTimeout.defaultLedgerCommandsTimeout, + ping: NonNegativeDuration = ConsoleCommandTimeout.defaultPingTimeout, + testingBong: NonNegativeDuration = ConsoleCommandTimeout.defaultTestingBongTimeout, +) + +object ConsoleCommandTimeout { + val defaultBoundedTimeout: NonNegativeDuration = NonNegativeDuration.tryFromDuration(1.minute) + val defaultUnboundedTimeout: NonNegativeDuration = + NonNegativeDuration.tryFromDuration(Duration.Inf) + val defaultLedgerCommandsTimeout: NonNegativeDuration = + NonNegativeDuration.tryFromDuration(1.minute) + val defaultPingTimeout: NonNegativeDuration = NonNegativeDuration.tryFromDuration(20.seconds) + val defaultTestingBongTimeout: NonNegativeDuration = NonNegativeDuration.tryFromDuration(1.minute) +} + +/** Timeout settings configuration */ +final case class TimeoutSettings( + console: ConsoleCommandTimeout = ConsoleCommandTimeout(), + processing: ProcessingTimeout = ProcessingTimeout(), +) + +sealed trait ClockConfig extends Product with Serializable +object ClockConfig { + + /** Configure Canton to use a simclock + * + * A SimClock's time only progresses when [[com.digitalasset.canton.time.SimClock.advance]] is explicitly called. + */ + case object SimClock extends ClockConfig + + /** Configure Canton to use the wall clock (default) + * + * @param skew maximum simulated clock skew (0) + * If positive, Canton nodes will use a WallClock, but the time of the wall clocks + * will be shifted by a random number between `-simulateMaxClockSkewMillis` and + * `simulateMaxClockSkewMillis`. The clocks will never move backwards. 
+ */ + final case class WallClock( + skew: NonNegativeFiniteDuration = NonNegativeFiniteDuration.ofSeconds(0) + ) extends ClockConfig + + /** Configure Canton to use a remote clock + * + * In crash recovery testing scenarios, we want several processes to use the same time. + * In most cases, we can rely on NTP and the host clock. However, in cases + * where we test static time, we need the spawned processes to access the main process's + * clock. + * For such cases we can use a remote clock. However, no user should ever require this. + * @param remoteApi admin-port of the node to read the time from + */ + final case class RemoteClock(remoteApi: ClientConfig) extends ClockConfig + +} + +/** Default retention periods used by pruning commands where no values are explicitly specified. + * Although by default the commands will retain enough data to remain operational, + * operators may like to retain more than this to facilitate possible disaster recovery scenarios or + * retain evidence of completed transactions. + */ +final case class RetentionPeriodDefaults( + sequencer: NonNegativeFiniteDuration = NonNegativeFiniteDuration.ofDays(7), + mediator: NonNegativeFiniteDuration = NonNegativeFiniteDuration.ofDays(7), + unauthenticatedMembers: NonNegativeFiniteDuration = NonNegativeFiniteDuration.ofHours(1), +) + +/** Parameters for testing Canton. Use default values in a production environment. + * + * @param enableAdditionalConsistencyChecks if true, run additional consistency checks. This will degrade performance. 
+ * @param manualStart If set to true, the nodes have to be manually started via console (default false) + * @param startupParallelism Start up to N nodes in parallel (default is num-threads) + * @param nonStandardConfig don't fail config validation on non-standard configuration settings + * @param devVersionSupport If true, allow domain nodes to use unstable protocol versions and participant nodes to connect to such domains + * @param timeouts Sets the timeouts used for processing and console + * @param portsFile A ports file name, where the ports of all participants will be written to after startup + */ +final case class CantonParameters( + clock: ClockConfig = ClockConfig.WallClock(), + enableAdditionalConsistencyChecks: Boolean = false, + manualStart: Boolean = false, + startupParallelism: Option[PositiveInt] = None, + // TODO(i15561): Revert back to `false` once there is a stable Daml 3 protocol version + nonStandardConfig: Boolean = true, + // TODO(i15561): Revert back to `false` once there is a stable Daml 3 protocol version + devVersionSupport: Boolean = true, + portsFile: Option[String] = None, + timeouts: TimeoutSettings = TimeoutSettings(), + retentionPeriodDefaults: RetentionPeriodDefaults = RetentionPeriodDefaults(), + console: AmmoniteConsoleConfig = AmmoniteConsoleConfig(), +) { + def getStartupParallelism(numThreads: Int): Int = + startupParallelism.fold(numThreads)(_.value) +} + +/** Control which features are turned on / off in Canton + * + * @param enablePreviewCommands Feature flag to enable the set of commands that use functionality which we don't deem stable. + * @param enableTestingCommands Feature flag to enable the set of commands used by Canton developers for testing purposes. + * @param enableRepairCommands Feature flag to enable the set of commands used by Canton operators for manual repair purposes. 
+ * @param skipTopologyManagerSignatureValidation If true, the signature validation of the domain topology transaction messages (2.x) will be skipped + */ +final case class CantonFeatures( + enablePreviewCommands: Boolean = false, + enableTestingCommands: Boolean = false, + enableRepairCommands: Boolean = false, + // TODO(#15221) remove for x-nodes + skipTopologyManagerSignatureValidation: Boolean = false, +) { + def featureFlags: Set[FeatureFlag] = { + (Seq(FeatureFlag.Stable) + ++ (if (enableTestingCommands) Seq(FeatureFlag.Testing) else Seq()) + ++ (if (enablePreviewCommands) Seq(FeatureFlag.Preview) else Seq()) + ++ (if (enableRepairCommands) Seq(FeatureFlag.Repair) else Seq())).toSet + } +} + +/** Root configuration parameters for a single Canton process. */ +trait CantonConfig { + + type DomainConfigType <: DomainConfig with ConfigDefaults[DefaultPorts, DomainConfigType] + type ParticipantConfigType <: LocalParticipantConfig with ConfigDefaults[ + DefaultPorts, + ParticipantConfigType, + ] + + /** all domains that this Canton process can operate + * + * domains are grouped by their alias, which is used to identify domains locally + */ + def domains: Map[InstanceName, DomainConfigType] + + /** Use `domains` instead! + */ + def domainsByString: Map[String, DomainConfigType] = domains.map { case (n, c) => n.unwrap -> c } + + /** all participants that this Canton process can operate or connect to + * + * participants are grouped by their local name + */ + def participants: Map[InstanceName, ParticipantConfigType] + + /** Use `participants` instead! + */ + def participantsByString: Map[String, ParticipantConfigType] = participants.map { case (n, c) => + n.unwrap -> c + } + + /** all participants that this Canton process can operate or connect to + * + * participants are grouped by their local name + */ + def participantsX: Map[InstanceName, ParticipantConfigType] + + /** Use `participantsX` instead! 
+ */ + def participantsByStringX: Map[String, ParticipantConfigType] = participantsX.map { case (n, c) => + n.unwrap -> c + } + + /** all remotely running domains to which the console can connect and operate on */ + def remoteDomains: Map[InstanceName, RemoteDomainConfig] + + /** Use `remoteDomains` instead! + */ + def remoteDomainsByString: Map[String, RemoteDomainConfig] = remoteDomains.map { case (n, c) => + n.unwrap -> c + } + + /** all remotely running participants to which the console can connect and operate on */ + def remoteParticipants: Map[InstanceName, RemoteParticipantConfig] + + /** all remotely running participants to which the console can connect and operate on */ + def remoteParticipantsX: Map[InstanceName, RemoteParticipantConfig] + + /** Use `remoteParticipants` instead! + */ + def remoteParticipantsByString: Map[String, RemoteParticipantConfig] = remoteParticipants.map { + case (n, c) => + n.unwrap -> c + } + + /** Use `remoteParticipantsX` instead! + */ + def remoteParticipantsByStringX: Map[String, RemoteParticipantConfig] = remoteParticipantsX.map { + case (n, c) => + n.unwrap -> c + } + + /** determines how this Canton process can be monitored */ + def monitoring: MonitoringConfig + + /** per-environment parameters to control enabled features and set testing parameters */ + def parameters: CantonParameters + + /** control which features are enabled */ + def features: CantonFeatures + + /** dump config to string (without sensitive data) */ + def dumpString: String + + /** run a validation on the current config and return possible warning messages */ + def validate: Validated[NonEmpty[Seq[String]], Unit] + + private lazy val domainNodeParameters_ : Map[InstanceName, DomainNodeParameters] = domains.fmap { + domainConfig => + DomainNodeParameters( + general = CantonNodeParameterConverter.general(this, domainConfig), + protocol = CantonNodeParameterConverter.protocol(this, domainConfig.init.domainParameters), + maxBurstFactor = 
domainConfig.parameters.maxBurstFactor, + ) + } + + private[canton] def domainNodeParameters(name: InstanceName): DomainNodeParameters = + nodeParametersFor(domainNodeParameters_, "domain", name) + + /** Use `domainNodeParameters` instead! + */ + private[canton] def domainNodeParametersByString(name: String): DomainNodeParameters = + domainNodeParameters(InstanceName.tryCreate(name)) + + private lazy val participantNodeParameters_ : Map[InstanceName, ParticipantNodeParameters] = + (participants ++ participantsX).fmap { participantConfig => + val participantParameters = participantConfig.parameters + ParticipantNodeParameters( + general = CantonNodeParameterConverter.general(this, participantConfig), + partyChangeNotification = participantParameters.partyChangeNotification, + adminWorkflow = participantParameters.adminWorkflow, + maxUnzippedDarSize = participantParameters.maxUnzippedDarSize, + stores = participantParameters.stores, + transferTimeProofFreshnessProportion = + participantParameters.transferTimeProofFreshnessProportion, + protocolConfig = ParticipantProtocolConfig( + minimumProtocolVersion = participantParameters.minimumProtocolVersion.map(_.unwrap), + devVersionSupport = participantParameters.devVersionSupport, + dontWarnOnDeprecatedPV = participantParameters.dontWarnOnDeprecatedPV, + initialProtocolVersion = participantParameters.initialProtocolVersion.unwrap, + ), + uniqueContractKeys = participantConfig.init.parameters.uniqueContractKeys, + ledgerApiServerParameters = participantParameters.ledgerApiServerParameters, + excludeInfrastructureTransactions = participantParameters.excludeInfrastructureTransactions, + enableEngineStackTrace = participantParameters.enableEngineStackTraces, + enableContractUpgrading = participantParameters.enableContractUpgrading, + iterationsBetweenInterruptions = participantParameters.iterationsBetweenInterruptions, + ) + } + + private[canton] def participantNodeParameters( + participant: InstanceName + ): 
ParticipantNodeParameters = + nodeParametersFor(participantNodeParameters_, "participant", participant) + + /** Use `participantNodeParameters` instead! + */ + private[canton] def participantNodeParametersByString(name: String) = participantNodeParameters( + InstanceName.tryCreate(name) + ) + + protected def nodeParametersFor[A]( + cachedNodeParameters: Map[InstanceName, A], + kind: String, + name: InstanceName, + ): A = + cachedNodeParameters.getOrElse( + name, + throw new IllegalArgumentException( + s"Unknown $kind $name. Known ${kind}s: ${cachedNodeParameters.keys.mkString(", ")}" + ), + ) + + /** Produces a message in the structure "da:admin-api=1,public-api=2;participant1:admin-api=3,ledger-api=4". + * Helpful for diagnosing port already bound issues during tests. + * Allows any config value to be null (can happen with invalid configs or config stubbed in tests) + */ + lazy val portDescription: String = mkPortDescription + + protected def mkPortDescription: String = { + def participant(config: LocalParticipantConfig): Seq[String] = + portDescriptionFromConfig(config)(Seq(("admin-api", _.adminApi), ("ledger-api", _.ledgerApi))) + + def domain(config: DomainConfig): Seq[String] = + portDescriptionFromConfig(config)(Seq(("admin-api", _.adminApi), ("public-api", _.publicApi))) + + Seq(domains.fmap(domain), participants.fmap(participant)) + .flatMap(_.map { case (name, ports) => + nodePortsDescription(name, ports) + }) + .mkString(";") + } + + protected def nodePortsDescription( + nodeName: InstanceName, + portDescriptions: Seq[String], + ): String = + s"$nodeName:${portDescriptions.mkString(",")}" + + protected def portDescriptionFromConfig[C]( + config: C + )(apiNamesAndExtractors: Seq[(String, C => ServerConfig)]): Seq[String] = { + def server(name: String, config: ServerConfig): Option[String] = + Option(config).map(c => s"$name=${c.port}") + Option(config) + .map(config => + apiNamesAndExtractors.map { case (name, extractor) => + server(name, 
extractor(config)) + } + ) + .getOrElse(Seq.empty) + .flatMap(_.toList) + } +} + +private[config] object CantonNodeParameterConverter { + import com.digitalasset.canton.time.EnrichedDurations.* + + def general(parent: CantonConfig, node: LocalNodeConfig): CantonNodeParameters.General = { + CantonNodeParameters.General.Impl( + parent.monitoring.tracing, + parent.monitoring.delayLoggingThreshold.toInternal, + parent.monitoring.logQueryCost, + parent.monitoring.getLoggingConfig, + parent.parameters.enableAdditionalConsistencyChecks, + parent.features.enablePreviewCommands, + parent.parameters.timeouts.processing, + node.sequencerClient, + node.caching, + node.parameters.batching, + parent.parameters.nonStandardConfig, + node.storage.parameters.migrateAndStart, + parent.features.skipTopologyManagerSignatureValidation, + ) + } + + def protocol(parent: CantonConfig, config: ProtocolConfig): CantonNodeParameters.Protocol = + CantonNodeParameters.Protocol.Impl( + devVersionSupport = parent.parameters.devVersionSupport || config.devVersionSupport, + dontWarnOnDeprecatedPV = config.dontWarnOnDeprecatedPV, + initialProtocolVersion = config.initialProtocolVersion, + ) + +} + +@nowarn("cat=lint-byname-implicit") // https://github.com/scala/bug/issues/12072 +object CantonConfig { + + implicit def preventAllUnknownKeys[T]: ProductHint[T] = ProductHint[T](allowUnknownKeys = false) + + import com.daml.nonempty.NonEmptyUtil.instances.* + import pureconfig.ConfigReader + import pureconfig.generic.semiauto.* + import pureconfig.module.cats.* + + implicit val communityStorageConfigTypeHint: FieldCoproductHint[CommunityStorageConfig] = + CantonConfigUtil.lowerCaseStorageConfigType[CommunityStorageConfig] + + /** In the external config we use `port` for an optionally set port, while internally we store it as `internalPort` */ + implicit def serverConfigProductHint[SC <: ServerConfig]: ProductHint[SC] = ProductHint[SC]( + fieldMapping = ConfigFieldMapping(CamelCase, 
KebabCase).withOverrides("internalPort" -> "port"), + allowUnknownKeys = false, + ) + + class ConfigReaders(implicit private val elc: ErrorLoggingContext) { + import CantonConfigUtil.* + import DeprecatedConfigUtils.* + import ParticipantInitConfig.DeprecatedImplicits.* + import com.digitalasset.canton.config.CheckConfig.IsActive.DeprecatedImplicits.* + import com.digitalasset.canton.metrics.MetricsReporterConfig.DeprecatedImplicits.* + import com.digitalasset.canton.platform.config.ActiveContractsServiceStreamsConfig.DeprecatedImplicits.* + import com.digitalasset.canton.participant.config.LedgerApiServerConfig.DeprecatedImplicits.* + + lazy implicit val lengthLimitedStringReader: ConfigReader[LengthLimitedString] = { + ConfigReader.fromString[LengthLimitedString] { str => + Either.cond( + str.nonEmpty && str.length <= defaultMaxLength, + new LengthLimitedStringVar(str, defaultMaxLength)(), + InvalidLengthString(str), + ) + } + } + + implicit val nonNegativeDurationReader: ConfigReader[NonNegativeDuration] = + ConfigReader.fromString[NonNegativeDuration] { str => + def err(message: String) = + CannotConvert(str, NonNegativeDuration.getClass.getName, message) + + Either + .catchOnly[NumberFormatException](Duration.apply(str)) + .leftMap(error => err(error.getMessage)) + .flatMap(duration => NonNegativeDuration.fromDuration(duration).leftMap(err)) + } + + implicit val positiveDurationSecondsReader: ConfigReader[PositiveDurationSeconds] = + ConfigReader.fromString[PositiveDurationSeconds] { str => + def err(message: String) = + CannotConvert(str, PositiveDurationSeconds.getClass.getName, message) + + Either + .catchOnly[NumberFormatException](Duration.apply(str)) + .leftMap(error => err(error.getMessage)) + .flatMap(duration => PositiveDurationSeconds.fromDuration(duration).leftMap(err)) + } + + implicit val maxRequestSizeReader: ConfigReader[MaxRequestSize] = + NonNegativeNumeric.nonNegativeNumericReader[Int].map(MaxRequestSize) + + implicit val 
sequencerTestingInterceptorReader + : ConfigReader[DatabaseSequencerConfig.TestingInterceptor] = + (_: ConfigCursor) => + sys.error("Sequencer testing interceptor cannot be created from pureconfig") + + implicit val tracingConfigDisabledSpanExporterReader + : ConfigReader[TracingConfig.Exporter.Disabled.type] = + deriveReader[TracingConfig.Exporter.Disabled.type] + implicit val tracingConfigJaegerSpanExporterReader + : ConfigReader[TracingConfig.Exporter.Jaeger] = + deriveReader[TracingConfig.Exporter.Jaeger] + implicit val tracingConfigZipkinSpanExporterReader + : ConfigReader[TracingConfig.Exporter.Zipkin] = + deriveReader[TracingConfig.Exporter.Zipkin] + implicit val tracingConfigOtlpSpanExporterReader: ConfigReader[TracingConfig.Exporter.Otlp] = + deriveReader[TracingConfig.Exporter.Otlp] + implicit val tracingConfigSpanExporterReader: ConfigReader[TracingConfig.Exporter] = + deriveReader[TracingConfig.Exporter] + implicit val tracingConfigAlwaysOnSamplerReader: ConfigReader[TracingConfig.Sampler.AlwaysOn] = + deriveReader[TracingConfig.Sampler.AlwaysOn] + implicit val tracingConfigAlwaysOffSamplerReader + : ConfigReader[TracingConfig.Sampler.AlwaysOff] = + deriveReader[TracingConfig.Sampler.AlwaysOff] + implicit val tracingConfigTraceIdRatioSamplerReader + : ConfigReader[TracingConfig.Sampler.TraceIdRatio] = + deriveReader[TracingConfig.Sampler.TraceIdRatio] + implicit val tracingConfigSamplerReader: ConfigReader[TracingConfig.Sampler] = + deriveReader[TracingConfig.Sampler] + implicit val tracingConfigBatchSpanProcessorReader + : ConfigReader[TracingConfig.BatchSpanProcessor] = + deriveReader[TracingConfig.BatchSpanProcessor] + implicit val tracingConfigTracerReader: ConfigReader[TracingConfig.Tracer] = + deriveReader[TracingConfig.Tracer] + // treat TracingConfig.Propagation as an enum as we currently only have case object types in the sealed family + implicit val tracingConfigPropagationReader: ConfigReader[TracingConfig.Propagation] = + 
deriveEnumerationReader[TracingConfig.Propagation] + implicit val tracingConfigReader: ConfigReader[TracingConfig] = + deriveReader[TracingConfig] + + implicit val contractIdSeedingReader: ConfigReader[Seeding] = + // Not using deriveEnumerationReader[Seeding] as we prefer "testing-static" over static (that appears + // in Seeding.name, but not in the case object name). This makes it clear that static is not to + // be used in production and avoids naming the configuration option contractIdSeedingOverrideOnlyForTesting or so. + ConfigReader.fromString[Seeding] { + case Seeding.Strong.name => Right(Seeding.Strong) + case Seeding.Weak.name => + Right( + Seeding.Weak + ) // Pending upstream discussions, weak may turn out to be viable too for production + case Seeding.Static.name => Right(Seeding.Static) + case unknownSeeding => + Left( + CannotConvert( + unknownSeeding, + Seeding.getClass.getName, + s"Seeding is neither ${Seeding.Strong.name}, ${Seeding.Weak.name}, nor ${Seeding.Static.name}: ${unknownSeeding}", + ) + ) + } + + /** Using semi-automatic derivation over automatic derivation to save compile time + * NOTE: the order of the declaration matters ... if you see errors such as + * could not find Lazy implicit value of type pureconfig.generic.DerivedConfigReader[..] + * then it means that the orders of the reader / writers is wrong. 
+ * + * The following python script is very helpful to create the semi-automatic writer and readers + * + * import fileinput + * for line in fileinput.input(): + * t = line.strip() + * if len(t) > 0: + * a = (t[0].lower() + t[1:]).replace(".","") + * print(""" + * implicit val %sReader : ConfigReader[%s] = deriveReader[%s] + * implicit val %sWriter : ConfigWriter[%s] = deriveWriter[%s] + * """ % (a, t, t, a, t, t)) + */ + lazy implicit val tlsClientCertificateReader: ConfigReader[TlsClientCertificate] = + deriveReader[TlsClientCertificate] + lazy implicit val serverAuthRequirementConfigNoneReader + : ConfigReader[ServerAuthRequirementConfig.None.type] = + deriveReader[ServerAuthRequirementConfig.None.type] + lazy implicit val serverAuthRequirementConfigOptionalReader + : ConfigReader[ServerAuthRequirementConfig.Optional.type] = + deriveReader[ServerAuthRequirementConfig.Optional.type] + lazy implicit val serverAuthRequirementConfigRequireReader + : ConfigReader[ServerAuthRequirementConfig.Require] = + deriveReader[ServerAuthRequirementConfig.Require] + lazy implicit val serverAuthRequirementConfigReader: ConfigReader[ServerAuthRequirementConfig] = + deriveReader[ServerAuthRequirementConfig] + lazy implicit val keepAliveClientConfigReader: ConfigReader[KeepAliveClientConfig] = + deriveReader[KeepAliveClientConfig] + lazy implicit val keepAliveServerConfigReader: ConfigReader[KeepAliveServerConfig] = + deriveReader[KeepAliveServerConfig] + lazy implicit val tlsServerConfigReader: ConfigReader[TlsServerConfig] = + deriveReader[TlsServerConfig] + lazy implicit val tlsClientConfigReader: ConfigReader[TlsClientConfig] = + deriveReader[TlsClientConfig] + lazy implicit val initBaseIdentityConfigReader: ConfigReader[InitConfigBase.Identity] = + deriveReader[InitConfigBase.Identity] + lazy implicit val initConfigReader: ConfigReader[InitConfig] = deriveReader[InitConfig] + .enableNestedOpt("auto-init", _.copy(identity = None)) + lazy implicit val 
parametersParticipantInitConfigReader + : ConfigReader[ParticipantParametersInitConfig] = + deriveReader[ParticipantParametersInitConfig] + lazy implicit val ledgerApiParticipantInitConfigReader + : ConfigReader[ParticipantLedgerApiInitConfig] = + deriveReader[ParticipantLedgerApiInitConfig] + lazy implicit val nodeNameConfigReader: ConfigReader[NodeIdentifierConfig.Config.type] = + deriveReader[NodeIdentifierConfig.Config.type] + lazy implicit val nodeNameRandomReader: ConfigReader[NodeIdentifierConfig.Random.type] = + deriveReader[NodeIdentifierConfig.Random.type] + lazy implicit val nodeNameExplicitReader: ConfigReader[NodeIdentifierConfig.Explicit] = + deriveReader[NodeIdentifierConfig.Explicit] + lazy implicit val nodeNameReader: ConfigReader[NodeIdentifierConfig] = + deriveReader[NodeIdentifierConfig] + lazy implicit val participantInitConfigReader: ConfigReader[ParticipantInitConfig] = + deriveReader[ParticipantInitConfig].applyDeprecations + .enableNestedOpt("auto-init", _.copy(identity = None)) + lazy implicit val domainInitConfigReader: ConfigReader[DomainInitConfig] = + deriveReader[DomainInitConfig] + .enableNestedOpt("auto-init", _.copy(identity = None)) + implicit val grpcHealthServerConfigReader: ConfigReader[GrpcHealthServerConfig] = + deriveReader[GrpcHealthServerConfig] + lazy implicit val communityCryptoProviderReader: ConfigReader[CommunityCryptoProvider] = + deriveEnumerationReader[CommunityCryptoProvider] + lazy implicit val cryptoSigningKeySchemeReader: ConfigReader[SigningKeyScheme] = + deriveEnumerationReader[SigningKeyScheme] + lazy implicit val cryptoEncryptionKeySchemeReader: ConfigReader[EncryptionKeyScheme] = + deriveEnumerationReader[EncryptionKeyScheme] + lazy implicit val cryptoSymmetricKeySchemeReader: ConfigReader[SymmetricKeyScheme] = + deriveEnumerationReader[SymmetricKeyScheme] + lazy implicit val cryptoHashAlgorithmReader: ConfigReader[HashAlgorithm] = + deriveEnumerationReader[HashAlgorithm] + lazy implicit val 
cryptoKeyFormatReader: ConfigReader[CryptoKeyFormat] = + deriveEnumerationReader[CryptoKeyFormat] + implicit def cryptoSchemeConfig[S: ConfigReader: Order]: ConfigReader[CryptoSchemeConfig[S]] = + deriveReader[CryptoSchemeConfig[S]] + lazy implicit val communityCryptoReader: ConfigReader[CommunityCryptoConfig] = + deriveReader[CommunityCryptoConfig] + lazy implicit val apiTypeGrpcConfigReader: ConfigReader[ApiType.Grpc.type] = + deriveReader[ApiType.Grpc.type] + lazy implicit val apiTypeConfigReader: ConfigReader[ApiType] = deriveReader[ApiType] + lazy implicit val clientConfigReader: ConfigReader[ClientConfig] = deriveReader[ClientConfig] + lazy implicit val remoteDomainConfigReader: ConfigReader[RemoteDomainConfig] = + deriveReader[RemoteDomainConfig] + lazy implicit val remoteParticipantConfigReader: ConfigReader[RemoteParticipantConfig] = + deriveReader[RemoteParticipantConfig] + lazy implicit val batchingReader: ConfigReader[BatchingConfig] = + deriveReader[BatchingConfig] + lazy implicit val connectionAllocationReader: ConfigReader[ConnectionAllocation] = + deriveReader[ConnectionAllocation] + lazy implicit val dbParamsReader: ConfigReader[DbParametersConfig] = + deriveReader[DbParametersConfig] + lazy implicit val memoryReader: ConfigReader[CommunityStorageConfig.Memory] = + deriveReader[CommunityStorageConfig.Memory] + lazy implicit val h2Reader: ConfigReader[CommunityDbConfig.H2] = + deriveReader[CommunityDbConfig.H2] + lazy implicit val postgresReader: ConfigReader[CommunityDbConfig.Postgres] = + deriveReader[CommunityDbConfig.Postgres] + lazy implicit val dbConfigReader: ConfigReader[CommunityDbConfig] = + deriveReader[CommunityDbConfig] + lazy implicit val nodeMonitoringConfigReader: ConfigReader[NodeMonitoringConfig] = + deriveReader[NodeMonitoringConfig] + lazy implicit val communityStorageConfigReader: ConfigReader[CommunityStorageConfig] = + deriveReader[CommunityStorageConfig] + lazy implicit val monotonicTimeReader + : 
ConfigReader[TestingTimeServiceConfig.MonotonicTime.type] = + deriveReader[TestingTimeServiceConfig.MonotonicTime.type] + lazy implicit val testingTimeServiceConfigReader: ConfigReader[TestingTimeServiceConfig] = + deriveReader[TestingTimeServiceConfig] + + lazy implicit val communityAdminServerReader: ConfigReader[CommunityAdminServerConfig] = + deriveReader[CommunityAdminServerConfig] + lazy implicit val tlsBaseServerConfigReader: ConfigReader[TlsBaseServerConfig] = + deriveReader[TlsBaseServerConfig] + lazy implicit val communityPublicServerConfigReader: ConfigReader[CommunityPublicServerConfig] = + deriveReader[CommunityPublicServerConfig] + lazy implicit val clockConfigRemoteClockReader: ConfigReader[ClockConfig.RemoteClock] = + deriveReader[ClockConfig.RemoteClock] + lazy implicit val clockConfigWallClockReader: ConfigReader[ClockConfig.WallClock] = + deriveReader[ClockConfig.WallClock] + lazy implicit val clockConfigSimClockReader: ConfigReader[ClockConfig.SimClock.type] = + deriveReader[ClockConfig.SimClock.type] + lazy implicit val clockConfigReader: ConfigReader[ClockConfig] = deriveReader[ClockConfig] + lazy implicit val jwtTimestampLeewayConfigReader: ConfigReader[JwtTimestampLeeway] = + deriveReader[JwtTimestampLeeway] + lazy implicit val authServiceConfigUnsafeJwtHmac256Reader + : ConfigReader[AuthServiceConfig.UnsafeJwtHmac256] = + deriveReader[AuthServiceConfig.UnsafeJwtHmac256] + lazy implicit val authServiceConfigJwtEs256CrtReader + : ConfigReader[AuthServiceConfig.JwtEs256Crt] = + deriveReader[AuthServiceConfig.JwtEs256Crt] + lazy implicit val authServiceConfigJwtEs512CrtReader + : ConfigReader[AuthServiceConfig.JwtEs512Crt] = + deriveReader[AuthServiceConfig.JwtEs512Crt] + lazy implicit val authServiceConfigJwtRs256CrtReader + : ConfigReader[AuthServiceConfig.JwtRs256Crt] = + deriveReader[AuthServiceConfig.JwtRs256Crt] + lazy implicit val authServiceConfigJwtRs256JwksReader + : ConfigReader[AuthServiceConfig.JwtRs256Jwks] = + 
deriveReader[AuthServiceConfig.JwtRs256Jwks] + lazy implicit val authServiceConfigWildcardReader + : ConfigReader[AuthServiceConfig.Wildcard.type] = + deriveReader[AuthServiceConfig.Wildcard.type] + lazy implicit val authServiceConfigReader: ConfigReader[AuthServiceConfig] = + deriveReader[AuthServiceConfig] + lazy implicit val rateLimitConfigReader: ConfigReader[RateLimitingConfig] = + deriveReader[RateLimitingConfig] + lazy implicit val ledgerApiServerConfigReader: ConfigReader[LedgerApiServerConfig] = + deriveReader[LedgerApiServerConfig].applyDeprecations + + implicit val throttleModeCfgReader: ConfigReader[ThrottleMode] = + ConfigReader.fromString[ThrottleMode](catchConvertError { s => + s.toLowerCase() match { + case "enforcing" => Right(ThrottleMode.Enforcing) + case "shaping" => Right(ThrottleMode.Shaping) + case _ => Left("not one of 'shaping' or 'enforcing'") + } + }) + lazy implicit val portFileReader: ConfigReader[Path] = + ConfigReader.fromString[Path](catchConvertError { s => + scala.util.Try(Paths.get(s)).toEither.left.map(_.getMessage) + }) + lazy implicit val staticContentConfigReader: ConfigReader[StaticContentConfig] = + deriveReader[StaticContentConfig] + lazy implicit val wsConfigReader: ConfigReader[WebsocketConfig] = + deriveReader[WebsocketConfig] + + lazy implicit val httpServerConfigReader: ConfigReader[HttpServerConfig] = + deriveReader[HttpServerConfig] + lazy implicit val httpApiServerConfigReader: ConfigReader[HttpApiConfig] = + deriveReader[HttpApiConfig] + lazy implicit val activeContractsServiceConfigReader + : ConfigReader[ActiveContractsServiceStreamsConfig] = + deriveReader[ActiveContractsServiceStreamsConfig].applyDeprecations + lazy implicit val packageMetadataViewConfigReader: ConfigReader[PackageMetadataViewConfig] = + deriveReader[PackageMetadataViewConfig] + lazy implicit val identityConfigReader: ConfigReader[TopologyConfig] = + deriveReader[TopologyConfig] + lazy implicit val topologyXConfigReader: 
ConfigReader[TopologyXConfig] = + deriveReader[TopologyXConfig] + lazy implicit val sequencerConnectionConfigCertificateFileReader + : ConfigReader[SequencerConnectionConfig.CertificateFile] = + deriveReader[SequencerConnectionConfig.CertificateFile] + lazy implicit val sequencerConnectionConfigCertificateStringReader + : ConfigReader[SequencerConnectionConfig.CertificateString] = + deriveReader[SequencerConnectionConfig.CertificateString] + lazy implicit val sequencerConnectionConfigCertificateConfigReader + : ConfigReader[SequencerConnectionConfig.CertificateConfig] = + deriveReader[SequencerConnectionConfig.CertificateConfig] + lazy implicit val sequencerConnectionConfigGrpcReader + : ConfigReader[SequencerConnectionConfig.Grpc] = + deriveReader[SequencerConnectionConfig.Grpc] + lazy implicit val sequencerConnectionConfigReader: ConfigReader[SequencerConnectionConfig] = + deriveReader[SequencerConnectionConfig] + // since the big majority of users will use GRPC, default to it so that they don't need to specify `type = grpc` + .orElse(ConfigReader[SequencerConnectionConfig.Grpc]) + lazy implicit val communitySequencerConfigDatabaseReader + : ConfigReader[CommunitySequencerConfig.Database] = + deriveReader[CommunitySequencerConfig.Database] + lazy implicit val communityDatabaseSequencerReaderConfigReader + : ConfigReader[CommunitySequencerReaderConfig] = + deriveReader[CommunitySequencerReaderConfig] + lazy implicit val communitySequencerWriterCommitModeConfigReader: ConfigReader[CommitMode] = + deriveEnumerationReader[CommitMode] + lazy implicit val communityNewDatabaseSequencerWriterConfigReader + : ConfigReader[SequencerWriterConfig] = + deriveReader[SequencerWriterConfig] + lazy implicit val communityNewDatabaseSequencerWriterConfigHighThroughputReader + : ConfigReader[SequencerWriterConfig.HighThroughput] = + deriveReader[SequencerWriterConfig.HighThroughput] + lazy implicit val communityNewDatabaseSequencerWriterConfigLowLatencyReader + : 
ConfigReader[SequencerWriterConfig.LowLatency] = + deriveReader[SequencerWriterConfig.LowLatency] + lazy implicit val communitySequencerConfigReader: ConfigReader[CommunitySequencerConfig] = + deriveReader[CommunitySequencerConfig] + lazy implicit val domainParametersConfigReader: ConfigReader[DomainParametersConfig] = + deriveReader[DomainParametersConfig] + lazy implicit val domainNodeParametersConfigReader: ConfigReader[DomainNodeParametersConfig] = + deriveReader[DomainNodeParametersConfig] + lazy implicit val deadlockDetectionConfigReader: ConfigReader[DeadlockDetectionConfig] = + deriveReader[DeadlockDetectionConfig] + lazy implicit val checkConfigAlwaysHealthyReader: ConfigReader[CheckConfig.AlwaysHealthy.type] = + deriveReader[CheckConfig.AlwaysHealthy.type] + lazy implicit val checkConfigPingReader: ConfigReader[CheckConfig.Ping] = + deriveReader[CheckConfig.Ping] + lazy implicit val checkConfigIsActiveReader: ConfigReader[CheckConfig.IsActive] = + deriveReader[CheckConfig.IsActive].applyDeprecations + lazy implicit val checkConfigReader: ConfigReader[CheckConfig] = deriveReader[CheckConfig] + lazy implicit val healthServerConfigReader: ConfigReader[HealthServerConfig] = + deriveReader[HealthServerConfig] + lazy implicit val healthConfigReader: ConfigReader[HealthConfig] = deriveReader[HealthConfig] + lazy implicit val metricsFilterConfigReader: ConfigReader[MetricsConfig.MetricsFilterConfig] = + deriveReader[MetricsConfig.MetricsFilterConfig] + lazy implicit val metricsConfigGraphitePrefixStatic: ConfigReader[MetricsPrefix.Static] = + deriveReader[MetricsPrefix.Static] + lazy implicit val metricsConfigGraphitePrefixNoPrefix + : ConfigReader[MetricsPrefix.NoPrefix.type] = + deriveReader[MetricsPrefix.NoPrefix.type] + lazy implicit val metricsConfigGraphitePrefixHostname + : ConfigReader[MetricsPrefix.Hostname.type] = + deriveReader[MetricsPrefix.Hostname.type] + lazy implicit val metricsConfigGraphitePrefix: ConfigReader[MetricsPrefix] = + 
deriveReader[MetricsPrefix] + lazy implicit val metricsConfigGraphiteReader: ConfigReader[MetricsConfig.Graphite] = + deriveReader[MetricsConfig.Graphite] + lazy implicit val metricsConfigPrometheusReader: ConfigReader[MetricsConfig.Prometheus] = + deriveReader[MetricsConfig.Prometheus] + lazy implicit val metricsConfigCsvReader: ConfigReader[MetricsConfig.Csv] = + deriveReader[MetricsConfig.Csv] + lazy implicit val metricsConfigJMXReader: ConfigReader[MetricsConfig.JMX] = + deriveReader[MetricsConfig.JMX] + lazy implicit val metricsReporterConfigReader: ConfigReader[MetricsReporterConfig] = + deriveReader[MetricsReporterConfig].applyDeprecations + lazy implicit val histogramDefinitionConfigReader: ConfigReader[HistogramDefinition] = + deriveReader[HistogramDefinition] + lazy implicit val metricsConfigReader: ConfigReader[MetricsConfig] = deriveReader[MetricsConfig] + lazy implicit val queryCostMonitoringConfigReader: ConfigReader[QueryCostMonitoringConfig] = + deriveReader[QueryCostMonitoringConfig] + lazy implicit val apiLoggingConfigReader: ConfigReader[ApiLoggingConfig] = + deriveReader[ApiLoggingConfig] + lazy implicit val loggingConfigReader: ConfigReader[LoggingConfig] = + deriveReader[LoggingConfig] + implicit lazy val monitoringConfigReader: ConfigReader[MonitoringConfig] = + deriveReader[MonitoringConfig] + lazy implicit val consoleCommandTimeoutReader: ConfigReader[ConsoleCommandTimeout] = + deriveReader[ConsoleCommandTimeout] + lazy implicit val processingTimeoutReader: ConfigReader[ProcessingTimeout] = + deriveReader[ProcessingTimeout] + lazy implicit val timeoutSettingsReader: ConfigReader[TimeoutSettings] = + deriveReader[TimeoutSettings] + lazy implicit val partyNotificationConfigViaDomainReader + : ConfigReader[PartyNotificationConfig.ViaDomain.type] = + deriveReader[PartyNotificationConfig.ViaDomain.type] + lazy implicit val partyNotificationConfigEagerReader + : ConfigReader[PartyNotificationConfig.Eager.type] = + 
deriveReader[PartyNotificationConfig.Eager.type] + lazy implicit val partyNotificationConfigReader: ConfigReader[PartyNotificationConfig] = + deriveReader[PartyNotificationConfig] + lazy implicit val cacheConfigReader: ConfigReader[CacheConfig] = + deriveReader[CacheConfig] + lazy implicit val cacheConfigWithTimeoutReader: ConfigReader[CacheConfigWithTimeout] = + deriveReader[CacheConfigWithTimeout] + lazy implicit val cachingConfigsReader: ConfigReader[CachingConfigs] = + deriveReader[CachingConfigs] + lazy implicit val adminWorkflowConfigReader: ConfigReader[AdminWorkflowConfig] = + deriveReader[AdminWorkflowConfig] + lazy implicit val participantStoreConfigReader: ConfigReader[ParticipantStoreConfig] = + deriveReader[ParticipantStoreConfig] + lazy implicit val ledgerApiContractLoaderConfigReader: ConfigReader[ContractLoaderConfig] = + deriveReader[ContractLoaderConfig] + lazy implicit val ledgerApiServerParametersConfigReader + : ConfigReader[LedgerApiServerParametersConfig] = + deriveReader[LedgerApiServerParametersConfig] + lazy implicit val participantNodeParameterConfigReader + : ConfigReader[ParticipantNodeParameterConfig] = + deriveReader[ParticipantNodeParameterConfig] + lazy implicit val timeTrackerConfigReader: ConfigReader[DomainTimeTrackerConfig] = + deriveReader[DomainTimeTrackerConfig] + lazy implicit val timeRequestConfigReader: ConfigReader[TimeProofRequestConfig] = + deriveReader[TimeProofRequestConfig] + lazy implicit val authTokenManagerConfigReader: ConfigReader[AuthenticationTokenManagerConfig] = + deriveReader[AuthenticationTokenManagerConfig] + lazy implicit val sequencerClientConfigReader: ConfigReader[SequencerClientConfig] = + deriveReader[SequencerClientConfig] + lazy implicit val retentionPeriodDefaultsConfigReader: ConfigReader[RetentionPeriodDefaults] = + deriveReader[RetentionPeriodDefaults] + lazy implicit val inMemoryDbCacheSettingsReader: ConfigReader[DbCacheConfig] = + deriveReader[DbCacheConfig] + @nowarn("cat=unused") lazy 
implicit val batchAggregatorConfigReader + : ConfigReader[BatchAggregatorConfig] = { + implicit val batching = deriveReader[BatchAggregatorConfig.Batching] + implicit val noBatching = deriveReader[BatchAggregatorConfig.NoBatching.type] + + deriveReader[BatchAggregatorConfig] + } + + lazy implicit val ammoniteConfigReader: ConfigReader[AmmoniteConsoleConfig] = + deriveReader[AmmoniteConsoleConfig] + lazy implicit val cantonParametersReader: ConfigReader[CantonParameters] = + deriveReader[CantonParameters] + lazy implicit val cantonFeaturesReader: ConfigReader[CantonFeatures] = + deriveReader[CantonFeatures] + } + + /** writers + * @param confidential if set to true, confidential data which should not be shared for support purposes is blinded + */ + class ConfigWriters(confidential: Boolean) { + val confidentialWriter = new ConfidentialConfigWriter(confidential) + + implicit val nonNegativeDurationWriter: ConfigWriter[NonNegativeDuration] = + ConfigWriter.toString { x => + x.unwrap match { + case Duration.Inf => "Inf" + case y => y.toString + } + } + implicit val positiveDurationSecondsWriter: ConfigWriter[PositiveDurationSeconds] = + ConfigWriter.toString(_.underlying.toString) + + implicit val lengthLimitedStringWriter: ConfigWriter[LengthLimitedString] = + ConfigWriter.toString(_.unwrap) + implicit val nonNegativeIntWriter: ConfigWriter[NonNegativeInt] = + ConfigWriter.toString(x => x.unwrap.toString) + + implicit val maxRequestSizeWriter: ConfigWriter[MaxRequestSize] = + ConfigWriter.toString(x => x.unwrap.toString) + implicit val existingFileWriter: ConfigWriter[ExistingFile] = + ConfigWriter.toString(x => x.unwrap.toString) + implicit val nonEmptyStringWriter: ConfigWriter[NonEmptyString] = + ConfigWriter.toString(x => x.unwrap) + + implicit val sequencerTestingInterceptorWriter + : ConfigWriter[DatabaseSequencerConfig.TestingInterceptor] = + ConfigWriter.toString(_ => "None") + + implicit val contractIdSeedingWriter: ConfigWriter[Seeding] = 
ConfigWriter.toString(_.name) + + implicit val tracingConfigDisabledSpanExporterWriter + : ConfigWriter[TracingConfig.Exporter.Disabled.type] = + deriveWriter[TracingConfig.Exporter.Disabled.type] + implicit val tracingConfigJaegerSpanExporterWriter + : ConfigWriter[TracingConfig.Exporter.Jaeger] = + deriveWriter[TracingConfig.Exporter.Jaeger] + implicit val tracingConfigZipkinSpanExporterWriter + : ConfigWriter[TracingConfig.Exporter.Zipkin] = + deriveWriter[TracingConfig.Exporter.Zipkin] + implicit val tracingConfigOtlpSpanExporterWriter: ConfigWriter[TracingConfig.Exporter.Otlp] = + deriveWriter[TracingConfig.Exporter.Otlp] + implicit val tracingConfigSpanExporterWriter: ConfigWriter[TracingConfig.Exporter] = + deriveWriter[TracingConfig.Exporter] + implicit val tracingConfigAlwaysOnSamplerWriter: ConfigWriter[TracingConfig.Sampler.AlwaysOn] = + deriveWriter[TracingConfig.Sampler.AlwaysOn] + implicit val tracingConfigAlwaysOffSamplerWriter + : ConfigWriter[TracingConfig.Sampler.AlwaysOff] = + deriveWriter[TracingConfig.Sampler.AlwaysOff] + implicit val tracingConfigTraceIdRatioSamplerWriter + : ConfigWriter[TracingConfig.Sampler.TraceIdRatio] = + deriveWriter[TracingConfig.Sampler.TraceIdRatio] + implicit val tracingConfigSamplerWriter: ConfigWriter[TracingConfig.Sampler] = + deriveWriter[TracingConfig.Sampler] + implicit val tracingConfigBatchSpanProcessorWriter + : ConfigWriter[TracingConfig.BatchSpanProcessor] = + deriveWriter[TracingConfig.BatchSpanProcessor] + implicit val tracingConfigTracerWriter: ConfigWriter[TracingConfig.Tracer] = + deriveWriter[TracingConfig.Tracer] + // treat TracingConfig.Propagation as an enum as we currently only have case object types in the sealed family + implicit val tracingConfigPropagationWriter: ConfigWriter[TracingConfig.Propagation] = + deriveEnumerationWriter[TracingConfig.Propagation] + implicit val tracingConfigWriter: ConfigWriter[TracingConfig] = + deriveWriter[TracingConfig] + + lazy implicit val 
tlsClientCertificateWriter: ConfigWriter[TlsClientCertificate] = + deriveWriter[TlsClientCertificate] + lazy implicit val serverAuthRequirementConfigNoneWriter + : ConfigWriter[ServerAuthRequirementConfig.None.type] = + deriveWriter[ServerAuthRequirementConfig.None.type] + lazy implicit val serverAuthRequirementConfigOptionalWriter + : ConfigWriter[ServerAuthRequirementConfig.Optional.type] = + deriveWriter[ServerAuthRequirementConfig.Optional.type] + lazy implicit val serverAuthRequirementConfigRequireWriter + : ConfigWriter[ServerAuthRequirementConfig.Require] = + deriveWriter[ServerAuthRequirementConfig.Require] + lazy implicit val serverAuthRequirementConfigWriter: ConfigWriter[ServerAuthRequirementConfig] = + deriveWriter[ServerAuthRequirementConfig] + lazy implicit val keepAliveClientConfigWriter: ConfigWriter[KeepAliveClientConfig] = + deriveWriter[KeepAliveClientConfig] + lazy implicit val keepAliveServerConfigWriter: ConfigWriter[KeepAliveServerConfig] = + deriveWriter[KeepAliveServerConfig] + lazy implicit val tlsServerConfigWriter: ConfigWriter[TlsServerConfig] = + deriveWriter[TlsServerConfig] + lazy implicit val tlsClientConfigWriter: ConfigWriter[TlsClientConfig] = + deriveWriter[TlsClientConfig] + lazy implicit val initBaseIdentityConfigWriter: ConfigWriter[InitConfigBase.Identity] = + deriveWriter[InitConfigBase.Identity] + lazy implicit val initConfigWriter: ConfigWriter[InitConfig] = deriveWriter[InitConfig] + lazy implicit val grpcHealthServerConfigWriter: ConfigWriter[GrpcHealthServerConfig] = + deriveWriter[GrpcHealthServerConfig] + lazy implicit val parametersParticipantInitConfigWriter + : ConfigWriter[ParticipantParametersInitConfig] = + deriveWriter[ParticipantParametersInitConfig] + lazy implicit val ledgerApiParticipantInitConfigWriter + : ConfigWriter[ParticipantLedgerApiInitConfig] = + deriveWriter[ParticipantLedgerApiInitConfig] + lazy implicit val nodeNameConfigWriter: ConfigWriter[NodeIdentifierConfig.Config.type] = + 
deriveWriter[NodeIdentifierConfig.Config.type] + lazy implicit val nodeNameRandomWriter: ConfigWriter[NodeIdentifierConfig.Random.type] = + deriveWriter[NodeIdentifierConfig.Random.type] + lazy implicit val nodeNameExplicitWriter: ConfigWriter[NodeIdentifierConfig.Explicit] = + deriveWriter[NodeIdentifierConfig.Explicit] + lazy implicit val nodeNameWriter: ConfigWriter[NodeIdentifierConfig] = + deriveWriter[NodeIdentifierConfig] + lazy implicit val participantInitConfigWriter: ConfigWriter[ParticipantInitConfig] = + deriveWriter[ParticipantInitConfig] + lazy implicit val domainInitConfigWriter: ConfigWriter[DomainInitConfig] = + deriveWriter[DomainInitConfig] + lazy implicit val communityCryptoProviderWriter: ConfigWriter[CommunityCryptoProvider] = + deriveEnumerationWriter[CommunityCryptoProvider] + lazy implicit val cryptoSigningKeySchemeWriter: ConfigWriter[SigningKeyScheme] = + deriveEnumerationWriter[SigningKeyScheme] + lazy implicit val cryptoEncryptionKeySchemeWriter: ConfigWriter[EncryptionKeyScheme] = + deriveEnumerationWriter[EncryptionKeyScheme] + lazy implicit val cryptoSymmetricKeySchemeWriter: ConfigWriter[SymmetricKeyScheme] = + deriveEnumerationWriter[SymmetricKeyScheme] + lazy implicit val cryptoHashAlgorithmWriter: ConfigWriter[HashAlgorithm] = + deriveEnumerationWriter[HashAlgorithm] + lazy implicit val cryptoKeyFormatWriter: ConfigWriter[CryptoKeyFormat] = + deriveEnumerationWriter[CryptoKeyFormat] + implicit def cryptoSchemeConfigWriter[S: ConfigWriter]: ConfigWriter[CryptoSchemeConfig[S]] = + deriveWriter[CryptoSchemeConfig[S]] + lazy implicit val communityCryptoWriter: ConfigWriter[CommunityCryptoConfig] = + deriveWriter[CommunityCryptoConfig] + lazy implicit val clientConfigWriter: ConfigWriter[ClientConfig] = deriveWriter[ClientConfig] + lazy implicit val remoteDomainConfigWriter: ConfigWriter[RemoteDomainConfig] = + deriveWriter[RemoteDomainConfig] + lazy implicit val remoteParticipantConfigWriter: ConfigWriter[RemoteParticipantConfig] = + 
deriveWriter[RemoteParticipantConfig] + lazy implicit val nodeMonitoringConfigWriter: ConfigWriter[NodeMonitoringConfig] = + deriveWriter[NodeMonitoringConfig] + lazy implicit val batchingWriter: ConfigWriter[BatchingConfig] = + deriveWriter[BatchingConfig] + lazy implicit val connectionAllocationWriter: ConfigWriter[ConnectionAllocation] = + deriveWriter[ConnectionAllocation] + lazy implicit val dbParametersWriter: ConfigWriter[DbParametersConfig] = + deriveWriter[DbParametersConfig] + lazy implicit val memoryWriter: ConfigWriter[CommunityStorageConfig.Memory] = + deriveWriter[CommunityStorageConfig.Memory] + lazy implicit val h2Writer: ConfigWriter[CommunityDbConfig.H2] = + confidentialWriter[CommunityDbConfig.H2](x => + x.copy(config = DbConfig.hideConfidential(x.config)) + ) + lazy implicit val postgresWriter: ConfigWriter[CommunityDbConfig.Postgres] = + confidentialWriter[CommunityDbConfig.Postgres](x => + x.copy(config = DbConfig.hideConfidential(x.config)) + ) + lazy implicit val dbConfigWriter: ConfigWriter[CommunityDbConfig] = + deriveWriter[CommunityDbConfig] + lazy implicit val communityStorageConfigWriter: ConfigWriter[CommunityStorageConfig] = + deriveWriter[CommunityStorageConfig] + lazy implicit val monotonicTimeWriter + : ConfigWriter[TestingTimeServiceConfig.MonotonicTime.type] = + deriveWriter[TestingTimeServiceConfig.MonotonicTime.type] + lazy implicit val testingTimeServiceConfigWriter: ConfigWriter[TestingTimeServiceConfig] = + deriveWriter[TestingTimeServiceConfig] + lazy implicit val communityAdminServerConfigWriter: ConfigWriter[CommunityAdminServerConfig] = + deriveWriter[CommunityAdminServerConfig] + lazy implicit val tlsBaseServerConfigWriter: ConfigWriter[TlsBaseServerConfig] = + deriveWriter[TlsBaseServerConfig] + lazy implicit val apiTypeGrpcConfigWriter: ConfigWriter[ApiType.Grpc.type] = + deriveWriter[ApiType.Grpc.type] + lazy implicit val apiTypeConfigWriter: ConfigWriter[ApiType] = deriveWriter[ApiType] + lazy implicit val 
communityPublicServerConfigWriter: ConfigWriter[CommunityPublicServerConfig] = + deriveWriter[CommunityPublicServerConfig] + lazy implicit val clockConfigRemoteClockWriter: ConfigWriter[ClockConfig.RemoteClock] = + deriveWriter[ClockConfig.RemoteClock] + lazy implicit val clockConfigWallClockWriter: ConfigWriter[ClockConfig.WallClock] = + deriveWriter[ClockConfig.WallClock] + lazy implicit val clockConfigSimClockWriter: ConfigWriter[ClockConfig.SimClock.type] = + deriveWriter[ClockConfig.SimClock.type] + lazy implicit val clockConfigWriter: ConfigWriter[ClockConfig] = deriveWriter[ClockConfig] + lazy implicit val jwtTimestampLeewayConfigWriter: ConfigWriter[JwtTimestampLeeway] = + deriveWriter[JwtTimestampLeeway] + lazy implicit val authServiceConfigJwtEs256CrtWriter + : ConfigWriter[AuthServiceConfig.JwtEs256Crt] = + deriveWriter[AuthServiceConfig.JwtEs256Crt] + lazy implicit val authServiceConfigJwtEs512CrtWriter + : ConfigWriter[AuthServiceConfig.JwtEs512Crt] = + deriveWriter[AuthServiceConfig.JwtEs512Crt] + lazy implicit val authServiceConfigJwtRs256CrtWriter + : ConfigWriter[AuthServiceConfig.JwtRs256Crt] = + deriveWriter[AuthServiceConfig.JwtRs256Crt] + lazy implicit val authServiceConfigJwtRs256JwksWriter + : ConfigWriter[AuthServiceConfig.JwtRs256Jwks] = + deriveWriter[AuthServiceConfig.JwtRs256Jwks] + lazy implicit val authServiceConfigUnsafeJwtHmac256Writer + : ConfigWriter[AuthServiceConfig.UnsafeJwtHmac256] = + confidentialWriter[AuthServiceConfig.UnsafeJwtHmac256]( + _.copy(secret = NonEmptyString.tryCreate("****")) + ) + lazy implicit val authServiceConfigWildcardWriter + : ConfigWriter[AuthServiceConfig.Wildcard.type] = + deriveWriter[AuthServiceConfig.Wildcard.type] + lazy implicit val authServiceConfigWriter: ConfigWriter[AuthServiceConfig] = + deriveWriter[AuthServiceConfig] + lazy implicit val rateLimitConfigWriter: ConfigWriter[RateLimitingConfig] = + deriveWriter[RateLimitingConfig] + lazy implicit val ledgerApiServerConfigWriter: 
ConfigWriter[LedgerApiServerConfig] = + deriveWriter[LedgerApiServerConfig] + + implicit val throttleModeCfgWriter: ConfigWriter[ThrottleMode] = + ConfigWriter.toString[ThrottleMode] { + case ThrottleMode.Shaping => "shaping" + case ThrottleMode.Enforcing => "enforcing" + } + + lazy implicit val portFileWriter: ConfigWriter[Path] = + ConfigWriter.toString(_.toFile.getAbsolutePath) + lazy implicit val staticContentConfigWriter: ConfigWriter[StaticContentConfig] = + deriveWriter[StaticContentConfig] + lazy implicit val wsConfigWriter: ConfigWriter[WebsocketConfig] = + deriveWriter[WebsocketConfig] + + lazy implicit val httpServerConfigWriter: ConfigWriter[HttpServerConfig] = + deriveWriter[HttpServerConfig] + lazy implicit val httpApiServerConfigWriter: ConfigWriter[HttpApiConfig] = + deriveWriter[HttpApiConfig] + lazy implicit val activeContractsServiceConfigWriter + : ConfigWriter[ActiveContractsServiceStreamsConfig] = + deriveWriter[ActiveContractsServiceStreamsConfig] + lazy implicit val packageMetadataViewConfigWriter: ConfigWriter[PackageMetadataViewConfig] = + deriveWriter[PackageMetadataViewConfig] + lazy implicit val identityConfigWriter: ConfigWriter[TopologyConfig] = + deriveWriter[TopologyConfig] + lazy implicit val topologyXConfigWriter: ConfigWriter[TopologyXConfig] = + deriveWriter[TopologyXConfig] + lazy implicit val sequencerConnectionConfigCertificateFileWriter + : ConfigWriter[SequencerConnectionConfig.CertificateFile] = + deriveWriter[SequencerConnectionConfig.CertificateFile] + lazy implicit val sequencerConnectionConfigCertificateStringWriter + : ConfigWriter[SequencerConnectionConfig.CertificateString] = + confidentialWriter[SequencerConnectionConfig.CertificateString](_.copy(pemString = "****")) + lazy implicit val sequencerConnectionConfigCertificateConfigWriter + : ConfigWriter[SequencerConnectionConfig.CertificateConfig] = + deriveWriter[SequencerConnectionConfig.CertificateConfig] + lazy implicit val sequencerConnectionConfigGrpcWriter + : 
ConfigWriter[SequencerConnectionConfig.Grpc] = + deriveWriter[SequencerConnectionConfig.Grpc] + lazy implicit val sequencerConnectionConfigWriter: ConfigWriter[SequencerConnectionConfig] = + deriveWriter[SequencerConnectionConfig] + lazy implicit val communitySequencerConfigDatabaseWriter + : ConfigWriter[CommunitySequencerConfig.Database] = + deriveWriter[CommunitySequencerConfig.Database] + lazy implicit val communityDatabaseSequencerReaderConfigWriter + : ConfigWriter[CommunitySequencerReaderConfig] = + deriveWriter[CommunitySequencerReaderConfig] + lazy implicit val communitySequencerWriterCommitModeConfigWriter: ConfigWriter[CommitMode] = + deriveEnumerationWriter[CommitMode] + lazy implicit val communityDatabaseSequencerWriterConfigWriter + : ConfigWriter[SequencerWriterConfig] = + deriveWriter[SequencerWriterConfig] + lazy implicit val communityDatabaseSequencerWriterConfigHighThroughputWriter + : ConfigWriter[SequencerWriterConfig.HighThroughput] = + deriveWriter[SequencerWriterConfig.HighThroughput] + lazy implicit val communityDatabaseSequencerWriterConfigLowLatencyWriter + : ConfigWriter[SequencerWriterConfig.LowLatency] = + deriveWriter[SequencerWriterConfig.LowLatency] + lazy implicit val communitySequencerConfigWriter: ConfigWriter[CommunitySequencerConfig] = + deriveWriter[CommunitySequencerConfig] + lazy implicit val domainParametersConfigWriter: ConfigWriter[DomainParametersConfig] = + deriveWriter[DomainParametersConfig] + lazy implicit val domainNodeParametersConfigWriter: ConfigWriter[DomainNodeParametersConfig] = + deriveWriter[DomainNodeParametersConfig] + lazy implicit val deadlockDetectionConfigWriter: ConfigWriter[DeadlockDetectionConfig] = + deriveWriter[DeadlockDetectionConfig] + lazy implicit val checkConfigAlwaysHealthyWriter: ConfigWriter[CheckConfig.AlwaysHealthy.type] = + deriveWriter[CheckConfig.AlwaysHealthy.type] + lazy implicit val checkConfigPingWriter: ConfigWriter[CheckConfig.Ping] = + deriveWriter[CheckConfig.Ping] + lazy 
implicit val checkConfigIsActiveWriter: ConfigWriter[CheckConfig.IsActive] = + deriveWriter[CheckConfig.IsActive] + lazy implicit val checkConfigWriter: ConfigWriter[CheckConfig] = deriveWriter[CheckConfig] + lazy implicit val healthServerConfigWriter: ConfigWriter[HealthServerConfig] = + deriveWriter[HealthServerConfig] + lazy implicit val healthConfigWriter: ConfigWriter[HealthConfig] = deriveWriter[HealthConfig] + lazy implicit val metricsFilterConfigWriter: ConfigWriter[MetricsConfig.MetricsFilterConfig] = + deriveWriter[MetricsConfig.MetricsFilterConfig] + lazy implicit val metricsConfigGraphiteNoPrefix: ConfigWriter[MetricsPrefix.NoPrefix.type] = + deriveWriter[MetricsPrefix.NoPrefix.type] + lazy implicit val metricsConfigGraphiteStatic: ConfigWriter[MetricsPrefix.Static] = + deriveWriter[MetricsPrefix.Static] + lazy implicit val metricsConfigGraphiteHostname: ConfigWriter[MetricsPrefix.Hostname.type] = + deriveWriter[MetricsPrefix.Hostname.type] + lazy implicit val metricsConfigGraphitePrefix: ConfigWriter[MetricsPrefix] = + deriveWriter[MetricsPrefix] + lazy implicit val metricsConfigGraphiteWriter: ConfigWriter[MetricsConfig.Graphite] = + deriveWriter[MetricsConfig.Graphite] + lazy implicit val metricsConfigPrometheusWriter: ConfigWriter[MetricsConfig.Prometheus] = + deriveWriter[MetricsConfig.Prometheus] + lazy implicit val metricsConfigCsvWriter: ConfigWriter[MetricsConfig.Csv] = + deriveWriter[MetricsConfig.Csv] + lazy implicit val metricsConfigJMXWriter: ConfigWriter[MetricsConfig.JMX] = + deriveWriter[MetricsConfig.JMX] + lazy implicit val metricsReporterConfigWriter: ConfigWriter[MetricsReporterConfig] = + deriveWriter[MetricsReporterConfig] + lazy implicit val histogramDefinitionConfigWriter: ConfigWriter[HistogramDefinition] = + deriveWriter[HistogramDefinition] + lazy implicit val metricsConfigWriter: ConfigWriter[MetricsConfig] = deriveWriter[MetricsConfig] + lazy implicit val queryCostMonitoringConfig: ConfigWriter[QueryCostMonitoringConfig] = + 
deriveWriter[QueryCostMonitoringConfig] + lazy implicit val apiLoggingConfigWriter: ConfigWriter[ApiLoggingConfig] = + deriveWriter[ApiLoggingConfig] + lazy implicit val loggingConfigWriter: ConfigWriter[LoggingConfig] = + deriveWriter[LoggingConfig] + lazy implicit val monitoringConfigWriter: ConfigWriter[MonitoringConfig] = + deriveWriter[MonitoringConfig] + lazy implicit val consoleCommandTimeoutWriter: ConfigWriter[ConsoleCommandTimeout] = + deriveWriter[ConsoleCommandTimeout] + lazy implicit val processingTimeoutWriter: ConfigWriter[ProcessingTimeout] = + deriveWriter[ProcessingTimeout] + lazy implicit val timeoutSettingsWriter: ConfigWriter[TimeoutSettings] = + deriveWriter[TimeoutSettings] + lazy implicit val partyNotificationConfigViaDomainWriter + : ConfigWriter[PartyNotificationConfig.ViaDomain.type] = + deriveWriter[PartyNotificationConfig.ViaDomain.type] + lazy implicit val partyNotificationConfigEagerWriter + : ConfigWriter[PartyNotificationConfig.Eager.type] = + deriveWriter[PartyNotificationConfig.Eager.type] + lazy implicit val partyNotificationConfigWriter: ConfigWriter[PartyNotificationConfig] = + deriveWriter[PartyNotificationConfig] + lazy implicit val cacheConfigWriter: ConfigWriter[CacheConfig] = + deriveWriter[CacheConfig] + lazy implicit val cacheConfigWithTimeoutWriter: ConfigWriter[CacheConfigWithTimeout] = + deriveWriter[CacheConfigWithTimeout] + lazy implicit val cachingConfigsWriter: ConfigWriter[CachingConfigs] = + deriveWriter[CachingConfigs] + lazy implicit val adminWorkflowConfigWriter: ConfigWriter[AdminWorkflowConfig] = + deriveWriter[AdminWorkflowConfig] + lazy implicit val participantStoreConfigWriter: ConfigWriter[ParticipantStoreConfig] = + deriveWriter[ParticipantStoreConfig] + lazy implicit val ledgerApiContractLoaderConfigWriter: ConfigWriter[ContractLoaderConfig] = + deriveWriter[ContractLoaderConfig] + lazy implicit val ledgerApiServerParametersConfigWriter + : ConfigWriter[LedgerApiServerParametersConfig] = + 
deriveWriter[LedgerApiServerParametersConfig] + lazy implicit val participantNodeParameterConfigWriter + : ConfigWriter[ParticipantNodeParameterConfig] = + deriveWriter[ParticipantNodeParameterConfig] + lazy implicit val timeTrackerConfigWriter: ConfigWriter[DomainTimeTrackerConfig] = + deriveWriter[DomainTimeTrackerConfig] + lazy implicit val timeRequestConfigWriter: ConfigWriter[TimeProofRequestConfig] = + deriveWriter[TimeProofRequestConfig] + lazy implicit val authTokenManagerConfigWriter: ConfigWriter[AuthenticationTokenManagerConfig] = + deriveWriter[AuthenticationTokenManagerConfig] + lazy implicit val sequencerClientConfigWriter: ConfigWriter[SequencerClientConfig] = + deriveWriter[SequencerClientConfig] + lazy implicit val retentionPeriodDefaultsConfigWriter: ConfigWriter[RetentionPeriodDefaults] = + deriveWriter[RetentionPeriodDefaults] + lazy implicit val inMemoryDbCacheSettingsWriter: ConfigWriter[DbCacheConfig] = + deriveWriter[DbCacheConfig] + lazy implicit val batchAggregatorConfigWriter: ConfigWriter[BatchAggregatorConfig] = { + @nowarn("cat=unused") implicit val batching: ConfigWriter[BatchAggregatorConfig.Batching] = + deriveWriter[BatchAggregatorConfig.Batching] + @nowarn("cat=unused") implicit val noBatching + : ConfigWriter[BatchAggregatorConfig.NoBatching.type] = + deriveWriter[BatchAggregatorConfig.NoBatching.type] + + deriveWriter[BatchAggregatorConfig] + } + lazy implicit val ammoniteConfigWriter: ConfigWriter[AmmoniteConsoleConfig] = + deriveWriter[AmmoniteConsoleConfig] + lazy implicit val cantonParametersWriter: ConfigWriter[CantonParameters] = + deriveWriter[CantonParameters] + lazy implicit val cantonFeaturesWriter: ConfigWriter[CantonFeatures] = + deriveWriter[CantonFeatures] + } + + /** Parses and merges the provided configuration files into a single [[com.typesafe.config.Config]]. + * Also loads and merges the default config (as defined by the Lightbend config library) with the provided + * configuration files. 
Unless you know that you explicitly only want to use the provided configuration files, + * use this method. + * Any errors will be returned, but not logged. + * + * @param files config files to read, parse and merge + * @return [[scala.Right]] [[com.typesafe.config.Config]] if parsing was successful. + */ + private def parseAndMergeConfigs( + files: NonEmpty[Seq[File]] + )(implicit elc: ErrorLoggingContext): Either[CantonConfigError, Config] = { + val baseConfig = ConfigFactory.load() + for { + verifiedFiles <- verifyThatFilesCanBeRead(files) + parsedFiles <- parseConfigs(verifiedFiles) + combinedConfig = mergeConfigs(parsedFiles).withFallback(baseConfig) + } yield combinedConfig + } + + /** Parses and merges the provided configuration files into a single [[com.typesafe.config.Config]]. + * Does not load and merge the default config (as defined by the Lightbend config library) with the provided + * configuration files. Only use this if you explicitly know that you don't want to load and merge the default config. + * + * @param files config files to read, parse and merge + * @return [[scala.Right]] [[com.typesafe.config.Config]] if parsing was successful. 
+ */ + def parseAndMergeJustCLIConfigs( + files: NonEmpty[Seq[File]] + )(implicit elc: ErrorLoggingContext): Either[CantonConfigError, Config] = { + for { + verifiedFiles <- verifyThatFilesCanBeRead(files) + parsedFiles <- parseConfigs(verifiedFiles) + combinedConfig = mergeConfigs(parsedFiles) + } yield combinedConfig + } + + /** Renders a configuration file such that we can write it to the log-file on startup */ + def renderForLoggingOnStartup(config: Config): String = { + import scala.jdk.CollectionConverters.* + val replace = + Set("secret", "pw", "password", "ledger-api-jdbc-url", "jdbc", "token", "admin-token") + val blinded = ConfigValueFactory.fromAnyRef("****") + def goVal(key: String, c: ConfigValue): ConfigValue = { + c match { + case lst: ConfigList => goLst(lst) + case obj: ConfigObject => + goObj(obj) + case other => + if (replace.contains(key)) + blinded + else other + } + } + def goObj(c: ConfigObject): ConfigObject = { + val resolved = Try(c.isEmpty).isSuccess + if (resolved) { + c.entrySet().asScala.map(x => (x.getKey, x.getValue)).foldLeft(c) { + case (acc, (key, value)) => + acc.withValue(key, goVal(key, value)) + } + } else c + } + def goLst(c: ConfigList): ConfigList = { + val mapped = (0 until c.size()) map { idx => + goVal("idx", c.get(idx)) + } + ConfigValueFactory.fromIterable(mapped.asJava) + } + def go(c: Config): Config = { + c + .root() + .entrySet() + .asScala + .map(x => (x.getKey, x.getValue)) + .foldLeft(c) { case (subConfig, (key, obj)) => + subConfig.withValue(key, goVal(key, obj)) + } + } + go(config) + .resolve() + .root() + .get("canton") + .render(CantonConfig.defaultConfigRenderer) + } + + private def verifyThatFilesCanBeRead( + files: NonEmpty[Seq[File]] + )(implicit elc: ErrorLoggingContext): Either[CantonConfigError, NonEmpty[Seq[File]]] = { + val filesThatCannotBeRead = files.filterNot(_.canRead) + Either.cond( + filesThatCannotBeRead.isEmpty, + files, + CannotReadFilesError.Error(filesThatCannotBeRead), + ) + } + + 
private def parseConfigs( + files: NonEmpty[Seq[File]] + )(implicit elc: ErrorLoggingContext): Either[CantonConfigError, NonEmpty[Seq[Config]]] = { + import cats.implicits.* + files.toNEF + .traverse(f => Either.catchOnly[ConfigException](ConfigFactory.parseFile(f)).toValidatedNec) + .toEither + .leftMap(errs => CannotParseFilesError.Error(errs.toList)) + .leftWiden[CantonConfigError] + } + + private def configOrExit[ConfClass: ClassTag]( + result: Either[CantonConfigError, ConfClass] + ): ConfClass = { + result.valueOr { _ => + sys.exit(1) + } + } + + /** Merge a number of [[com.typesafe.config.Config]] instances into a single [[com.typesafe.config.Config]]. + * If the same key is included in multiple configurations, then the last definition has highest precedence. + */ + def mergeConfigs(firstConfig: Config, otherConfigs: Seq[Config]): Config = + otherConfigs.foldLeft(firstConfig)((combined, config) => config.withFallback(combined)) + + /** Merge a number of [[com.typesafe.config.Config]] instances into a single [[com.typesafe.config.Config]]. + * If the same key is included in multiple configurations, then the last definition has highest precedence. + */ + def mergeConfigs(configs: NonEmpty[Seq[Config]]): Config = + mergeConfigs(configs.head1, configs.tail1) + + /** Parses the provided files to generate a [[com.typesafe.config.Config]], then attempts to load the + * [[com.typesafe.config.Config]] based on the given ClassTag. Will return an error (but not log anything) if + * any steps fails. + * + * @param files config files to read, parse and merge + * @return [[scala.Right]] of type `ConfClass` (e.g. [[CantonCommunityConfig]])) if parsing was successful. 
+ */ + def parseAndLoad[ + ConfClass <: CantonConfig & ConfigDefaults[DefaultPorts, ConfClass]: ClassTag: ConfigReader + ]( + files: Seq[File] + )(implicit elc: ErrorLoggingContext): Either[CantonConfigError, ConfClass] = { + for { + nonEmpty <- NonEmpty.from(files).toRight(NoConfigFiles.Error()) + parsedAndMerged <- parseAndMergeConfigs(nonEmpty) + loaded <- loadAndValidate[ConfClass](parsedAndMerged) + } yield loaded + } + + /** Parses the provided files to generate a [[com.typesafe.config.Config]], then attempts to load the + * [[com.typesafe.config.Config]] based on the given ClassTag. Will log the error and exit with code 1, if any error + * is encountered. + * * + * @param files config files to read - must be a non-empty Seq + * @throws java.lang.IllegalArgumentException if `files` is empty + * @return [[scala.Right]] of type `ClassTag` (e.g. [[CantonCommunityConfig]])) if parsing was successful. + */ + def parseAndLoadOrExit[ + ConfClass <: CantonConfig & ConfigDefaults[DefaultPorts, ConfClass]: ClassTag: ConfigReader + ](files: Seq[File])(implicit + elc: ErrorLoggingContext + ): ConfClass = { + val result = parseAndLoad[ConfClass](files) + configOrExit(result) + } + + /** Will load a case class configuration (defined by template args) from the configuration object. + * Any configuration errors encountered will be returned (but not logged). + * + * @return [[scala.Right]] of type `CantonConfig` (e.g. [[CantonCommunityConfig]])) if parsing was successful. + */ + def loadAndValidate[ConfClass <: CantonConfig & ConfigDefaults[ + DefaultPorts, + ConfClass, + ]: ClassTag: ConfigReader]( + config: Config + )(implicit elc: ErrorLoggingContext): Either[CantonConfigError, ConfClass] = { + // config.resolve forces any substitutions to be resolved (typically referenced environment variables or system properties). + // this normally would happen by default during ConfigFactory.load(), + // however we have to manually as we've merged in individual files. 
+ val result = Either.catchOnly[UnresolvedSubstitution](config.resolve()) + result match { + case Right(resolvedConfig) => + loadRawConfig[ConfClass](resolvedConfig) + .flatMap { conf => + val confWithDefaults = conf.withDefaults(new DefaultPorts()) + confWithDefaults.validate.toEither + .map(_ => confWithDefaults) + .leftMap(causes => ConfigErrors.ValidationError.Error(causes.toList)) + } + case Left(substitutionError) => Left(SubstitutionError.Error(Seq(substitutionError))) + } + } + + /** Will load a case class configuration (defined by template args) from the configuration object. + * If any configuration errors are encountered, they will be logged and the thread will exit with code 1. + * + * @return [[scala.Right]] of type `ClassTag` (e.g. [[CantonCommunityConfig]])) if parsing was successful. + */ + def loadOrExit[ + ConfClass <: CantonConfig & ConfigDefaults[DefaultPorts, ConfClass]: ClassTag: ConfigReader + ]( + config: Config + )(implicit elc: ErrorLoggingContext): ConfClass = { + loadAndValidate[ConfClass](config).valueOr(_ => sys.exit(1)) + } + + private[config] def loadRawConfig[ConfClass <: CantonConfig: ClassTag: ConfigReader]( + rawConfig: Config + )(implicit elc: ErrorLoggingContext): Either[CantonConfigError, ConfClass] = { + pureconfig.ConfigSource + .fromConfig(rawConfig) + .at("canton") + .load[ConfClass] + .leftMap(failures => GenericConfigError.Error(ConfigErrors.getMessage[ConfClass](failures))) + } + + lazy val defaultConfigRenderer: ConfigRenderOptions = + ConfigRenderOptions.defaults().setOriginComments(false).setComments(false).setJson(false) +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/config/CommunityConfigValidations.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/config/CommunityConfigValidations.scala new file mode 100644 index 0000000000..61bde41054 --- /dev/null +++ 
b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/config/CommunityConfigValidations.scala @@ -0,0 +1,302 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.config + +import cats.data.Validated +import cats.syntax.foldable.* +import cats.syntax.functor.* +import cats.syntax.functorFilter.* +import com.daml.nonempty.NonEmpty +import com.daml.nonempty.catsinstances.* +import com.digitalasset.canton.config.CantonRequireTypes.InstanceName +import com.digitalasset.canton.domain.config.DomainParametersConfig +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.version.HandshakeErrors.DeprecatedProtocolVersion +import com.digitalasset.canton.version.ProtocolVersion + +import java.net.URI + +private[config] trait ConfigValidations[C <: CantonConfig] { + final def validate(config: C): Validated[NonEmpty[Seq[String]], Unit] = + validations.traverse_(_(config)) + + protected val validations: List[C => Validated[NonEmpty[Seq[String]], Unit]] +} + +object CommunityConfigValidations + extends ConfigValidations[CantonCommunityConfig] + with NamedLogging { + import TraceContext.Implicits.Empty.* + override protected def loggerFactory: NamedLoggerFactory = NamedLoggerFactory.root + + final case class DbAccess(url: String, user: Option[String]) { + private lazy val urlNoPassword = { + val uri = new URI( + url.replace("jdbc:", "") + ) + val queryNoPassword = Option(uri.getQuery) + .getOrElse("") + .split('&') + .map(param => + if (param.startsWith("password=")) "" + else param + ) + .mkString + new URI(uri.getScheme, uri.getAuthority, uri.getPath, queryNoPassword, uri.getFragment) + } + + override def toString: String = + s"DbAccess($urlNoPassword, $user)" + } + + type Validation = CantonCommunityConfig => 
    Validated[NonEmpty[Seq[String]], Unit]

  // Community-specific validations first, then the checks shared with the enterprise edition.
  override protected val validations: List[Validation] =
    List[Validation](noDuplicateStorage, atLeastOneNode) ++ genericValidations[
      CantonCommunityConfig
    ]

  /** Validations applied to all community and enterprise Canton configurations. */
  private[config] def genericValidations[C <: CantonConfig]
      : List[C => Validated[NonEmpty[Seq[String]], Unit]] =
    List(
      developmentProtocolSafetyCheckDomains,
      developmentProtocolSafetyCheckParticipants,
      warnIfUnsafeMinProtocolVersion,
      warnIfUnsafeProtocolVersionEmbeddedDomain,
      adminTokenSafetyCheckParticipants,
    )

  /** Group node configs by db access to find matching db storage configs.
    * Overcomplicated types used are to work around that at this point nodes could have conflicting names so we can't just
    * throw them all in a single map.
    */
  private[config] def extractNormalizedDbAccess[C <: CantonConfig](
      nodeConfigs: Map[String, LocalNodeConfig]*
  ): Map[DbAccess, List[(String, LocalNodeConfig)]] = {
    // Basic attempt to normalize JDBC URL-based configuration and explicit property configuration
    // Limitations: Does not parse nor normalize the JDBC URLs
    def normalize(dbConfig: DbConfig): Option[DbAccess] = {
      import slick.util.ConfigExtensionMethods.*

      val slickConfig = dbConfig.config

      // Properties may be set either top-level or under the slick "properties." prefix.
      def getPropStr(prop: String): Option[String] =
        slickConfig.getStringOpt(prop).orElse(slickConfig.getStringOpt(s"properties.$prop"))

      def getPropInt(prop: String): Option[Int] =
        slickConfig.getIntOpt(prop).orElse(slickConfig.getIntOpt(s"properties.$prop"))

      def extractUrl: Option[String] =
        getPropStr("url").orElse(getPropStr("jdbcUrl"))

      // Reconstruct a URL from discrete server/port/database properties when no explicit URL is set.
      def extractServerPortDbAsUrl: Option[String] =
        for {
          server <- getPropStr("serverName")
          port <- getPropInt("portNumber")
          dbName <- getPropStr("databaseName")
          url = dbConfig match {
            case _: H2DbConfig => DbConfig.h2Url(dbName)
            case _: PostgresDbConfig => DbConfig.postgresUrl(server, port, dbName)
            // Assume Oracle
            case _ => DbConfig.oracleUrl(server, port, dbName)
          }
        } yield url

      val user = getPropStr("user")
      extractUrl.orElse(extractServerPortDbAsUrl).map(url => DbAccess(url = url, user = user))
    }

    // combine into a single list of name to config
    val configs = nodeConfigs.map(_.toList).foldLeft(List[(String, LocalNodeConfig)]())(_ ++ _)

    // Keep only nodes with a normalizable DB storage config.
    val withStorageConfigs = configs.mapFilter { case (name, config) =>
      config.storage match {
        case dbConfig: DbConfig => normalize(dbConfig).map((_, name, config))
        case _ => None
      }
    }

    withStorageConfigs
      .groupBy { case (dbAccess, _, _) => dbAccess }
      .fmap(_.map { case (_, name, config) =>
        (name, config)
      })
  }

  /** Render a node list as "nodeType name" pairs for error messages. */
  private[config] def formatNodeList(nodes: List[(String, LocalNodeConfig)]): String =
    nodes.map { case (name, config) => s"${config.nodeTypeName} $name" }.mkString(",")

  /** Validate the config that the storage configuration is not shared between nodes. */
  private def noDuplicateStorage(
      config: CantonCommunityConfig
  ): Validated[NonEmpty[Seq[String]], Unit] = {
    val dbAccessToNodes =
      extractNormalizedDbAccess(config.participantsByString, config.domainsByString)

    dbAccessToNodes.toSeq
      .traverse_ {
        // More than one node maps to the same (url, user) pair -> reject.
        case (dbAccess, nodes) if nodes.lengthCompare(1) > 0 =>
          Validated.invalid(
            NonEmpty(Seq, s"Nodes ${formatNodeList(nodes)} share same DB access: $dbAccess")
          )
        case _ => Validated.valid(())
      }
  }

  /** Reject configurations that declare no local or remote node at all. */
  @SuppressWarnings(Array("org.wartremover.warts.Product", "org.wartremover.warts.Serializable"))
  private def atLeastOneNode(
      config: CantonCommunityConfig
  ): Validated[NonEmpty[Seq[String]], Unit] = {
    val CantonCommunityConfig(
      domains,
      participants,
      participantsX,
      remoteDomains,
      remoteParticipants,
      remoteParticipantsX,
      _,
      _,
      _,
    ) =
      config
    Validated.cond(
      Seq(
        domains,
        participants,
        remoteDomains,
        remoteParticipants,
        participantsX,
        remoteParticipantsX,
      )
        .exists(_.nonEmpty),
      (),
      NonEmpty(Seq, "At least one node must be defined in the configuration"),
    )

  }

  // NOTE(review): defined here but not referenced in this chunk — presumably used by a
  // monitoring-config validation elsewhere in the file; confirm before removing.
  private[config] val backwardsCompatibleLoggingConfigErr =
    "Inconsistent configuration of canton.monitoring.log-message-payloads and canton.monitoring.logging.api.message-payloads. Please use the latter in your configuration"

  /** Domains may only run a non-stable protocol version when non-standard-config is set. */
  private def developmentProtocolSafetyCheckDomains(
      config: CantonConfig
  ): Validated[NonEmpty[Seq[String]], Unit] = {
    developmentProtocolSafetyCheck(
      config.parameters.nonStandardConfig,
      config.domains.toSeq.map { case (k, v) =>
        (k, v.init.domainParameters)
      },
    )
  }

  /** Participants may only enable dev-version-support when non-standard-config is set. */
  private def developmentProtocolSafetyCheckParticipants(
      config: CantonConfig
  ): Validated[NonEmpty[Seq[String]], Unit] = {
    def toNe(
        name: String,
        nonStandardConfig: Boolean,
        devVersionSupport: Boolean,
    ): Validated[NonEmpty[Seq[String]], Unit] = {
      Validated.cond(
        nonStandardConfig || !devVersionSupport,
        (),
        NonEmpty(
          Seq,
          s"Enabling dev-version-support for participant $name requires you to explicitly set canton.parameters.non-standard-config = yes",
        ),
      )
    }

    config.participants.toList.traverse_ { case (name, participantConfig) =>
      toNe(
        name.unwrap,
        config.parameters.nonStandardConfig,
        participantConfig.parameters.devVersionSupport,
      )
    }
  }

  /** Always valid; only emits a warning (via side effect) when a participant's minimum
    * protocol version is deprecated and warnings are not suppressed.
    */
  private def warnIfUnsafeMinProtocolVersion(
      config: CantonConfig
  ): Validated[NonEmpty[Seq[String]], Unit] = {
    config.participants.toSeq.foreach { case (name, config) =>
      val minimum = config.parameters.minimumProtocolVersion.map(_.unwrap)
      val isMinimumDeprecatedVersion = minimum.getOrElse(ProtocolVersion.minimum).isDeprecated

      if (isMinimumDeprecatedVersion && !config.parameters.dontWarnOnDeprecatedPV)
        DeprecatedProtocolVersion.WarnParticipant(name, minimum).discard
    }
    Validated.valid(())
  }

  /** Always valid; warns (via side effect) when an embedded domain runs a deprecated
    * protocol version and warnings are not suppressed.
    */
  private def warnIfUnsafeProtocolVersionEmbeddedDomain(
      config: CantonConfig
  ): Validated[NonEmpty[Seq[String]], Unit] = {
    config.domains.toSeq.foreach { case (name, config) =>
      val pv = config.init.domainParameters.protocolVersion.unwrap
      if (pv.isDeprecated && !config.init.domainParameters.dontWarnOnDeprecatedPV)
        DeprecatedProtocolVersion.WarnDomain(name, pv).discard
    }
    Validated.valid(())
  }

  /** Shared check: each named node must use a stable protocol version unless
    * non-standard-config explicitly allows unstable versions.
    */
  private[config] def developmentProtocolSafetyCheck(
      allowUnstableProtocolVersion: Boolean,
      namesAndConfig: Seq[(InstanceName, DomainParametersConfig)],
  ): Validated[NonEmpty[Seq[String]], Unit] = {
    def toNe(
        name: String,
        protocolVersion: ProtocolVersion,
        allowUnstableProtocolVersion: Boolean,
    ): Validated[NonEmpty[Seq[String]], Unit] = {
      Validated.cond(
        protocolVersion.isStable || allowUnstableProtocolVersion,
        (),
        NonEmpty(
          Seq,
          s"Using non-stable protocol $protocolVersion for node $name requires you to explicitly set canton.parameters.non-standard-config = yes",
        ),
      )
    }

    namesAndConfig.toList.traverse_ { case (name, parameters) =>
      toNe(
        name.unwrap,
        parameters.protocolVersion.version,
        allowUnstableProtocolVersion,
      )
    }

  }

  /** Setting an admin token on the ledger API requires non-standard-config. */
  private def adminTokenSafetyCheckParticipants(
      config: CantonConfig
  ): Validated[NonEmpty[Seq[String]], Unit] = {
    def toNe(
        name: String,
        nonStandardConfig: Boolean,
        adminToken: Option[String],
    ): Validated[NonEmpty[Seq[String]], Unit] = {
      Validated.cond(
        nonStandardConfig || adminToken.isEmpty,
        (),
        NonEmpty(
          Seq,
          s"Setting ledger-api.admin-token for participant $name requires you to explicitly set canton.parameters.non-standard-config = yes",
        ),
      )
    }

    config.participants.toList.traverse_ { case (name, participantConfig) =>
      toNe(
        name.unwrap,
        config.parameters.nonStandardConfig,
        participantConfig.ledgerApi.adminToken,
      )
    }
  }

}
diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/config/ConfigErrors.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/config/ConfigErrors.scala
new file mode 100644
index 0000000000..3b6ca1b238
--- /dev/null
+++
b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/config/ConfigErrors.scala
@@ -0,0 +1,137 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.config

import com.daml.error.{ErrorCategory, ErrorCode, Explanation, Resolution}
import com.digitalasset.canton.error.CantonError
import com.digitalasset.canton.error.CantonErrorGroups.ConfigErrorGroup
import com.digitalasset.canton.logging.ErrorLoggingContext
import com.typesafe.config.ConfigException
import org.slf4j.event.Level
import pureconfig.error.ConfigReaderFailures

import java.io.File
import scala.collection.mutable
import scala.reflect.{ClassTag, classTag}

/** Trait which acts as a wrapper around
  * 1. `lightbend ConfigException`s which are caught when attempting to read or parse a configuration file
  * 2. `pureconfig ConfigReaderFailures` which are returned when attempting to convert a given
  * [[com.typesafe.config.Config]] instance (basically a valid HOCON-file)
  * to one of the Canton configs
  */
object ConfigErrors extends ConfigErrorGroup {

  /** Base error code for all configuration errors; always logged at ERROR level. */
  sealed abstract class ConfigErrorCode(id: String)
      extends ErrorCode(id, ErrorCategory.InvalidIndependentOfSystemState) {
    // we classify ConfigErrors as ERROR so they are shown directly to the user when he attempts to start Canton
    // via canton -c
    override def logLevel: Level = Level.ERROR
    override def errorConveyanceDocString: Option[String] =
      Some(
        "Config errors are logged and output to stdout if starting Canton with a given configuration fails"
      )
  }

  /** Common base class for all concrete configuration errors. */
  abstract class CantonConfigError(
      override val cause: String,
      override val throwableO: Option[Throwable] = None,
  )(implicit override val code: ErrorCode)
      extends CantonError {}

  /** Config error that wraps one or more Lightbend [[ConfigException]]s; the
    * exceptions themselves are additionally logged at DEBUG when the error is logged.
    */
  sealed abstract class ExceptionBasedConfigError(
      override val cause: String,
      override val throwableO: Option[Throwable] = None,
  )(implicit override val code: ErrorCode)
      extends CantonConfigError(cause, throwableO)(code) {

    def exceptions: Seq[ConfigException]

    override def log(): Unit = {
      super.log()

      // Full stack traces only at DEBUG so the ERROR output stays readable.
      exceptions.foreach { e =>
        loggingContext.logger.debug(
          code.toMsg(
            s"Received the following exception while attempting to parse the Canton config files",
            loggingContext.traceContext.traceId,
          ),
          e,
        )(loggingContext.traceContext)
      }
    }
  }

  final case object NoConfigFiles extends ConfigErrorCode("NO_CONFIG_FILES") {
    final case class Error()(implicit override val loggingContext: ErrorLoggingContext)
        extends CantonConfigError(
          "No config files were given to Canton. We require at least one config file given via --config or a key:value pair given via -C."
        )
  }

  @Resolution(""" In general, this can be one of many errors since this is the 'miscellaneous category' of configuration errors.
      | One of the more common errors in this category is an 'unknown key' error. This error usually means that
      | a keyword that is not valid (e.g. it may have a typo 'bort' instead of 'port'), or that a valid keyword
      | at the wrong part of the configuration hierarchy was used (e.g. to enable database replication for a participant, the correct configuration
      | is `canton.participants.participant2.replication.enabled = true` and not `canton.participants.replication.enabled = true`).
      | Please refer to the scaladoc of either `CantonEnterpriseConfig` or `CantonCommunityConfig` (depending on whether the community or enterprise version is used) to find the valid configuration keywords and the correct position in the configuration hierarchy.
      |""")
  final case object GenericConfigError extends ConfigErrorCode("GENERIC_CONFIG_ERROR") {
    final case class Error(override val cause: String)(implicit
        override val loggingContext: ErrorLoggingContext
    ) extends CantonConfigError(cause)
  }

  @Explanation(
    "This error is usually thrown when Canton can't find a given configuration file."
  )
  @Resolution(
    "Make sure that the path and name of all configuration files is correctly specified. "
  )
  final case object CannotReadFilesError extends ConfigErrorCode("CANNOT_READ_CONFIG_FILES") {
    final case class Error(unreadableFiles: Seq[File])(implicit
        override val loggingContext: ErrorLoggingContext
    ) extends CantonConfigError("At least one configuration file could not be read.")
  }

  @Explanation(
    "This error is usually thrown because a config file doesn't contain configs in valid HOCON format. " +
      "The most common cause of an invalid HOCON format is a forgotten bracket."
  )
  @Resolution("Make sure that all files are in valid HOCON format.")
  final case object CannotParseFilesError extends ConfigErrorCode("CANNOT_PARSE_CONFIG_FILES") {
    final case class Error(override val exceptions: Seq[ConfigException])(implicit
        override val loggingContext: ErrorLoggingContext
    ) extends ExceptionBasedConfigError(
          s"Received an exception (full stack trace has been logged at DEBUG level) while attempting to parse ${exceptions.length} .conf-file(s)."
        )
  }

  @Resolution(
    "A common cause of this error is attempting to use an environment variable that isn't defined within a config-file. "
  )
  final case object SubstitutionError extends ConfigErrorCode("CONFIG_SUBSTITUTION_ERROR") {
    // NOTE(review): this cause message is identical to CannotParseFilesError's ("while
    // attempting to parse") even though this error is about substitution/resolution —
    // looks like a copy-paste; confirm whether the message should mention substitution.
    final case class Error(override val exceptions: Seq[ConfigException])(implicit
        override val loggingContext: ErrorLoggingContext
    ) extends ExceptionBasedConfigError(
          s"Received an exception (full stack trace has been logged at DEBUG level) while attempting to parse ${exceptions.length} .conf-file(s)."
        )
  }

  final case object ValidationError extends ConfigErrorCode("CONFIG_VALIDATION_ERROR") {
    final case class Error(causes: Seq[String])(implicit
        override val loggingContext: ErrorLoggingContext
    ) extends CantonConfigError(
          s"Failed to validate the configuration due to: ${causes.mkString("\n")}"
        )
  }

  /** Pretty-print pureconfig reader failures, prefixed with the target config class. */
  def getMessage[ConfClass: ClassTag](failures: ConfigReaderFailures): String = {
    val linesBuffer = mutable.Buffer.empty[String]
    linesBuffer += s"Cannot convert configuration to a config of ${classTag[ConfClass].runtimeClass}. Failures are:"
    linesBuffer += failures.prettyPrint(1)
    linesBuffer += ""
    linesBuffer.mkString(System.lineSeparator())
  }
}
diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/AdminCommandRunner.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/AdminCommandRunner.scala
new file mode 100644
index 0000000000..78ec76a06f
--- /dev/null
+++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/AdminCommandRunner.scala
@@ -0,0 +1,118 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.console

import com.digitalasset.canton.admin.api.client.commands.GrpcAdminCommand
import com.digitalasset.canton.concurrent.Threading
import com.digitalasset.canton.config.{CantonConfig, NonNegativeDuration}
import com.digitalasset.canton.console.CommandErrors.ConsoleTimeout
import com.digitalasset.canton.crypto.Crypto
import com.digitalasset.canton.environment.{CantonNode, CantonNodeBootstrap}
import com.digitalasset.canton.logging.{NamedLogging, TracedLogger}

import scala.annotation.tailrec

/** Support for running an admin command
  */
trait AdminCommandRunner {

  /** Run a GRPC admin command and return its result.
    * Most of the commands are only defined for the GRPC interface, so we default to showing an error message
    * if the command is called for a node configured with an HTTP interface.
    */
  protected[console] def adminCommand[Result](
      grpcCommand: GrpcAdminCommand[_, _, Result]
  ): ConsoleCommandResult[Result]

  protected[console] def tracedLogger: TracedLogger

}

object AdminCommandRunner {

  /** Poll `condition` every 100ms until it becomes true or `timeout` elapses.
    * Returns [[CommandSuccessful]] on success and a [[ConsoleTimeout]] error otherwise.
    */
  def retryUntilTrue(timeout: NonNegativeDuration)(
      condition: => Boolean
  ): ConsoleCommandResult[Unit] = {
    val deadline = timeout.asFiniteApproximation.fromNow
    @tailrec
    def go(): ConsoleCommandResult[Unit] = {
      val res = condition
      if (!res) {
        if (deadline.hasTimeLeft()) {
          Threading.sleep(100)
          go()
        } else {
          ConsoleTimeout.Error(timeout.asJavaApproximation)
        }
      } else {
        CommandSuccessful(())
      }
    }
    go()
  }
}

/** Support for running ledgerApi commands
  */
trait LedgerApiCommandRunner {

  protected[console] def ledgerApiCommand[Result](
      command: GrpcAdminCommand[_, _, Result]
  ): ConsoleCommandResult[Result]

  // Optional authorization token forwarded with ledger API calls.
  protected[console] def token: Option[String]

}

/** Support for inspecting the instance */
trait BaseInspection[I <: CantonNode] {

  /** The running node, if any (None while stopped or still starting). */
  def underlying: Option[I] = {
    runningNode.flatMap(_.getNode)
  }

  protected[console] def runningNode: Option[CantonNodeBootstrap[I]]
  protected[console] def startingNode: Option[CantonNodeBootstrap[I]]
  protected[console] def name: String

  /** Apply `ops` to the running node; throws [[IllegalArgumentException]] when the
    * node is not running or has not finished initialisation.
    */
  protected[console] def access[T](ops: I => T): T = {
    ops(
      runningNode
        .getOrElse(throw new IllegalArgumentException(s"instance $name is not running"))
        .getNode
        .getOrElse(
          throw new IllegalArgumentException(
            s"instance $name is still starting or awaiting manual initialisation."
          )
        )
    )
  }

  /** Crypto API of the running node; throws when the node is not running. */
  protected[canton] def crypto: Crypto = {
    runningNode
      .flatMap(_.crypto)
      .getOrElse(throw new IllegalArgumentException(s"instance $name is not running."))
  }

}

/** Guards console commands behind feature flags configured in `canton.features`. */
trait FeatureFlagFilter extends NamedLogging {

  protected def consoleEnvironment: ConsoleEnvironment

  protected def cantonConfig: CantonConfig = consoleEnvironment.environment.config

  // Logs an error and throws CommandFailure when the flag is disabled.
  private def checkEnabled[T](flag: Boolean, config: String, command: => T): T =
    if (flag) {
      command
    } else {
      noTracingLogger.error(
        s"The command is currently disabled. You need to enable it explicitly by setting `canton.features.${config} = yes` in your Canton configuration file (`.conf`)"
      )
      throw new CommandFailure()
    }

  protected def check[T](flag: FeatureFlag)(command: => T): T =
    checkEnabled(consoleEnvironment.featureSet.contains(flag), flag.configName, command)

}
diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/AmmoniteCacheLock.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/AmmoniteCacheLock.scala
new file mode 100644
index 0000000000..ea29ec415c
--- /dev/null
+++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/AmmoniteCacheLock.scala
@@ -0,0 +1,135 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.console

import ammonite.runtime.Storage
import cats.syntax.either.*
import com.digitalasset.canton.logging.TracedLogger
import com.digitalasset.canton.tracing.TraceContext

import java.io.{File, RandomAccessFile}
import java.nio.channels.OverlappingFileLockException
import scala.concurrent.blocking
import scala.util.control.NonFatal

/** A (possibly file-backed) ammonite storage plus the lock protecting it. */
trait AmmoniteCacheLock {

  /** Release the underlying file lock (no-op for the in-memory variant). */
  def release(): Unit
  def storage: Storage
  def lockFile: Option[java.io.File]
}

object AmmoniteCacheLock {

  // Don't change this to lazy val, as the underlying InMemory storage is not thread safe.
  def InMemory: AmmoniteCacheLock = new AmmoniteCacheLock {
    override def release(): Unit = ()
    override val storage: Storage = Storage.InMemory()
    override def toString: String = "in-memory cache"
    override def lockFile: Option[File] = None
  }

  /** Acquire a locked cache directory under `path`, trying numbered sub-directories
    * (`path/0`, `path/1`, ...) until a lock can be obtained. Falls back to the
    * in-memory storage when no directory can be locked.
    */
  def create(logger: TracedLogger, path: os.Path, isRepl: Boolean): AmmoniteCacheLock = {
    import TraceContext.Implicits.Empty.*
    def go(index: Int): Either[String, AmmoniteCacheLock] = {
      val newPath = path / s"$index"
      for {
        _ <- Either.cond(index < 255, (), s"Cache dir attempt reached $index, giving up")
        _ <- createDirsIfNecessary(newPath)
        _ <- ensureDirIsWritable(newPath)
        lockO <- acquireLock(logger, newPath, isRepl).leftMap { err =>
          logger.debug("Observed lock exception", err)
          err.getMessage
        }
        lock <- lockO match {
          case Some(value) => Right(value)
          // Directory already locked by another process; try the next index.
          case None => go(index + 1)
        }
      } yield lock
    }
    try {
      // check that cache directory is writable
      val attempt = for {
        _ <- createDirsIfNecessary(path)
        _ <- ensureDirIsWritable(path)
        lock <- go(0)
      } yield lock
      attempt match {
        case Right(lock) => lock
        case Left(err) =>
          logger.warn(
            s"Failed to acquire ammonite cache directory due to ${err}. Will use in-memory instead."
          )
          InMemory
      }
    } catch {
      case NonFatal(e) =>
        logger.warn("Failed to acquire ammonite cache directory. Will use in-memory instead.", e)
        InMemory
    }
  }

  private def createDirsIfNecessary(path: os.Path): Either[String, Unit] =
    if (path.toIO.exists())
      Either.cond(path.toIO.isDirectory, (), s"File ${path} exists but is not a directory")
    else
      Either.cond(
        // create or test again (mkdirs fails if the directory exists in the meantime, which can happen
        // if several tests try to create the directory at the same time)
        path.toIO.mkdirs() || path.toIO.exists(),
        (),
        s"Failed to create ammonite cache directory ${path}. Is the path writable?",
      )

  private def ensureDirIsWritable(path: os.Path): Either[String, Unit] = {
    Either.cond(path.toIO.canWrite, (), s"Directory $path is not writable")
  }

  /** Try to lock `path/lock`. Returns Right(None) when the lock is already held
    * (by this or another JVM), Right(Some(lock)) on success, Left on unexpected errors.
    */
  private def acquireLock(logger: TracedLogger, path: os.Path, isRepl: Boolean)(implicit
      traceContext: TraceContext
  ): Either[Throwable, Option[AmmoniteCacheLock]] = blocking(synchronized {
    try {
      val myLockFile = path / "lock"
      if (myLockFile.toIO.exists()) {
        Right(None)
      } else {
        logger.debug(s"Attempting to obtain lock ${myLockFile}")
        val out = new RandomAccessFile(myLockFile.toIO, "rw")
        // FIX: close the file handle before propagating any exception from tryLock()
        // (previously an OverlappingFileLockException leaked `out` to the outer catch).
        val maybeLock =
          try Option(out.getChannel.tryLock())
          catch {
            case NonFatal(e) =>
              out.close()
              throw e
          }
        maybeLock match {
          case None =>
            logger.debug(s"Failed to acquire lock for ${myLockFile}")
            out.close()
            Right(None)
          case Some(lock) =>
            // Best-effort cleanup should release() never run.
            myLockFile.toIO.deleteOnExit()
            Right(Some(new AmmoniteCacheLock {
              override def release(): Unit = {
                try {
                  logger.debug(s"Releasing lock $myLockFile...")
                  lock.release()
                  out.close()
                  if (!myLockFile.toIO.delete()) {
                    logger.warn(s"Failed to delete lock file ${myLockFile}")
                  }
                } catch {
                  case NonFatal(e) =>
                    logger.error(s"Releasing ammonite cache lock $lockFile failed", e)
                }
              }

              override val storage: Storage = new Storage.Folder(path, isRepl = isRepl)

              override def toString: String = s"file cache at $path"

              override def lockFile: Option[File] = Some(myLockFile.toIO)
            }))

        }
      }
    } catch {
      // FIX: the binding was unused — another JVM holds the lock, treat as "not acquired".
      case _: OverlappingFileLockException => Right(None)
      case NonFatal(e) => Left(e)
    }
  })

}
diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/AmmoniteConsoleConfig.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/AmmoniteConsoleConfig.scala
new file mode 100644
index 0000000000..1c7f9f2c13
--- /dev/null
+++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/AmmoniteConsoleConfig.scala
@@ -0,0 +1,91 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.console

import ammonite.Main
import ammonite.main.Defaults
import ammonite.util.Colors
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.logging.TracedLogger
import com.typesafe.scalalogging.LazyLogging

import java.io.{File, IOException}

/** Configure behaviour of ammonite
  *
  * @param cacheDir cache dir, defaults to ~/.ammonite. If none is given, in-memory is used.
  *                 If you specify a cache dir, the Canton repl will startup faster.
  *                 In our tests, we have very rarely observed unexpected compile errors when the cache was enabled;
  *                 if you want to avoid that, set the cache dir to None (i.e. `cache-dir = null` in the config file).
  * @param workingDir working directory.
  *                   if none is given, we'll use the working directory of the Canton process
  * @param colors if true (default), we'll use color output
  * @param verbose if true (not default), we'll emit verbose ammonite output
  * @param defaultLimit default limit parameter for commands that can potentially return many results
  */
final case class AmmoniteConsoleConfig(
    cacheDir: Option[java.io.File] = AmmoniteConsoleConfig.defaultCacheDir,
    workingDir: Option[java.io.File] = None,
    colors: Boolean = true,
    verbose: Boolean = false,
    defaultLimit: PositiveInt = PositiveInt.tryCreate(1000),
)

object AmmoniteConsoleConfig extends LazyLogging {

  /** Default cache location (~/.ammonite); None when `user.home` is unusable. */
  private def defaultCacheDir: Option[java.io.File] = {
    val f = new File(System.getProperty("user.home"))
    // NOTE(review): this checks that `user.home` exists but then returns
    // Defaults.ammoniteHome — presumably ammoniteHome is derived from user.home;
    // confirm the two cannot diverge.
    if (f.exists() && f.isDirectory)
      Some(Defaults.ammoniteHome.toIO)
    else {
      logger.warn(
        s"""Can not determine user home directory using the java system property `user.home`
           | (is set to ${System.getProperty("user.home")}). Please set it
           |on jvm startup using -Duser.home=...""".stripMargin
      )
      None
    }
  }

  // Fails loudly (log only) when the JVM cannot create temp files, since ammonite needs them.
  private def ensureTmpFilesCanBeCreated(): Unit = {
    try {
      val f = File.createTempFile("dummy", "test")
      val _ = f.delete()
    } catch {
      case e: IOException =>
        logger.error(
          "Unable to create temporary files (javas `File.createTempFile` throws an exception). Please make sure that the jvm can create files. The process will likely start to fail now.",
          e,
        )
    }
  }

  /** Build the ammonite [[Main]] instance plus the cache lock backing its storage. */
  private[console] def create(
      config: AmmoniteConsoleConfig,
      predefCode: String,
      welcomeBanner: Option[String],
      isRepl: Boolean,
      logger: TracedLogger,
  ): (AmmoniteCacheLock, Main) = {
    val cacheLock: AmmoniteCacheLock = config.cacheDir match {
      case Some(file) => AmmoniteCacheLock.create(logger, os.Path(file), isRepl = isRepl)
      case None => AmmoniteCacheLock.InMemory
    }
    // ensure that we can create tmp files
    ensureTmpFilesCanBeCreated()
    val main = Main(
      predefCode = predefCode,
      storageBackend = cacheLock.storage,
      wd = config.workingDir.fold(os.pwd)(x => os.Path(x.getAbsolutePath)),
      welcomeBanner = welcomeBanner,
      verboseOutput =
        config.verbose, // disable things like "Compiling [x]..." messages from ammonite
      // ammonite when run as a binary will log the number of commands that are executed in a session for the maintainer to see usage
      // I don't think this happens when used in an embedded fashion like we're doing, but let's disable just to be sure ( ˇ෴ˇ )
      remoteLogging = false,
      colors = if (config.colors) Colors.Default else Colors.BlackWhite,
    )
    (cacheLock, main)
  }

}
diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/BootstrapScriptException.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/BootstrapScriptException.scala
new file mode 100644
index 0000000000..2b1f6f7f99
--- /dev/null
+++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/BootstrapScriptException.scala
@@ -0,0 +1,8 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.console

/** Thrown when the bootstrap script fails to execute */
class BootstrapScriptException(cause: Throwable)
    extends RuntimeException(s"Bootstrap script failed: ${cause.getMessage}", cause)
diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/CommunityCantonHealthAdministration.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/CommunityCantonHealthAdministration.scala
new file mode 100644
index 0000000000..0a9511ccb6
--- /dev/null
+++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/CommunityCantonHealthAdministration.scala
@@ -0,0 +1,195 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.console

import better.files.File
import cats.data.NonEmptyList
import cats.syntax.parallel.*
import cats.syntax.traverse.*
import com.codahale.metrics
import com.digitalasset.canton.admin.api.client.data.{CantonStatus, CommunityCantonStatus}
import com.digitalasset.canton.config.RequireTypes.Port
import com.digitalasset.canton.config.{NonNegativeDuration, Password}
import com.digitalasset.canton.health.admin.data.NodeStatus
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.topology.DomainId
import com.digitalasset.canton.tracing.NoTracing
import com.digitalasset.canton.util.FutureInstances.*
import io.circe.{Encoder, Json, KeyEncoder, jawn}
import io.opentelemetry.exporter.internal.otlp.metrics.ResourceMetricsMarshaler
import io.opentelemetry.sdk.metrics.data.MetricData

import java.io.ByteArrayOutputStream
import java.time.Instant
import scala.concurrent.duration.TimeUnit
import scala.concurrent.{Await, ExecutionContext, Future, TimeoutException}
import scala.jdk.CollectionConverters.SeqHasAsJava
import scala.util.control.NonFatal
import scala.util.{Failure, Success, Try}

/** Circe encoders used when serializing metrics and diagnostics into health dumps. */
object CantonHealthAdministrationEncoders {
  implicit val timeUnitEncoder: Encoder[TimeUnit] = Encoder.encodeString.contramap(_.toString)

  implicit val snapshotEncoder: Encoder[metrics.Snapshot] =
    Encoder.forProduct4("mean", "std-dev", "p95", "median") { snapshot =>
      // Dropwizard snapshots are in nanoseconds; report milliseconds.
      def toMs(nanos: Double): Double = nanos / 1e6
      (
        toMs(snapshot.getMean),
        toMs(snapshot.getStdDev),
        toMs(snapshot.get95thPercentile()),
        toMs(snapshot.getMedian),
      )
    }

  implicit val counterEncoder: Encoder[metrics.Counter] = Encoder.forProduct1("count") { counter =>
    counter.getCount
  }
  implicit val gaugeEncoder: Encoder[metrics.Gauge[_]] = Encoder.forProduct1("gauge") { gauge =>
    gauge.getValue.toString
  }
  implicit val histoEncoder: Encoder[metrics.Histogram] =
    Encoder.forProduct1("hist")(_.getSnapshot)

  // FIX: forProductN pairs labels and tuple elements positionally; the original tuple
  // put getFiveMinuteRate under "one-min-rate" and getOneMinuteRate under "five-min-rate".
  implicit val meterEncoder: Encoder[metrics.Meter] =
    Encoder.forProduct3("count", "one-min-rate", "five-min-rate") { meter =>
      (meter.getCount, meter.getOneMinuteRate, meter.getFiveMinuteRate)
    }

  // FIX: same label/value swap as meterEncoder.
  implicit val timerEncoder: Encoder[metrics.Timer] =
    Encoder.forProduct4("count", "one-min-rate", "five-min-rate", "hist") { timer =>
      (timer.getCount, timer.getOneMinuteRate, timer.getFiveMinuteRate, timer.getSnapshot)
    }

  /** Wraps the standardized log writer from OpenTelemetry, that outputs the metrics as JSON
    * Source: https://github.com/open-telemetry/opentelemetry-java/blob/main/exporters/logging-otlp/src/main/java/io/opentelemetry/exporter/logging/otlp/OtlpJsonLoggingMetricExporter.java
    * The encoder is not the most efficient as we first use the OpenTelemetry JSON serializer to write as a String,
    * and then use the Circe Jawn decoder to transform the string into a circe.Json object.
    * This is fine as the encoder is used only for on demand health dumps.
    */
  implicit val openTelemetryMetricDataEncoder: Encoder[Seq[MetricData]] =
    Encoder.encodeSeq[Json].contramap[Seq[MetricData]] { metrics =>
      val resourceMetrics = ResourceMetricsMarshaler.create(metrics.asJava)
      resourceMetrics.toSeq.map { resource =>
        val byteArrayOutputStream = new ByteArrayOutputStream()
        resource.writeJsonTo(byteArrayOutputStream)
        jawn
          .decode[Json](byteArrayOutputStream.toString)
          .fold(
            error => Json.fromString(s"Failed to decode metrics: $error"),
            identity,
          )
      }
    }

  implicit val traceElemEncoder: Encoder[StackTraceElement] =
    Encoder.encodeString.contramap(_.toString)
  implicit val threadKeyEncoder: KeyEncoder[Thread] = (thread: Thread) => thread.getName

  implicit val domainIdEncoder: KeyEncoder[DomainId] = (ref: DomainId) => ref.toString

  implicit val encodePort: Encoder[Port] = Encoder.encodeInt.contramap[Port](_.unwrap)

  // We do not want to serialize the password to JSON, e.g., as part of a config dump.
  implicit val encoder: Encoder[Password] = Encoder.encodeString.contramap(_ => "****")
}

object CantonHealthAdministration {
  def defaultHealthDumpName: File = {
    // Replace ':' in the timestamp as they are forbidden on windows
    val name = s"canton-dump-${Instant.now().toString.replace(':', '-')}.zip"
    File(name)
  }
}

/** Console commands for aggregating node status and producing health dumps. */
trait CantonHealthAdministration[Status <: CantonStatus]
    extends Helpful
    with NamedLogging
    with NoTracing {
  protected val consoleEnv: ConsoleEnvironment
  implicit private val ec: ExecutionContext = consoleEnv.environment.executionContext
  override val loggerFactory: NamedLoggerFactory = consoleEnv.environment.loggerFactory

  /** Lazily collect the status of every node in `nodes` (evaluated per call). */
  protected def statusMap[A <: InstanceReferenceCommon](
      nodes: NodeReferences[A, _, _]
  ): Map[String, () => NodeStatus[A#Status]] = {
    nodes.all.map { node => node.name -> (() => node.health.status) }.toMap
  }

  def status(): Status

  @Help.Summary("Generate and write a health dump of Canton's state for a bug report")
  @Help.Description(
    "Gathers information about the current Canton process and/or remote nodes if using the console" +
      " with a remote config. The outputFile argument can be used to write the health dump to a specific path." +
      " The timeout argument can be increased when retrieving large health dumps from remote nodes." +
      " The chunkSize argument controls the size of the byte chunks streamed back from remote nodes. This can be used" +
      " if encountering errors due to gRPC max inbound message size being too low."
  )
  def dump(
      outputFile: File = CantonHealthAdministration.defaultHealthDumpName,
      timeout: NonNegativeDuration = consoleEnv.commandTimeouts.ledgerCommand,
      chunkSize: Option[Int] = None,
  ): String = {
    // Remote node dumps are gathered in parallel, each into its own temp file.
    val remoteDumps = consoleEnv.nodes.remote.toList.parTraverse { n =>
      Future {
        n.health.dump(
          File.newTemporaryFile(s"remote-${n.name}-"),
          timeout,
          chunkSize,
        )
      }
    }

    // Try to get a local dump by going through the local nodes and returning the first one that succeeds
    def getLocalDump(nodes: NonEmptyList[InstanceReferenceCommon]): Future[String] = {
      Future {
        nodes.head.health.dump(
          File.newTemporaryFile(s"local-"),
          timeout,
          chunkSize,
        )
      }.recoverWith { case NonFatal(e) =>
        NonEmptyList.fromList(nodes.tail) match {
          case Some(tail) =>
            logger.info(
              s"Could not get health dump from ${nodes.head.name}, trying the next local node",
              e,
            )
            getLocalDump(tail)
          case None => Future.failed(e)
        }
      }
    }

    val localDump = NonEmptyList
      // The sorting is not necessary but makes testing easier
      .fromList(consoleEnv.nodes.local.toList.sortBy(_.name))
      .traverse(getLocalDump)
      .map(_.toList)

    consoleEnv.run {
      // Zip all partial dumps into the requested output file.
      val zippedHealthDump = List(remoteDumps, localDump).flatSequence.map { allDumps =>
        outputFile.zipIn(allDumps.map(File(_)).iterator).pathAsString
      }
      Try(Await.result(zippedHealthDump, timeout.duration)) match {
        case Success(result) => CommandSuccessful(result)
        case Failure(_: TimeoutException) =>
          CommandErrors.ConsoleTimeout.Error(timeout.asJavaApproximation)
        case Failure(exception) => CommandErrors.CommandInternalError.ErrorWithException(exception)
      }
    }
  }
}

class CommunityCantonHealthAdministration(override val consoleEnv: ConsoleEnvironment)
    extends CantonHealthAdministration[CommunityCantonStatus] {

  @Help.Summary("Aggregate status info of all participants and domains")
  def status(): CommunityCantonStatus = {
    CommunityCantonStatus.getStatus(
      statusMap[DomainReference](consoleEnv.domains),
      statusMap[ParticipantReference](consoleEnv.participants),
    )
  }
}
diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/CommunityHealthDumpGenerator.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/CommunityHealthDumpGenerator.scala
new file mode 100644
index 0000000000..8272a28c0a
--- /dev/null
+++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/CommunityHealthDumpGenerator.scala
@@ -0,0 +1,31 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.console

import com.digitalasset.canton.admin.api.client.data.CommunityCantonStatus
import com.digitalasset.canton.environment.CommunityEnvironment
import com.digitalasset.canton.health.admin.data.{DomainStatus, ParticipantStatus}
import io.circe.Encoder
import io.circe.generic.semiauto.deriveEncoder

import scala.annotation.nowarn

/** Community-edition implementation of [[HealthDumpGenerator]]: knows how to encode
  * [[CommunityCantonStatus]] as JSON and how to gather status from the domains and
  * participants declared in the environment's config.
  */
@nowarn("cat=lint-byname-implicit") // https://github.com/scala/bug/issues/12072
class CommunityHealthDumpGenerator(
    override val environment: CommunityEnvironment,
    override val grpcAdminCommandRunner: GrpcAdminCommandRunner,
) extends HealthDumpGenerator[CommunityCantonStatus] {
  // Semi-automatic derivation; the imported encoders supply instances for nested
  // types (durations, ports, metrics, ...) that circe cannot derive on its own.
  override protected implicit val statusEncoder: Encoder[CommunityCantonStatus] = {
    import io.circe.generic.auto.*
    import CantonHealthAdministrationEncoders.*
    deriveEncoder[CommunityCantonStatus]
  }

  // Queries all configured domains and participants via the admin API
  // (statusMap is inherited — presumably it issues the status gRPC call per node; see HealthDumpGenerator).
  override def status(): CommunityCantonStatus = {
    CommunityCantonStatus.getStatus(
      statusMap(environment.config.domainsByString, DomainStatus.fromProtoV0),
      statusMap(environment.config.participantsByString, ParticipantStatus.fromProtoV0),
    )
  }
}
diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleCommandResult.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleCommandResult.scala
new file mode 100644
index 0000000000..ff087f255c
--- /dev/null
+++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleCommandResult.scala
@@ -0,0 +1,178 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.console

import cats.Monad
import cats.syntax.alternative.*
import com.daml.error.{ErrorCategory, ErrorCode}
import com.digitalasset.canton.console.CommandErrors.{CommandError, GenericCommandError}
import com.digitalasset.canton.error.CantonErrorGroups.CommandErrorGroup
import com.digitalasset.canton.error.*
import com.digitalasset.canton.util.ErrorUtil
import org.slf4j.event.Level

import java.time.Duration
import scala.util.{Failure, Success, Try}

/** Response from a console command.
  *
  * Either a [[CommandSuccessful]] carrying the command's value, or a
  * [[CommandErrors.CommandError]] carrying a human-readable cause.
  */
sealed trait ConsoleCommandResult[+A] {
  def toEither: Either[String, A]

  def flatMap[B](f: A => ConsoleCommandResult[B]): ConsoleCommandResult[B] = this match {
    case CommandSuccessful(a) => f(a)
    case err: CommandError => err
  }

  def map[B](f: A => B): ConsoleCommandResult[B] = this match {
    case CommandSuccessful(a) => CommandSuccessful(f(a))
    case err: CommandError => err
  }
}

object ConsoleCommandResult {

  implicit val consoleCommandResultMonad: Monad[ConsoleCommandResult] =
    new Monad[ConsoleCommandResult] {
      override def flatMap[A, B](fa: ConsoleCommandResult[A])(
          f: A => ConsoleCommandResult[B]
      ): ConsoleCommandResult[B] = fa.flatMap(f)

      // Stack-safe: `go` only calls itself in tail position.
      override def tailRecM[A, B](
          a: A
      )(f: A => ConsoleCommandResult[Either[A, B]]): ConsoleCommandResult[B] = {
        def go(ccr: ConsoleCommandResult[Either[A, B]]): ConsoleCommandResult[B] = ccr match {
          case CommandSuccessful(Left(a)) => go(f(a))
          case CommandSuccessful(Right(b)) => CommandSuccessful(b)
          case err: CommandError => err
        }
        go(CommandSuccessful(Left(a)))
      }

      override def pure[A](x: A): ConsoleCommandResult[A] = CommandSuccessful(x)
    }

  /** Lift an `Either[String, A]` into a command result; the `Left` becomes a generic error. */
  def fromEither[A](either: Either[String, A]): ConsoleCommandResult[A] =
    either match {
      case Left(err) => GenericCommandError(err)
      case Right(value) => CommandSuccessful(value)
    }

  /** Like [[forAll]], but immediately runs the merged result through the console environment
    * (so a failure on any instance surfaces as a console command failure).
    */
  private[console] def runAll[Instance <: InstanceReferenceCommon, Result](
      instances: Seq[Instance]
  )(
      action: Instance => ConsoleCommandResult[Result]
  )(implicit consoleEnvironment: ConsoleEnvironment): Map[Instance, Result] =
    consoleEnvironment.run {
      forAll(instances)(action)
    }

  /** Call a console command on all instances.
    * Will run all in sequence and will merge all failures.
    * If nothing fails, the final CommandSuccessful result will be returned.
    * @param action Action to perform on instances
    * @return Successful if the action was successful for all instances, otherwise all the errors encountered merged into one.
    */
  private[console] def forAll[Instance <: InstanceReferenceCommon, Result](
      instances: Seq[Instance]
  )(
      action: Instance => ConsoleCommandResult[Result]
  ): ConsoleCommandResult[Map[Instance, Result]] = {
    val (errors, results) = instances
      .map(instance => instance -> Try(action(instance)))
      .map {
        case (instance, Success(CommandSuccessful(value))) => Right(instance -> value)
        case (instance, Success(err: CommandError)) =>
          Left(
            s"(failure on ${instance.name}): ${err.cause}"
          )
        case (instance, Failure(t)) =>
          // Fixed unbalanced parenthesis in the message (was "(exception on <name>: ...").
          Left(s"(exception on ${instance.name}): ${ErrorUtil.messageWithStacktrace(t)}")
      }
      .toList
      .separate
    if (errors.isEmpty) {
      CommandSuccessful(results.toMap)
    } else {
      GenericCommandError(
        s"Command failed on ${errors.length} out of ${instances.length} instances: ${errors.mkString(", ")}"
      )
    }
  }
}

/** Successful command result
  * @param value The value returned from the command
  */
final case class CommandSuccessful[+A](value: A) extends ConsoleCommandResult[A] {
  override lazy val toEither: Either[String, A] = Right(value)
}

object CommandSuccessful {
  def apply(): CommandSuccessful[Unit] = CommandSuccessful(())
}

// Each error in object CommandErrors will have an error code that begins with `CA12` ('CA1' due to inheritance from CommunityAppError, '2' due to the argument)
object CommandErrors extends CommandErrorGroup {

  /** A failed command result; `cause` is the human-readable reason shown on the console. */
  sealed trait CommandError extends ConsoleCommandResult[Nothing] {
    override lazy val toEither: Either[String, Nothing] = Left(cause)
    def cause: String
  }

  sealed abstract class CantonCommandError(
      override val cause: String,
      override val throwableO: Option[Throwable] = None,
  )(implicit override val code: ErrorCode)
      extends BaseCantonError
      with CommandError

  sealed abstract class CommandErrorCode(id: String, category: ErrorCategory)
      extends ErrorCode(id, category) {
    override def errorConveyanceDocString: Option[String] = Some(
      "These errors are shown as errors on the console."
    )
  }

  object CommandInternalError
      extends CommandErrorCode(
        "CONSOLE_COMMAND_INTERNAL_ERROR",
        ErrorCategory.SystemInternalAssumptionViolated,
      ) {
    final case class ErrorWithException(throwable: Throwable)
        extends CantonCommandError(
          "An internal error has occurred while running a console command.",
          Some(throwable),
        )
    final case class NullError()
        extends CantonCommandError("Console command has returned 'null' as result.")
  }

  // The majority of the use cases of this error are for generic Either[..., ...] => ConsoleCommandResult[...] conversions
  // Thus, it doesn't have an error code because the underlying error that is wrapped should provide the error code
  // TODO(i6183) - replace uses of this wrapper with a CantonParentError wrapper except when parsing gRPC errors
  final case class GenericCommandError(cause: String)
      extends ConsoleCommandResult[Nothing]
      with CommandError

  object ConsoleTimeout
      extends CommandErrorCode(
        "CONSOLE_COMMAND_TIMED_OUT",
        ErrorCategory.SystemInternalAssumptionViolated,
      ) {
    final case class Error(timeout: Duration)
        extends CantonCommandError(s"Condition never became true after ${timeout}")
  }

  object NodeNotStarted
      extends CommandErrorCode(
        "NODE_NOT_STARTED",
        ErrorCategory.InvalidGivenCurrentSystemStateOther,
      ) {
    override def logLevel: Level = Level.ERROR
    final case class ErrorCanton(instance: LocalInstanceReferenceCommon)
        extends CantonCommandError(s"Instance $instance has not been started. ")
  }

}
diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleEnvironment.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleEnvironment.scala
new file mode 100644
index 0000000000..39e7e860c0
--- /dev/null
+++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleEnvironment.scala
@@ -0,0 +1,646 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.console + +import ammonite.util.Bind +import cats.syntax.either.* +import com.digitalasset.canton.admin.api.client.data.CantonStatus +import com.digitalasset.canton.config.CantonRequireTypes.InstanceName +import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveDouble, PositiveInt} +import com.digitalasset.canton.config.{ + ConsoleCommandTimeout, + NonNegativeDuration, + NonNegativeFiniteDuration, + PositiveDurationSeconds, + ProcessingTimeout, +} +import com.digitalasset.canton.console.CommandErrors.{ + CantonCommandError, + CommandInternalError, + GenericCommandError, +} +import com.digitalasset.canton.console.Help.{Description, Summary, Topic} +import com.digitalasset.canton.crypto.Fingerprint +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.environment.Environment +import com.digitalasset.canton.lifecycle.{FlagCloseable, Lifecycle} +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.protocol.SerializableContract +import com.digitalasset.canton.sequencing.{ + GrpcSequencerConnection, + SequencerConnection, + SequencerConnections, +} +import com.digitalasset.canton.time.SimClock +import com.digitalasset.canton.topology.{Identifier, ParticipantId, PartyId} +import com.digitalasset.canton.tracing.{NoTracing, TraceContext, TracerProvider} +import com.digitalasset.canton.util.EitherUtil +import com.digitalasset.canton.{DomainAlias, LfPartyId} +import com.typesafe.scalalogging.Logger +import io.opentelemetry.api.trace.Tracer + +import java.time.{Duration as JDuration, Instant} +import java.util.concurrent.atomic.AtomicReference +import scala.concurrent.duration.Duration as SDuration +import scala.reflect.runtime.universe as ru +import scala.util.control.NonFatal + +final case class NodeReferences[A, R <: A, L <: A](local: Seq[L], remote: Seq[R]) { + val all: Seq[A] = local ++ remote +} 
+ +/** The environment in which console commands are evaluated. + */ +@SuppressWarnings(Array("org.wartremover.warts.Any")) // required for `Binding[_]` usage +trait ConsoleEnvironment extends NamedLogging with FlagCloseable with NoTracing { + type Env <: Environment + type DomainLocalRef <: LocalDomainReference + type DomainRemoteRef <: RemoteDomainReference + type Status <: CantonStatus + + def consoleLogger: Logger = super.noTracingLogger + + def health: CantonHealthAdministration[Status] + + /** the underlying Canton runtime environment */ + val environment: Env + + /** determines the control exception thrown on errors */ + val errorHandler: ConsoleErrorHandler = ThrowErrorHandler + + /** the console for user facing output */ + val consoleOutput: ConsoleOutput + + /** The predef code itself which is executed before any script or repl command */ + private[console] def predefCode(interactive: Boolean, noTty: Boolean = false): String = + consoleEnvironmentBindings.predefCode(interactive, noTty) + protected def consoleEnvironmentBindings: ConsoleEnvironmentBinding + + private val tracerProvider = + TracerProvider.Factory(environment.configuredOpenTelemetry, "console") + private[console] val tracer: Tracer = tracerProvider.tracer + + /** Definition of the startup order of local instances. + * Nodes support starting up in any order however to avoid delays/warnings we opt to start in the most desirable order + * for simple execution. (e.g. domains started before participants). + * Implementations should just return a int for the instance (typically just a static value based on type), + * and then the console will start these instances for lower to higher values. + */ + protected def startupOrderPrecedence(instance: LocalInstanceReferenceCommon): Int + + /** The order that local nodes would ideally be started in. 
*/ + final val startupOrdering: Ordering[LocalInstanceReferenceCommon] = + (x: LocalInstanceReferenceCommon, y: LocalInstanceReferenceCommon) => + startupOrderPrecedence(x) compare startupOrderPrecedence(y) + + /** allows for injecting a custom admin command runner during tests */ + protected def createAdminCommandRunner: ConsoleEnvironment => ConsoleGrpcAdminCommandRunner + + protected override val loggerFactory: NamedLoggerFactory = environment.loggerFactory + + private val commandTimeoutReference: AtomicReference[ConsoleCommandTimeout] = + new AtomicReference[ConsoleCommandTimeout](environment.config.parameters.timeouts.console) + + private val featureSetReference: AtomicReference[HelperItems] = + new AtomicReference[HelperItems](HelperItems(environment.config.features.featureFlags)) + + /** Generate implementation specific help items for local domains */ + protected def localDomainHelpItems( + scope: Set[FeatureFlag], + localDomain: DomainLocalRef, + ): Seq[Help.Item] + + /** Generate implementation specific help items for remote domains */ + protected def remoteDomainHelpItems( + scope: Set[FeatureFlag], + remoteDomain: DomainRemoteRef, + ): Seq[Help.Item] + + private case class HelperItems(scope: Set[FeatureFlag]) { + lazy val participantHelperItems = { + // due to the use of reflection to grab the help-items, i need to write the following, repetitive stuff explicitly + val subItems = + if (participants.local.nonEmpty) + participants.local.headOption.toList.flatMap(p => + Help.getItems(p, baseTopic = Seq("$participant"), scope = scope) + ) + else if (participants.remote.nonEmpty) + participants.remote.headOption.toList.flatMap(p => + Help.getItems(p, baseTopic = Seq("$participant"), scope = scope) + ) + else Seq() + Help.Item("$participant", None, Summary(""), Description(""), Topic(Seq()), subItems) + } + + lazy val domainHelperItems = { + val subItems = + if (domains.local.nonEmpty) + domains.local.headOption.toList.flatMap(localDomainHelpItems(scope, _)) + 
else if (domains.remote.nonEmpty) + domains.remote.headOption.toList.flatMap(remoteDomainHelpItems(scope, _)) + else Seq() + Help.Item("$domain", None, Summary(""), Description(""), Topic(Seq()), subItems) + } + + lazy val filteredHelpItems = { + helpItems.filter(x => scope.contains(x.summary.flag)) + } + + lazy val all = filteredHelpItems :+ participantHelperItems :+ domainHelperItems + + } + + protected def timeouts: ProcessingTimeout = environment.config.parameters.timeouts.processing + + /** @return maximum runtime of a console command + */ + def commandTimeouts: ConsoleCommandTimeout = commandTimeoutReference.get() + + def setCommandTimeout(newTimeout: NonNegativeDuration): Unit = { + require(newTimeout.duration > SDuration.Zero, "The command timeout must be positive!") + commandTimeoutReference.updateAndGet(cur => cur.copy(bounded = newTimeout)).discard + } + + def setLedgerCommandTimeout(newTimeout: NonNegativeDuration): Unit = { + require(newTimeout.duration > SDuration.Zero, "The ledger command timeout must be positive!") + commandTimeoutReference.updateAndGet(cur => cur.copy(ledgerCommand = newTimeout)).discard + } + + /** returns the currently enabled feature sets */ + def featureSet: Set[FeatureFlag] = featureSetReference.get().scope + + def updateFeatureSet(flag: FeatureFlag, include: Boolean): Unit = { + val _ = featureSetReference.updateAndGet { x => + val scope = if (include) x.scope + flag else x.scope - flag + HelperItems(scope) + } + } + + /** Holder for top level values including their name, their value, and a description to display when `help` is printed. + */ + protected case class TopLevelValue[T]( + nameUnsafe: String, + summary: String, + value: T, + topic: Seq[String] = Seq(), + )(implicit tag: ru.TypeTag[T]) { + + // Surround with back-ticks to handle the case that name is a reserved keyword in scala. 
+ lazy val asBind: Either[InstanceName.InvalidInstanceName, Bind[T]] = + InstanceName.create(nameUnsafe).map(name => Bind(s"`${name.unwrap}`", value)) + + lazy val asHelpItem: Help.Item = + Help.Item(nameUnsafe, None, Help.Summary(summary), Help.Description(""), Help.Topic(topic)) + } + + object TopLevelValue { + + /** Provide all details but the value itself. A subsequent call can then specify the value from another location. + * This oddness is to allow the ConsoleEnvironment implementations to specify the values of node instances they + * use as scala's runtime reflection can't easily take advantage of the type members we have available here. + */ + case class Partial(name: String, summary: String, topics: Seq[String] = Seq.empty) { + def apply[T](value: T)(implicit t: ru.TypeTag[T]): TopLevelValue[T] = + TopLevelValue(name, summary, value, topics) + } + } + + // lazy to prevent publication of this before this has been fully initialized + lazy val grpcAdminCommandRunner: ConsoleGrpcAdminCommandRunner = createAdminCommandRunner(this) + + def runE[E, A](result: => Either[E, A]): A = { + run(ConsoleCommandResult.fromEither(result.leftMap(_.toString))) + } + + /** Run a console command. + */ + @SuppressWarnings(Array("org.wartremover.warts.Null")) + def run[A](result: => ConsoleCommandResult[A]): A = { + val resultValue: ConsoleCommandResult[A] = + try { + result + } catch { + case err: Throwable => + CommandInternalError.ErrorWithException(err).logWithContext() + err match { + case NonFatal(_) => + // No need to rethrow err, as it has been logged and output + errorHandler.handleInternalError() + case _ => + // Rethrow err, as it is a bad practice to discard fatal errors. + // As a result, the error may be printed several times, + // but there is no guarantee that the log is still working. + // So it is better to err on the safe side. 
+ throw err + } + } + + def invocationContext(): Map[String, String] = + findInvocationSite() match { + case Some((funcName, callSite)) => Map("function" -> funcName, "callsite" -> callSite) + case None => Map() + } + + resultValue match { + case null => + CommandInternalError.NullError().logWithContext(invocationContext()) + errorHandler.handleInternalError() + case CommandSuccessful(value) => + value + case err: CantonCommandError => + err.logWithContext(invocationContext()) + errorHandler.handleCommandFailure() + case err: GenericCommandError => + val errMsg = findInvocationSite() match { + case Some((funcName, site)) => + err.cause + s"\n Command ${funcName} invoked from ${site}" + case None => err.cause + } + logger.error(errMsg) + errorHandler.handleCommandFailure() + } + } + + private def findInvocationSite(): Option[(String, String)] = { + val stack = Thread.currentThread().getStackTrace + // assumption: first few stack elements are all in our set of known packages. our call-site is + // the first entry outside of our package + // also skip all scala packages because a collection's map operation is not an informative call site + val myPackages = + Seq("com.digitalasset.canton.console", "com.digitalasset.canton.environment", "scala.") + + def isKnown(element: StackTraceElement): Boolean = + myPackages.exists(element.getClassName.startsWith) + + stack.sliding(2).collectFirst { + case Array(callee, caller) if isKnown(callee) && !isKnown(caller) => + val drop = callee.getClassName.lastIndexOf(".") + 1 + val funcName = callee.getClassName.drop(drop) + "." + callee.getMethodName + (funcName, s"${caller.getFileName}:${caller.getLineNumber}") + } + + } + + /** Print help for items in the top level scope. + */ + def help(): Unit = { + consoleOutput.info(Help.format(featureSetReference.get().filteredHelpItems: _*)) + } + + /** Print detailed help for a top-level item in the top level scope. 
+ */ + def help(cmd: String): Unit = + consoleOutput.info(Help.forMethod(featureSetReference.get().all, cmd)) + + def helpItems: Seq[Help.Item] = + topLevelValues.map(_.asHelpItem) ++ + Help.fromObject(ConsoleMacros) ++ + Help.fromObject(this) :+ + ( + Help.Item( + "help", + None, + Help.Summary( + "Help with console commands; type help(\"\") for detailed help for " + ), + Help.Description(""), + Help.Topic(Help.defaultTopLevelTopic), + ), + ) :+ + (Help.Item( + "exit", + None, + Help.Summary("Leave the console"), + Help.Description(""), + Help.Topic(Help.defaultTopLevelTopic), + )) + + lazy val participants: NodeReferences[ + ParticipantReference, + RemoteParticipantReference, + LocalParticipantReference, + ] = + NodeReferences( + environment.config.participantsByString.keys.map(createParticipantReference).toSeq, + environment.config.remoteParticipantsByString.keys + .map(createRemoteParticipantReference) + .toSeq, + ) + + lazy val participantsX: NodeReferences[ + ParticipantReferenceX, + RemoteParticipantReferenceX, + LocalParticipantReferenceX, + ] = + NodeReferences( + environment.config.participantsByStringX.keys.map(createParticipantReferenceX).toSeq, + environment.config.remoteParticipantsByStringX.keys + .map(createRemoteParticipantReferenceX) + .toSeq, + ) + + lazy val domains: NodeReferences[DomainReference, DomainRemoteRef, DomainLocalRef] = + NodeReferences( + environment.config.domainsByString.keys.map(createDomainReference).toSeq, + environment.config.remoteDomainsByString.keys.map(createRemoteDomainReference).toSeq, + ) + + // the scala compiler / wartremover gets confused here if I use ++ directly + def mergeLocalInstances( + locals: Seq[LocalInstanceReferenceCommon]* + ): Seq[LocalInstanceReferenceCommon] = + locals.flatten + def mergeRemoteInstances(remotes: Seq[InstanceReferenceCommon]*): Seq[InstanceReferenceCommon] = + remotes.flatten + + lazy val nodes: NodeReferences[ + InstanceReferenceCommon, + InstanceReferenceCommon, + 
LocalInstanceReferenceCommon, + ] = { + NodeReferences( + mergeLocalInstances(participants.local, participantsX.local, domains.local), + mergeRemoteInstances(participants.remote, participantsX.remote, domains.remote), + ) + } + + protected def helpText(typeName: String, name: String) = + s"Manage $typeName '${name}'; type '${name} help' or '${name} help" + "(\"\")' for more help" + + protected val topicNodeReferences = "Node References" + protected val topicGenericNodeReferences = "Generic Node References" + protected val genericNodeReferencesDoc = " (.all, .local, .remote)" + + protected def domainsTopLevelValue( + h: TopLevelValue.Partial, + domains: NodeReferences[DomainReference, DomainRemoteRef, DomainLocalRef], + ): TopLevelValue[NodeReferences[DomainReference, DomainRemoteRef, DomainLocalRef]] + + /** Supply the local domain value used by the implementation */ + protected def localDomainTopLevelValue( + h: TopLevelValue.Partial, + d: DomainLocalRef, + ): TopLevelValue[DomainLocalRef] + + /** Supply the remote domain value used by the implementation */ + protected def remoteDomainTopLevelValue( + h: TopLevelValue.Partial, + d: DomainRemoteRef, + ): TopLevelValue[DomainRemoteRef] + + /** Assemble top level values with their identifier name, value binding, and help description. 
+ */ + protected def topLevelValues: Seq[TopLevelValue[_]] = { + val nodeTopic = Seq(topicNodeReferences) + val localParticipantBinds: Seq[TopLevelValue[_]] = + participants.local.map(p => + TopLevelValue(p.name, helpText("participant", p.name), p, nodeTopic) + ) + val remoteParticipantBinds: Seq[TopLevelValue[_]] = + participants.remote.map(p => + TopLevelValue(p.name, helpText("remote participant", p.name), p, nodeTopic) + ) + val localParticipantXBinds: Seq[TopLevelValue[_]] = + participantsX.local.map(p => + TopLevelValue(p.name, helpText("participant x", p.name), p, nodeTopic) + ) + val remoteParticipantXBinds: Seq[TopLevelValue[_]] = + participantsX.remote.map(p => + TopLevelValue(p.name, helpText("remote participant x", p.name), p, nodeTopic) + ) + val localDomainBinds: Seq[TopLevelValue[_]] = + domains.local.map(d => + localDomainTopLevelValue( + TopLevelValue.Partial(d.name, helpText("local domain", d.name), nodeTopic), + d, + ) + ) + val remoteDomainBinds: Seq[TopLevelValue[_]] = + domains.remote.map(d => + remoteDomainTopLevelValue( + TopLevelValue.Partial(d.name, helpText("remote domain", d.name), nodeTopic), + d, + ) + ) + val clockBinds: Option[TopLevelValue[_]] = + environment.simClock.map(cl => + TopLevelValue("clock", "Simulated time", new SimClockCommand(cl)) + ) + val referencesTopic = Seq(topicGenericNodeReferences) + localParticipantBinds ++ remoteParticipantBinds ++ + localParticipantXBinds ++ remoteParticipantXBinds ++ + localDomainBinds ++ remoteDomainBinds ++ clockBinds.toList :+ + TopLevelValue( + "participants", + "All participant nodes" + genericNodeReferencesDoc, + participants, + referencesTopic, + ) :+ + TopLevelValue( + "participantsX", + "All participant x nodes" + genericNodeReferencesDoc, + participantsX, + referencesTopic, + ) :+ + domainsTopLevelValue( + TopLevelValue + .Partial("domains", "All domain nodes" + genericNodeReferencesDoc, referencesTopic), + domains, + ) :+ + TopLevelValue("nodes", "All nodes" + 
genericNodeReferencesDoc, nodes, referencesTopic) + } + + /** Bindings for ammonite + * Add a reference to this instance to resolve implicit references within the console + */ + lazy val bindings: Either[RuntimeException, IndexedSeq[Bind[_]]] = { + import cats.syntax.traverse.* + for { + bindsWithoutSelfAlias <- topLevelValues.traverse(_.asBind) + binds = bindsWithoutSelfAlias :+ selfAlias() + _ <- validateNameUniqueness(binds) + } yield binds.toIndexedSeq + } + + private def validateNameUniqueness(binds: Seq[Bind[_]]) = { + val nonUniqueNames = + binds.map(_.name).groupBy(identity).collect { + case (name, occurrences) if occurrences.sizeIs > 1 => + s"$name (${occurrences.size} occurrences)" + } + EitherUtil.condUnitE( + nonUniqueNames.isEmpty, + new IllegalStateException( + s"""Node names must be unique and must differ from reserved keywords. Please revisit node names in your config file. + |Offending names: ${nonUniqueNames.mkString("(", ", ", ")")}""".stripMargin + ), + ) + } + + private def createParticipantReference(name: String): LocalParticipantReference = + new LocalParticipantReference(this, name) + private def createRemoteParticipantReference(name: String): RemoteParticipantReference = + new RemoteParticipantReference(this, name) + private def createParticipantReferenceX(name: String): LocalParticipantReferenceX = + new LocalParticipantReferenceX(this, name) + private def createRemoteParticipantReferenceX(name: String): RemoteParticipantReferenceX = + new RemoteParticipantReferenceX(this, name) + + protected def createDomainReference(name: String): DomainLocalRef + protected def createRemoteDomainReference(name: String): DomainRemoteRef + + /** So we can we make this available + */ + protected def selfAlias(): Bind[_] = Bind(ConsoleEnvironmentBinding.BindingName, this) + + override def onClosed(): Unit = { + Lifecycle.close(grpcAdminCommandRunner, environment)(logger) + } + + def closeChannels(): Unit = { + grpcAdminCommandRunner.closeChannels() + } + + 
def startAll(): Unit = runE(environment.startAll()) + + def stopAll(): Unit = runE(environment.stopAll()) + +} + +/** Expose a Canton [[environment.Environment]] in a way that's easy to deal with from a REPL. + */ +object ConsoleEnvironment { + + trait Implicits { + + import scala.language.implicitConversions + + implicit def toInstanceReferenceExtensions( + instances: Seq[LocalInstanceReferenceCommon] + ): LocalInstancesExtensions = + new LocalInstancesExtensions.Impl(instances) + + /** Implicit maps an LfPartyId to a PartyId */ + implicit def toPartId(lfPartyId: LfPartyId): PartyId = PartyId.tryFromLfParty(lfPartyId) + + /** Extensions for many instance references + */ + implicit def toLocalDomainExtensions( + instances: Seq[LocalDomainReference] + ): LocalInstancesExtensions = + new LocalDomainReferencesExtensions(instances) + + /** Extensions for many participant references + */ + implicit def toParticipantReferencesExtensions(participants: Seq[ParticipantReferenceCommon])( + implicit consoleEnvironment: ConsoleEnvironment + ): ParticipantReferencesExtensions = + new ParticipantReferencesExtensions(participants) + + implicit def toLocalParticipantReferencesExtensions( + participants: Seq[LocalParticipantReference] + )(implicit consoleEnvironment: ConsoleEnvironment): LocalParticipantReferencesExtensions = + new LocalParticipantReferencesExtensions(participants) + + /** Implicitly map strings to DomainAlias, Fingerprint and Identifier + */ + implicit def toDomainAlias(alias: String): DomainAlias = DomainAlias.tryCreate(alias) + implicit def toDomainAliases(aliases: Seq[String]): Seq[DomainAlias] = + aliases.map(DomainAlias.tryCreate) + + implicit def toInstanceName(name: String): InstanceName = InstanceName.tryCreate(name) + + implicit def toGrpcSequencerConnection(connection: String): SequencerConnection = + GrpcSequencerConnection.tryCreate(connection) + + implicit def toSequencerConnections(connection: String): SequencerConnections = + 
SequencerConnections.single(GrpcSequencerConnection.tryCreate(connection)) + + implicit def toGSequencerConnection( + ref: InstanceReferenceWithSequencerConnection + ): SequencerConnection = + ref.sequencerConnection + + implicit def toGSequencerConnections( + ref: InstanceReferenceWithSequencerConnection + ): SequencerConnections = + SequencerConnections.single(ref.sequencerConnection) + + implicit def toIdentifier(id: String): Identifier = Identifier.tryCreate(id) + implicit def toFingerprint(fp: String): Fingerprint = Fingerprint.tryCreate(fp) + + /** Implicitly map ParticipantReferences to the ParticipantId + */ + implicit def toParticipantId(reference: ParticipantReference): ParticipantId = reference.id + implicit def toParticipantIdX(reference: ParticipantReferenceX): ParticipantId = reference.id + + /** Implicitly map an `Int` to a `NonNegativeInt`. + * @throws java.lang.IllegalArgumentException if `n` is negative + */ + implicit def toNonNegativeInt(n: Int): NonNegativeInt = NonNegativeInt.tryCreate(n) + + /** Implicitly map an `Int` to a `PositiveInt`. 
+ * @throws java.lang.IllegalArgumentException if `n` is not positive + */ + implicit def toPositiveInt(n: Int): PositiveInt = PositiveInt.tryCreate(n) + + /** Implicitly map a Double to a `PositiveDouble` + * @throws java.lang.IllegalArgumentException if `n` is not positive + */ + implicit def toPositiveDouble(n: Double): PositiveDouble = PositiveDouble.tryCreate(n) + + /** Implicitly map a `CantonTimestamp` to a `LedgerCreateTime` + */ + implicit def toLedgerCreateTime(ts: CantonTimestamp): SerializableContract.LedgerCreateTime = + SerializableContract.LedgerCreateTime(ts) + + /** Implicitly convert a duration to a [[com.digitalasset.canton.config.NonNegativeDuration]] + * @throws java.lang.IllegalArgumentException if `duration` is negative + */ + implicit def durationToNonNegativeDuration(duration: SDuration): NonNegativeDuration = + NonNegativeDuration.tryFromDuration(duration) + + /** Implicitly convert a duration to a [[com.digitalasset.canton.config.NonNegativeFiniteDuration]] + * @throws java.lang.IllegalArgumentException if `duration` is negative or infinite + */ + implicit def durationToNonNegativeFiniteDuration( + duration: SDuration + ): NonNegativeFiniteDuration = + NonNegativeFiniteDuration.tryFromDuration(duration) + + /** Implicitly convert a duration to a [[com.digitalasset.canton.config.PositiveDurationSeconds]] + * + * @throws java.lang.IllegalArgumentException if `duration` is not positive or not rounded to the second. 
+ */ + implicit def durationToPositiveDurationRoundedSeconds( + duration: SDuration + ): PositiveDurationSeconds = + PositiveDurationSeconds.tryFromDuration(duration) + } + + object Implicits extends Implicits + +} + +class SimClockCommand(clock: SimClock) { + + @Help.Description("Get current time") + def now: Instant = clock.now.toInstant + + @Help.Description("Advance time to given time-point") + def advanceTo(timestamp: Instant): Unit = TraceContext.withNewTraceContext { + implicit traceContext => + clock.advanceTo(CantonTimestamp.assertFromInstant(timestamp)) + } + + @Help.Description("Advance time by given time-period") + def advance(duration: JDuration): Unit = TraceContext.withNewTraceContext { + implicit traceContext => + clock.advance(duration) + } + + @Help.Summary("Reset simulation clock") + def reset(): Unit = clock.reset() + +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleEnvironmentBinding.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleEnvironmentBinding.scala new file mode 100644 index 0000000000..54a2992f87 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleEnvironmentBinding.scala @@ -0,0 +1,74 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.console + +/** To make the [[ConsoleEnvironment]] functionality conveniently available in ammonite we stash + * it in an implicit variable included as a predef before any script or REPL commands are run. 
+ */ +class ConsoleEnvironmentBinding { + + protected def consoleMacrosImport: String = + "import com.digitalasset.canton.console.ConsoleMacros._" + + /** The predef code itself which is executed before any script or repl command */ + def predefCode(interactive: Boolean, noTty: Boolean = false): String = { + val consoleEnvClassName = objectClassNameWithoutSuffix(ConsoleEnvironment.Implicits.getClass) + + // this is the magic which allows us to use extensions such as `all start` on a sequence of instance references + // and those extensions to still obtain an implicit reference to the [[ConsoleEnvironment]] instance (where state like packages is kept) + val builder = new StringBuilder(s""" + |interp.configureCompiler(_.settings.processArgumentString("-Xsource:2.13")) + |import $consoleEnvClassName._ + |import com.digitalasset.canton.topology.store.TimeQuery + |import com.digitalasset.canton.topology._ + |import com.digitalasset.canton.topology.transaction._ + |import com.digitalasset.canton.crypto._ + |import com.digitalasset.canton.config._ + |import com.digitalasset.canton.admin.api.client.data._ + |import com.digitalasset.canton.participant.domain.DomainConnectionConfig + |import com.digitalasset.canton.SequencerAlias + |import com.digitalasset.canton.sequencing.SequencerConnection + |import com.digitalasset.canton.sequencing.SequencerConnections + |import com.digitalasset.canton.sequencing.GrpcSequencerConnection + |$consoleMacrosImport + |import com.digitalasset.canton.console.commands.DomainChoice + |import ${classOf[com.digitalasset.canton.console.BootstrapScriptException].getName} + |import com.digitalasset.canton.config.RequireTypes._ + |import com.digitalasset.canton.participant.admin.ResourceLimits + |import java.time.Instant + |import scala.concurrent.ExecutionContextExecutor + |import scala.concurrent.duration._ + |import scala.language.postfixOps + |implicit val consoleEnvironment = ${ConsoleEnvironmentBinding.BindingName} + |implicit val ec: 
ExecutionContextExecutor = consoleEnvironment.environment.executionContext + |implicit def fromSequencerConnection(connection: SequencerConnection): SequencerConnections = + | SequencerConnections.single(connection) + |def help = consoleEnvironment.help + |def help(s: String) = consoleEnvironment.help(s) + |def health = consoleEnvironment.health + |def logger = consoleEnvironment.consoleLogger + """.stripMargin) + + // if we don't have a tty available switch the ammonite frontend to a dumb terminal + if (noTty) { + builder ++= System.lineSeparator() + builder ++= "repl.frontEnd() = new ammonite.repl.FrontEnds.JLineUnix(ammonite.compiler.Parsers)" + } + + if (interactive) { + builder ++= System.lineSeparator() + builder ++= "repl.pprinter() = repl.pprinter().copy(additionalHandlers = { case p: com.digitalasset.canton.logging.pretty.PrettyPrinting => import com.digitalasset.canton.logging.pretty.Pretty._; p.toTree }, defaultHeight = 100)" + } + + builder.result() + } + +} + +object ConsoleEnvironmentBinding { + + /** where we hide the value of the active environment instance within the scope of our repl ******* */ + private[console] val BindingName = "__replEnvironmentValue" + +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleErrorHandler.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleErrorHandler.scala new file mode 100644 index 0000000000..fc46d06a63 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleErrorHandler.scala @@ -0,0 +1,32 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.console + +import scala.util.control.NoStackTrace + +/** Handle an error from a console. + * We expect this implementation will either throw or exit, hence the [[scala.Nothing]] return type. 
+ */ +trait ConsoleErrorHandler { + def handleCommandFailure(): Nothing + + def handleInternalError(): Nothing +} + +final class CommandFailure() extends Throwable("Command execution failed.") with NoStackTrace + +final class CantonInternalError() + extends Throwable( + "Command execution failed due to an internal error. Please file a bug report." + ) + with NoStackTrace + +/** Throws a [[CommandFailure]] or [[CantonInternalError]] when a command fails. + * The throwables do not have stacktraces, to avoid noise in the interactive console. + */ +object ThrowErrorHandler extends ConsoleErrorHandler { + override def handleCommandFailure(): Nothing = throw new CommandFailure() + + override def handleInternalError(): Nothing = throw new CantonInternalError() +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleGrpcAdminCommandRunner.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleGrpcAdminCommandRunner.scala new file mode 100644 index 0000000000..51f36e2564 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleGrpcAdminCommandRunner.scala @@ -0,0 +1,127 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.console + +import com.digitalasset.canton.admin.api.client.GrpcCtlRunner +import com.digitalasset.canton.admin.api.client.commands.GrpcAdminCommand +import com.digitalasset.canton.admin.api.client.commands.GrpcAdminCommand.{ + CustomClientTimeout, + DefaultBoundedTimeout, + DefaultUnboundedTimeout, + ServerEnforcedTimeout, +} +import com.digitalasset.canton.config.RequireTypes.Port +import com.digitalasset.canton.config.{ClientConfig, ConsoleCommandTimeout, NonNegativeDuration} +import com.digitalasset.canton.environment.Environment +import com.digitalasset.canton.lifecycle.Lifecycle.CloseableChannel +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.networking.grpc.ClientChannelBuilder +import com.digitalasset.canton.tracing.{Spanning, TraceContext} +import io.opentelemetry.api.trace.Tracer + +import java.util.concurrent.TimeUnit +import scala.collection.concurrent.TrieMap +import scala.concurrent.duration.{Duration, FiniteDuration} +import scala.concurrent.{ExecutionContextExecutor, blocking} + +/** Attempt to run a grpc admin-api command against whatever is pointed at in the config + */ +class GrpcAdminCommandRunner( + environment: Environment, + val commandTimeouts: ConsoleCommandTimeout, +)(implicit tracer: Tracer) + extends NamedLogging + with AutoCloseable + with Spanning { + + private implicit val executionContext: ExecutionContextExecutor = + environment.executionContext + override val loggerFactory: NamedLoggerFactory = environment.loggerFactory + + private val grpcRunner = new GrpcCtlRunner( + environment.config.monitoring.logging.api.maxMessageLines, + environment.config.monitoring.logging.api.maxStringLength, + loggerFactory, + ) + private val channels = TrieMap[(String, String, Port), CloseableChannel]() + + def runCommandAsync[Result]( + instanceName: String, + command: GrpcAdminCommand[_, _, Result], + clientConfig: 
ClientConfig, + token: Option[String], + )(implicit traceContext: TraceContext) = { + val awaitTimeout = command.timeoutType match { + case CustomClientTimeout(timeout) => timeout + // If a custom timeout for a console command is set, it involves some non-gRPC timeout mechanism + // -> we set the gRPC timeout to Inf, so gRPC never times out before the other timeout mechanism + case ServerEnforcedTimeout => NonNegativeDuration(Duration.Inf) + case DefaultBoundedTimeout => commandTimeouts.bounded + case DefaultUnboundedTimeout => commandTimeouts.unbounded + } + val callTimeout = awaitTimeout.duration match { + // Abort the command shortly before the console times out, to get a better error message + case x: FiniteDuration => Duration((x.toMillis * 9) / 10, TimeUnit.MILLISECONDS) + case x => x + } + val closeableChannel = getOrCreateChannel(instanceName, clientConfig) + logger.debug(s"Running on ${instanceName} command ${command} against ${clientConfig}")( + traceContext + ) + ( + awaitTimeout, + grpcRunner.run(instanceName, command, closeableChannel.channel, token, callTimeout), + ) + } + + def runCommand[Result]( + instanceName: String, + command: GrpcAdminCommand[_, _, Result], + clientConfig: ClientConfig, + token: Option[String], + ): ConsoleCommandResult[Result] = + withNewTrace[ConsoleCommandResult[Result]](command.fullName) { implicit traceContext => span => + span.setAttribute("instance_name", instanceName) + val (awaitTimeout, commandET) = runCommandAsync(instanceName, command, clientConfig, token) + val apiResult = + awaitTimeout.await( + s"Running on ${instanceName} command ${command} against ${clientConfig}" + )( + commandET.value + ) + // convert to a console command result + apiResult.toResult + } + + private def getOrCreateChannel( + instanceName: String, + clientConfig: ClientConfig, + ): CloseableChannel = + blocking(synchronized { + val addr = (instanceName, clientConfig.address, clientConfig.port) + channels.getOrElseUpdate( + addr, + new 
CloseableChannel( + ClientChannelBuilder.createChannelToTrustedServer(clientConfig), + logger, + s"ConsoleCommand", + ), + ) + }) + + override def close(): Unit = { + closeChannels() + } + + def closeChannels(): Unit = { + channels.values.foreach(_.close()) + channels.clear() + } +} + +class ConsoleGrpcAdminCommandRunner(consoleEnvironment: ConsoleEnvironment) + extends GrpcAdminCommandRunner( + consoleEnvironment.environment, + consoleEnvironment.commandTimeouts, + )(consoleEnvironment.tracer) diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleMacros.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleMacros.scala new file mode 100644 index 0000000000..5578142ad5 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleMacros.scala @@ -0,0 +1,847 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.console + +import better.files.File +import cats.syntax.either.* +import cats.syntax.functor.* +import ch.qos.logback.classic.spi.ILoggingEvent +import ch.qos.logback.classic.{Level, Logger} +import ch.qos.logback.core.spi.AppenderAttachable +import ch.qos.logback.core.{Appender, FileAppender} +import com.daml.ledger.api.v1.commands.{Command, CreateCommand, ExerciseCommand} +import com.daml.ledger.api.v1.event.CreatedEvent +import com.daml.ledger.api.v1.value.Value.Sum +import com.daml.ledger.api.v1.value.{ + Identifier as IdentifierV1, + List as ListV1, + Optional, + Record, + RecordField, + Value, +} +import com.daml.lf.value.Value.ContractId +import com.digitalasset.canton.DomainAlias +import com.digitalasset.canton.admin.api.client.commands.LedgerApiTypeWrappers.ContractData +import com.digitalasset.canton.admin.api.client.data.{ListPartiesResult, TemplateId} +import com.digitalasset.canton.concurrent.Threading 
+import com.digitalasset.canton.config.NonNegativeDuration +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.console.ConsoleEnvironment.Implicits.* +import com.digitalasset.canton.crypto.{CryptoPureApi, Salt} +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.logging.{LastErrorsAppender, NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.participant.admin.inspection.SyncStateInspection +import com.digitalasset.canton.participant.admin.repair.RepairService +import com.digitalasset.canton.participant.config.{AuthServiceConfig, BaseParticipantConfig} +import com.digitalasset.canton.participant.ledger.api.JwtTokenUtilities +import com.digitalasset.canton.protocol.SerializableContract.LedgerCreateTime +import com.digitalasset.canton.protocol.* +import com.digitalasset.canton.topology.* +import com.digitalasset.canton.tracing.{NoTracing, TraceContext} +import com.digitalasset.canton.util.BinaryFileUtil +import com.digitalasset.canton.version.ProtocolVersion +import com.google.protobuf.ByteString +import com.typesafe.scalalogging.LazyLogging +import io.circe.Encoder +import io.circe.generic.semiauto.deriveEncoder +import io.circe.syntax.* + +import java.io.File as JFile +import java.time.Instant +import scala.annotation.nowarn +import scala.collection.mutable +import scala.concurrent.duration.* +import scala.jdk.CollectionConverters.* + +trait ConsoleMacros extends NamedLogging with NoTracing { + import scala.reflect.runtime.universe.* + + @Help.Summary("Console utilities") + @Help.Group("Utilities") + object utils extends Helpful { + + @Help.Summary("Reflective inspection of object arguments, handy to inspect case class objects") + @Help.Description( + "Return the list field names of the given object. Helpful function when inspecting the return result." 
+ ) + def object_args[T: TypeTag](obj: T): List[String] = type_args[T] + + @Help.Summary("Reflective inspection of type arguments, handy to inspect case class types") + @Help.Description( + "Return the list of field names of the given type. Helpful function when creating new objects for requests." + ) + def type_args[T: TypeTag]: List[String] = + typeOf[T].members.collect { + case m: MethodSymbol if m.isCaseAccessor => s"${m.name}:${m.returnType}" + }.toList + + @Help.Summary("Wait for a condition to become true, using default timeouts") + @Help.Description(""" + |Wait until condition becomes true, with a timeout taken from the parameters.timeouts.console.bounded + |configuration parameter.""") + final def retry_until_true( + condition: => Boolean + )(implicit + env: ConsoleEnvironment + ): Unit = retry_until_true(env.commandTimeouts.bounded)( + condition, + s"Condition never became true within ${env.commandTimeouts.bounded.unwrap}", + ) + + @Help.Summary("Wait for a condition to become true") + @Help.Description("""Wait `timeout` duration until `condition` becomes true. + | Retry evaluating `condition` with an exponentially increasing back-off up to `maxWaitPeriod` duration between retries. 
+ |""") + @SuppressWarnings(Array("org.wartremover.warts.Var", "org.wartremover.warts.While")) + final def retry_until_true( + timeout: NonNegativeDuration, + maxWaitPeriod: NonNegativeDuration = 10.seconds, + )( + condition: => Boolean, + failure: => String = s"Condition never became true within $timeout", + ): Unit = { + val deadline = timeout.asFiniteApproximation.fromNow + var isCompleted = condition + var waitMillis = 1L + while (!isCompleted) { + val timeLeft = deadline.timeLeft + if (timeLeft > Duration.Zero) { + val remaining = (timeLeft min (waitMillis.millis)) max 1.millis + Threading.sleep(remaining.toMillis) + // capped exponentially back off + waitMillis = (waitMillis * 2) min maxWaitPeriod.duration.toMillis + isCompleted = condition + } else { + throw new IllegalStateException(failure) + } + } + } + + @Help.Summary("Wait until all topology changes have been effected on all accessible nodes") + def synchronize_topology( + timeoutO: Option[NonNegativeDuration] = None + )(implicit env: ConsoleEnvironment): Unit = { + ConsoleMacros.utils.retry_until_true(timeoutO.getOrElse(env.commandTimeouts.bounded)) { + env.nodes.all.forall(_.topology.synchronisation.is_idle()) + } + } + + @Help.Summary("Create a navigator ui-backend.conf for a participant") + def generate_navigator_conf( + participant: LocalParticipantReference, + file: Option[String] = None, + ): JFile = { + val conf = + participant.parties + .hosted() + .map(x => x.party) + .map(party => { + s""" ${party.uid.id.unwrap} { + | party = "${party.uid.toProtoPrimitive}" + | password = password + | } + |""".stripMargin + }) + .mkString("\n") + val port = participant.config.ledgerApi.port + val targetFile = file.map(File(_)).getOrElse(File(s"ui-backend-${participant.name}.conf")) + val instructions = + s"daml navigator server localhost ${port.unwrap} -t wallclock --port ${(port + 4).unwrap.toString} -c ${targetFile.name}" + + targetFile.overwrite("// run with\n// ") + targetFile.appendLines(instructions, 
"users {") + targetFile.appendText(conf) + targetFile.appendLine("}") + + targetFile.toJava + } + + @nowarn("cat=lint-byname-implicit") // https://github.com/scala/bug/issues/12072 + private object GenerateDamlScriptParticipantsConf { + import ConsoleEnvironment.Implicits.* + + private val filename = "participant-config.json" + + case class LedgerApi(host: String, port: Int) + // Keys in the exported JSON should have snake_case + case class Participants( + default_participant: Option[LedgerApi], + participants: Map[String, LedgerApi], + party_participants: Map[String, String], + ) + + implicit val ledgerApiEncoder: Encoder[LedgerApi] = deriveEncoder[LedgerApi] + implicit val participantsEncoder: Encoder[Participants] = deriveEncoder[Participants] + + private def partyIdToParticipants( + useParticipantAlias: Boolean, + uidToAlias: Map[ParticipantId, String], + )(implicit env: ConsoleEnvironment): Map[String, String] = { + def participantReference(p: ParticipantId) = if (useParticipantAlias) + uidToAlias.getOrElse(p, p.uid.toProtoPrimitive) + else p.uid.toProtoPrimitive + def partyIdToParticipant(p: ListPartiesResult) = p.participants.headOption.map { + participantDomains => + (p.party.filterString, participantReference(participantDomains.participant)) + } + + val partyAndParticipants = + env.participants.all.flatMap(_.parties.list().flatMap(partyIdToParticipant(_).toList)) + val allPartiesSingleParticipant = + partyAndParticipants.groupBy { case (partyId, _) => partyId }.forall { + case (_, participants) => participants.sizeCompare(1) <= 0 + } + + if (!allPartiesSingleParticipant) + logger.info( + "Some parties are hosted on more than one participant. " + + "For such parties, only one participant will be exported to the generated config file." 
+ ) + + partyAndParticipants.toMap + } + + def apply( + file: Option[String] = None, + useParticipantAlias: Boolean = true, + defaultParticipant: Option[ParticipantReference] = None, + )(implicit env: ConsoleEnvironment): JFile = { + + def toLedgerApi(participantConfig: BaseParticipantConfig) = + LedgerApi( + participantConfig.clientLedgerApi.address, + participantConfig.clientLedgerApi.port.unwrap, + ) + + def participantValue(p: ParticipantReference): String = + if (useParticipantAlias) p.name else p.uid.toProtoPrimitive + + val allParticipants = env.participants.all + val participantsData = + allParticipants.map(p => (participantValue(p), toLedgerApi(p.config))).toMap + val uidToAlias = allParticipants.map(p => (p.id, p.name)).toMap + + val default_participant = + defaultParticipant.map(participantReference => toLedgerApi(participantReference.config)) + + val participantJson = Participants( + default_participant, + participantsData, + partyIdToParticipants(useParticipantAlias, uidToAlias), + ).asJson.spaces2 + + val targetFile = file.map(File(_)).getOrElse(File(filename)) + targetFile.overwrite(participantJson).appendLine() + + targetFile.toJava + } + } + + @Help.Summary("Create a participants config for Daml script") + @Help.Description( + """The generated config can be passed to `daml script` via the `participant-config` parameter. 
+ |More information about the file format can be found in the `documentation `_: + |It takes three arguments: + |- file (default to "participant-config.json") + |- useParticipantAlias (default to true): participant aliases are used instead of UIDs + |- defaultParticipant (default to None): adds a default participant if provided + |""" + ) + def generate_daml_script_participants_conf( + file: Option[String] = None, + useParticipantAlias: Boolean = true, + defaultParticipant: Option[ParticipantReference] = None, + )(implicit env: ConsoleEnvironment): JFile = + GenerateDamlScriptParticipantsConf( + file, + useParticipantAlias, + defaultParticipant, + ) + + // TODO(i7387): add check that flag is set + @Help.Summary( + "Register `AutoCloseable` object to be shutdown if Canton is shut down", + FeatureFlag.Testing, + ) + def auto_close(closeable: AutoCloseable)(implicit environment: ConsoleEnvironment): Unit = { + environment.environment.addUserCloseable(closeable) + } + + @Help.Summary("Convert contract data to a contract instance.") + @Help.Description( + """The `utils.contract_data_to_instance` bridges the gap between `participant.ledger_api.acs` commands that + |return various pieces of "contract data" and the `participant.repair.add` command used to add "contract instances" + |as part of repair workflows. Such workflows (for example migrating contracts from other Daml ledgers to Canton + |participants) typically consist of extracting contract data using `participant.ledger_api.acs` commands, + |modifying the contract data, and then converting the `contractData` using this function before finally + |adding the resulting contract instances to Canton participants via `participant.repair.add`. + |Obtain the `contractData` by invoking `.toContractData` on the `WrappedCreatedEvent` returned by the + |corresponding `participant.ledger_api.acs.of_party` or `of_all` call. 
The `ledgerTime` parameter should be + |chosen to be a time meaningful to the domain on which you plan to subsequently invoke `participant.repair.add` + |on and will be retained alongside the contract instance by the `participant.repair.add` invocation.""" + ) + def contract_data_to_instance(contractData: ContractData, ledgerTime: Instant)(implicit + env: ConsoleEnvironment + ): SerializableContract = + TraceContext.withNewTraceContext { implicit traceContext => + env.runE( + RepairService.ContractConverter.contractDataToInstance( + contractData.templateId.toIdentifier, + contractData.createArguments, + contractData.signatories, + contractData.observers, + contractData.inheritedContractId, + contractData.ledgerCreateTime.map(_.toInstant).getOrElse(ledgerTime), + contractData.contractSalt, + ) + ) + } + + @Help.Summary("Convert a contract instance to contract data.") + @Help.Description( + """The `utils.contract_instance_to_data` converts a Canton "contract instance" to "contract data", a format more + |amenable to inspection and modification as part of repair workflows. 
This function consumes the output of + |the `participant.testing` commands and can thus be employed in workflows geared at verifying the contents of + |contracts for diagnostic purposes and in environments in which the "features.enable-testing-commands" + |configuration can be (at least temporarily) enabled.""" + ) + def contract_instance_to_data( + contract: SerializableContract + )(implicit env: ConsoleEnvironment): ContractData = + env.runE( + RepairService.ContractConverter.contractInstanceToData(contract).map { + case ( + templateId, + createArguments, + signatories, + observers, + contractId, + contractSaltO, + ledgerCreateTime, + ) => + ContractData( + TemplateId.fromIdentifier(templateId), + createArguments, + signatories, + observers, + contractId, + contractSaltO, + Some(ledgerCreateTime.ts.underlying), + ) + } + ) + + @Help.Summary("Recompute authenticated contract ids.") + @Help.Description( + """The `utils.recompute_contract_ids` regenerates "contract ids" of multiple contracts after their contents have + |changed. Starting from protocol version 4, Canton uses the so called authenticated contract ids which depend + |on the details of the associated contracts. 
When aspects of a contract such as the parties involved change as + |part of repair or export/import procedure, the corresponding contract id must be recomputed.""" + ) + def recompute_contract_ids( + participant: LocalParticipantReference, + acs: Seq[SerializableContract], + protocolVersion: ProtocolVersion, + ): (Seq[SerializableContract], Map[LfContractId, LfContractId]) = { + val contractIdMappings = mutable.Map.empty[LfContractId, LfContractId] + // We assume ACS events are in order + val remappedCIds = acs.map { contract => + // Update the referenced contract ids + val contractInstanceWithUpdatedContractIdReferences = + SerializableRawContractInstance + .create( + contract.rawContractInstance.contractInstance.map(_.mapCid(contractIdMappings)), + AgreementText.empty, // Empty is fine, because the agreement text is not used when generating the raw serializable contract hash + ) + .valueOr(err => + throw new RuntimeException( + s"Could not create serializable raw contract instance: $err" + ) + ) + + val LfContractId.V1(discriminator, _) = contract.contractId + val contractSalt = contract.contractSalt.getOrElse( + throw new IllegalArgumentException("Missing contract salt") + ) + val pureCrypto = participant.underlying + .map(_.cryptoPureApi) + .getOrElse(sys.error("where is my crypto?")) + + // Compute the new contract id + val newContractId = + generate_contract_id( + cryptoPureApi = pureCrypto, + rawContract = contractInstanceWithUpdatedContractIdReferences, + createdAt = contract.ledgerCreateTime.ts, + discriminator = discriminator, + contractSalt = contractSalt, + metadata = contract.metadata, + ) + + // Update the contract id mappings with the current contract's id + contractIdMappings += contract.contractId -> newContractId + + // Update the contract with the new contract id and recomputed instance + contract + .copy( + contractId = newContractId, + rawContractInstance = contractInstanceWithUpdatedContractIdReferences, + ) + } + + remappedCIds -> 
Map.from(contractIdMappings) + } + + @Help.Summary("Generate authenticated contract id.") + @Help.Description( + """The `utils.generate_contract_id` generates "contract id" of a contract. Starting from protocol version 4, + |Canton uses the so called authenticated contract ids which depend on the details of the associated contracts. + |When aspects of a contract such as the parties involved change as part of repair or export/import procedure, + |the corresponding contract id must be recomputed. This function can be used as a tool to generate an id for + |an arbitrary contract content""" + ) + def generate_contract_id( + cryptoPureApi: CryptoPureApi, + rawContract: SerializableRawContractInstance, + createdAt: CantonTimestamp, + discriminator: LfHash, + contractSalt: Salt, + metadata: ContractMetadata, + ): ContractId.V1 = { + val unicumGenerator = new UnicumGenerator(cryptoPureApi) + val cantonContractIdVersion = AuthenticatedContractIdVersionV2 + val unicum = unicumGenerator + .recomputeUnicum( + contractSalt, + LedgerCreateTime(createdAt), + metadata, + rawContract, + cantonContractIdVersion, + ) + .valueOr(err => throw new RuntimeException(err)) + cantonContractIdVersion.fromDiscriminator(discriminator, unicum) + } + + @Help.Summary("Writes several Protobuf messages to a file.") + def write_to_file(data: Seq[scalapb.GeneratedMessage], fileName: String): Unit = + File(fileName).outputStream.foreach { os => + data.foreach(_.writeDelimitedTo(os)) + } + + @Help.Summary("Reads several Protobuf messages from a file.") + @Help.Description("Fails with an exception, if the file can't be read or parsed.") + def read_all_messages_from_file[A <: scalapb.GeneratedMessage]( + fileName: String + )(implicit companion: scalapb.GeneratedMessageCompanion[A]): Seq[A] = + File(fileName).inputStream + .apply { is => + Seq.unfold(()) { _ => + companion.parseDelimitedFrom(is).map(_ -> ()) + } + } + + @Help.Summary("Writes a Protobuf message to a file.") + def write_to_file(data: 
scalapb.GeneratedMessage, fileName: String): Unit = + write_to_file(Seq(data), fileName) + + @Help.Summary("Reads a single Protobuf message from a file.") + @Help.Description("Fails with an exception, if the file can't be read or parsed.") + def read_first_message_from_file[A <: scalapb.GeneratedMessage]( + fileName: String + )(implicit companion: scalapb.GeneratedMessageCompanion[A]): A = + File(fileName).inputStream + .apply(companion.parseDelimitedFrom) + .getOrElse( + throw new IllegalArgumentException( + s"Unable to read ${companion.getClass.getSimpleName} from $fileName." + ) + ) + + @Help.Summary("Writes a ByteString to a file.") + def write_to_file(data: ByteString, fileName: String): Unit = + BinaryFileUtil.writeByteStringToFile(fileName, data) + + @Help.Summary("Reads a ByteString from a file.") + @Help.Description("Fails with an exception, if the file can't be read.") + def read_byte_string_from_file(fileName: String)(implicit env: ConsoleEnvironment): ByteString = + env.runE(BinaryFileUtil.readByteStringFromFile(fileName)) + + } + + @Help.Summary("Canton development and testing utilities", FeatureFlag.Testing) + @Help.Group("Ledger Api Testing") + object ledger_api_utils extends Helpful { + + private def buildIdentifier(packageId: String, module: String, template: String): IdentifierV1 = + IdentifierV1( + packageId = packageId, + moduleName = module, + entityName = template, + ) + + private def productToLedgerApiRecord(product: Product): Sum.Record = Value.Sum.Record( + Record(fields = + product.productIterator + .map(mapToLedgerApiValue) + .map(v => RecordField(value = Some(v))) + .toSeq + ) + ) + + private def mapToLedgerApiValue(value: Any): Value = { + + // assuming that String.toString = id, we'll just map any Map to a string map without casting + def safeMapCast(map: Map[_, _]): Map[String, Any] = map.map { case (key, value) => + (key.toString, value) + } + + val x: Value.Sum = value match { + case x: Int => Value.Sum.Int64(x.toLong) + case x: 
Long => Value.Sum.Int64(x) + case x: PartyId => Value.Sum.Party(x.toLf) + case x: Float => Value.Sum.Numeric(s"$x") + case x: Double => Value.Sum.Numeric(s"$x") + case x: String => Value.Sum.Text(x) + case x: Boolean => Value.Sum.Bool(x) + case x: Seq[Any] => Value.Sum.List(value = ListV1(x.map(mapToLedgerApiValue))) + case x: LfContractId => Value.Sum.ContractId(x.coid) + case x: Instant => Value.Sum.Timestamp(x.toEpochMilli * 1000L) + case x: Option[Any] => Value.Sum.Optional(Optional(value = x.map(mapToLedgerApiValue))) + case x: Value.Sum => x + case x: Map[_, _] => Value.Sum.Record(buildArguments(safeMapCast(x))) + case x: (Any, Any) => productToLedgerApiRecord(x) + case x: (Any, Any, Any) => productToLedgerApiRecord(x) + case _ => + throw new UnsupportedOperationException( + s"value type not yet implemented: ${value.getClass}" + ) + } + Value(x) + } + + private def mapToRecordField(item: (String, Any)): RecordField = + RecordField( + label = item._1, + value = Some(mapToLedgerApiValue(item._2)), + ) + + private def buildArguments(map: Map[String, Any]): Record = + Record( + fields = map.map(mapToRecordField).toSeq + ) + + @Help.Summary("Build create command", FeatureFlag.Testing) + def create( + packageId: String, + module: String, + template: String, + arguments: Map[String, Any], + ): Command = + Command().withCreate( + CreateCommand( + templateId = Some(buildIdentifier(packageId, module, template)), + createArguments = Some(buildArguments(arguments)), + ) + ) + + @Help.Summary("Build exercise command", FeatureFlag.Testing) + def exercise( + packageId: String, + module: String, + template: String, + choice: String, + arguments: Map[String, Any], + contractId: String, + ): Command = + Command().withExercise( + ExerciseCommand( + templateId = Some(buildIdentifier(packageId, module, template)), + choice = choice, + choiceArgument = Some(Value(Value.Sum.Record(buildArguments(arguments)))), + contractId = contractId, + ) + ) + + @Help.Summary("Build exercise 
command from CreatedEvent", FeatureFlag.Testing) + def exercise(choice: String, arguments: Map[String, Any], event: CreatedEvent): Command = { + def getOrThrow(desc: String, opt: Option[String]): String = + opt.getOrElse( + throw new IllegalArgumentException(s"Corrupt created event ${event} without ${desc}") + ) + exercise( + getOrThrow( + "packageId", + event.templateId + .map(_.packageId), + ), + getOrThrow("moduleName", event.templateId.map(_.moduleName)), + getOrThrow("template", event.templateId.map(_.entityName)), + choice, + arguments, + event.contractId, + ) + } + + // intentionally not publicly documented + object jwt { + def generate_unsafe_token_for_participant( + participant: LocalParticipantReference, + admin: Boolean, + applicationId: String, + ): Map[PartyId, String] = { + val secret = participant.config.ledgerApi.authServices + .collectFirst { case AuthServiceConfig.UnsafeJwtHmac256(secret, _, _) => + secret.unwrap + } + .getOrElse("notasecret") + + participant.parties + .hosted() + .map(_.party) + .map(x => + ( + x, + generate_unsafe_jwt256_token( + secret = secret, + admin = admin, + readAs = List(x.toLf), + actAs = List(x.toLf), + ledgerId = Some(participant.id.uid.id.unwrap), + applicationId = Some(applicationId), + ), + ) + ) + .toMap + } + + def generate_unsafe_jwt256_token( + secret: String, + admin: Boolean, + readAs: List[String], + actAs: List[String], + ledgerId: Option[String], + applicationId: Option[String], + ): String = JwtTokenUtilities.buildUnsafeToken( + secret = secret, + admin = admin, + readAs = readAs, + actAs = actAs, + ledgerId = ledgerId, + applicationId = applicationId, + ) + } + } + + @Help.Summary("Logging related commands") + @Help.Group("Logging") + object logging extends Helpful { + + @SuppressWarnings(Array("org.wartremover.warts.Null")) + @Help.Summary("Dynamically change log level (TRACE, DEBUG, INFO, WARN, ERROR, OFF, null)") + def set_level(loggerName: String = "com.digitalasset.canton", level: String): Unit = { 
+ if (Seq("com.digitalasset.canton", "com.daml").exists(loggerName.startsWith)) + System.setProperty("LOG_LEVEL_CANTON", level) + + val logger = getLogger(loggerName) + if (level == "null") + logger.setLevel(null) + else + logger.setLevel(Level.valueOf(level)) + } + + @Help.Summary("Determine current logging level") + def get_level(loggerName: String = "com.digitalasset.canton"): Option[Level] = + Option(getLogger(loggerName).getLevel) + + private def getLogger(loggerName: String): Logger = { + import org.slf4j.LoggerFactory + @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) + val logger: Logger = LoggerFactory.getLogger(loggerName).asInstanceOf[Logger] + logger + } + + private def getAppenders(logger: Logger): List[Appender[ILoggingEvent]] = { + def go(currentAppender: Appender[ILoggingEvent]): List[Appender[ILoggingEvent]] = { + currentAppender match { + case attachable: AppenderAttachable[ILoggingEvent @unchecked] => + attachable.iteratorForAppenders().asScala.toList.flatMap(go) + case appender: Appender[ILoggingEvent] => List(appender) + } + } + + logger.iteratorForAppenders().asScala.toList.flatMap(go) + } + + private lazy val rootLogger = getLogger(org.slf4j.Logger.ROOT_LOGGER_NAME) + + private lazy val allAppenders = getAppenders(rootLogger) + + private lazy val lastErrorsAppender: LastErrorsAppender = { + findAppender("LAST_ERRORS") match { + case Some(lastErrorsAppender: LastErrorsAppender) => + lastErrorsAppender + case _ => + logger.error(s"Log appender for last errors not found/configured") + throw new CommandFailure() + } + } + + private def findAppender(appenderName: String): Option[Appender[ILoggingEvent]] = + Option(rootLogger.getAppender(appenderName)) + .orElse(allAppenders.find(_.getName == appenderName)) + + private def renderError(errorEvent: ILoggingEvent): String = { + findAppender("FILE") match { + case Some(appender: FileAppender[ILoggingEvent]) => + ByteString.copyFrom(appender.getEncoder.encode(errorEvent)).toStringUtf8 + 
case _ => errorEvent.getFormattedMessage + } + } + + @Help.Summary("Returns the last errors (trace-id -> error event) that have been logged locally") + def last_errors(): Map[String, String] = + lastErrorsAppender.lastErrors.fmap(renderError) + + @Help.Summary("Returns log events for an error with the same trace-id") + def last_error_trace(traceId: String): Seq[String] = { + lastErrorsAppender.lastErrorTrace(traceId) match { + case Some(events) => events.map(renderError) + case None => + logger.error(s"No events found for last error trace-id $traceId") + throw new CommandFailure() + } + } + } + + @Help.Summary("Configure behaviour of console") + @Help.Group("Console") + object console extends Helpful { + + @Help.Summary("Yields the timeout for running console commands") + @Help.Description( + "Yields the timeout for running console commands. " + + "When the timeout has elapsed, the console stops waiting for the command result. " + + "The command will continue running in the background." + ) + def command_timeout(implicit env: ConsoleEnvironment): NonNegativeDuration = + env.commandTimeouts.bounded + + @Help.Summary("Sets the timeout for running console commands.") + @Help.Description( + "Sets the timeout for running console commands. " + + "When the timeout has elapsed, the console stops waiting for the command result. " + + "The command will continue running in the background. " + + "The new timeout must be positive." 
+ ) + def set_command_timeout(newTimeout: NonNegativeDuration)(implicit + env: ConsoleEnvironment + ): Unit = + env.setCommandTimeout(newTimeout) + + // this command is intentionally not documented as part of the help system + def disable_features(flag: FeatureFlag)(implicit env: ConsoleEnvironment): Unit = { + env.updateFeatureSet(flag, include = false) + } + + // this command is intentionally not documented as part of the help system + def enable_features(flag: FeatureFlag)(implicit env: ConsoleEnvironment): Unit = { + env.updateFeatureSet(flag, include = true) + } + } + +} + +object ConsoleMacros extends ConsoleMacros with NamedLogging { + val loggerFactory = NamedLoggerFactory.root +} + +object DebuggingHelpers extends LazyLogging { + + def get_active_contracts( + ref: LocalParticipantReference, + limit: PositiveInt = PositiveInt.tryCreate(1000000), + ): (Map[String, String], Map[String, TemplateId]) = + get_active_contracts_helper( + ref, + alias => ref.testing.pcs_search(alias, activeSet = true, limit = limit), + ) + + def get_active_contracts_from_internal_db_state( + ref: ParticipantReference, + state: SyncStateInspection, + limit: PositiveInt = PositiveInt.tryCreate(1000000), + ): (Map[String, String], Map[String, TemplateId]) = + get_active_contracts_helper( + ref, + alias => + TraceContext.withNewTraceContext(implicit traceContext => + state.findContracts(alias, None, None, None, limit.value) + ), + ) + + private def get_active_contracts_helper( + ref: ParticipantReference, + lookup: DomainAlias => Seq[(Boolean, SerializableContract)], + ): (Map[String, String], Map[String, TemplateId]) = { + val syncAcs = ref.domains + .list_connected() + .map(_.domainAlias) + .flatMap(lookup) + .collect { + case (active, sc) if active => + (sc.contractId.coid, sc.contractInstance.unversioned.template.qualifiedName.toString()) + } + .toMap + val lapiAcs = ref.ledger_api.acs.of_all().map(ev => (ev.event.contractId, ev.templateId)).toMap + (syncAcs, lapiAcs) + } + + def 
diff_active_contracts(ref: LocalParticipantReference, limit: Int = 1000000): Unit = { + val (syncAcs, lapiAcs) = get_active_contracts(ref, limit) + if (syncAcs.sizeCompare(lapiAcs) != 0) { + logger.error(s"Sync ACS differs ${syncAcs.size} from Ledger API ACS ${lapiAcs.size} in size") + } + + val lapiSet = lapiAcs.keySet + val syncSet = syncAcs.keySet + + def compare[V]( + explain: String, + lft: Set[String], + rght: Set[String], + payload: Map[String, V], + ) = { + val delta = lft.diff(rght) + delta.foreach { key => + logger.info(s"${explain} ${key} ${payload.getOrElse(key, sys.error("should be there"))}") + } + } + + compare("Active in LAPI but not in SYNC", lapiSet, syncSet, lapiAcs) + compare("Active in SYNC but not in LAPI", syncSet, lapiSet, syncAcs) + + } + + def active_contracts_by_template( + ref: LocalParticipantReference, + limit: Int = 1000000, + ): (Map[String, Int], Map[TemplateId, Int]) = { + val (syncAcs, lapiAcs) = get_active_contracts(ref, limit) + val groupedSync = syncAcs.toSeq + .map { x => + x.swap + } + .groupBy(_._1) + .map(x => (x._1, x._2.length)) + val groupedLapi = lapiAcs.toSeq + .map { x => + x.swap + } + .groupBy(_._1) + .map(x => (x._1, x._2.length)) + (groupedSync, groupedLapi) + } + +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleOutput.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleOutput.scala new file mode 100644 index 0000000000..f42c59bb1c --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleOutput.scala @@ -0,0 +1,26 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.console + +/** Interface for output to the Console user. + */ +trait ConsoleOutput { + + /** By default, commands should not output anything to the user. 
So use this only if it absolutely has to be. + * + * In particular: + * - If there is an error, then report this to the log file. The log file will also be displayed to the user. + * - If a command completes successfully, do not output anything. + * - If a command returns some `value`, then make the command return the value (instead of printing the value to the console). + * This allows the user to access the value programmatically. + * (Make sure that `value.toString` creates a readable representation.) + */ + def info(message: String): Unit +} + +/** Logs directly to stdout and stderr. + */ +object StandardConsoleOutput extends ConsoleOutput { + override def info(message: String): Unit = Console.out.println(message) +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/FeatureFlag.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/FeatureFlag.scala new file mode 100644 index 0000000000..03ec49df06 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/FeatureFlag.scala @@ -0,0 +1,34 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.console + +sealed trait FeatureFlag { + def configName: String +} + +object FeatureFlag { + + object Stable extends FeatureFlag { + val configName = "enabled-by-default" + override def toString: String = "Stable" + } + + object Preview extends FeatureFlag { + val configName = "enable-preview-commands" + override def toString: String = "Preview" + } + + object Repair extends FeatureFlag { + val configName = "enable-repair-commands" + override def toString: String = "Repair" + } + + object Testing extends FeatureFlag { + val configName = "enable-testing-commands" + override def toString: String = "Testing" + } + + lazy val all = Set(Stable, Preview, Repair, Testing) + +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/HealthDumpGenerator.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/HealthDumpGenerator.scala new file mode 100644 index 0000000000..f6b3c469e7 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/HealthDumpGenerator.scala @@ -0,0 +1,127 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.console + +import better.files.File +import com.digitalasset.canton.admin.api.client.commands.StatusAdminCommands +import com.digitalasset.canton.admin.api.client.data.CantonStatus +import com.digitalasset.canton.config.LocalNodeConfig +import com.digitalasset.canton.console.CommandErrors.CommandError +import com.digitalasset.canton.environment.Environment +import com.digitalasset.canton.health.admin.data.NodeStatus +import com.digitalasset.canton.health.admin.{data, v0} +import com.digitalasset.canton.metrics.MetricsSnapshot +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.version.ReleaseVersion +import io.circe.Encoder +import io.circe.syntax.* + +import scala.annotation.nowarn + +/** Generates a health dump zip file containing information about the current Canton process + * This is the core of the implementation of the HealthDump gRPC endpoint. + */ +trait HealthDumpGenerator[Status <: CantonStatus] { + def status(): Status + def environment: Environment + def grpcAdminCommandRunner: GrpcAdminCommandRunner + protected implicit val statusEncoder: Encoder[Status] + + protected def getStatusForNode[S <: NodeStatus.Status]( + nodeName: String, + nodeConfig: LocalNodeConfig, + deserializer: v0.NodeStatus.Status => ParsingResult[S], + ): NodeStatus[S] = { + grpcAdminCommandRunner + .runCommand( + nodeName, + new StatusAdminCommands.GetStatus[S](deserializer), + nodeConfig.clientAdminApi, + None, + ) match { + case CommandSuccessful(value) => value + case err: CommandError => data.NodeStatus.Failure(err.cause) + } + } + + protected def statusMap[S <: NodeStatus.Status]( + nodes: Map[String, LocalNodeConfig], + deserializer: v0.NodeStatus.Status => ParsingResult[S], + ): Map[String, () => NodeStatus[S]] = { + nodes.map { case (nodeName, nodeConfig) => + nodeName -> (() => getStatusForNode[S](nodeName, nodeConfig, deserializer)) + } + } + 
+ @nowarn("cat=lint-byname-implicit") // https://github.com/scala/bug/issues/12072 + def generateHealthDump( + outputFile: File, + extraFilesToZip: Seq[File] = Seq.empty, + ): File = { + import io.circe.generic.auto.* + import CantonHealthAdministrationEncoders.* + + final case class EnvironmentInfo(os: String, javaVersion: String) + + final case class CantonDump( + releaseVersion: String, + environment: EnvironmentInfo, + config: String, + status: Status, + metrics: MetricsSnapshot, + traces: Map[Thread, Array[StackTraceElement]], + ) + + val javaVersion = System.getProperty("java.version") + val cantonVersion = ReleaseVersion.current.fullVersion + val env = EnvironmentInfo(sys.props("os.name"), javaVersion) + + val metricsSnapshot = MetricsSnapshot( + environment.metricsFactory.registry, + environment.configuredOpenTelemetry.onDemandMetricsReader, + ) + val config = environment.config.dumpString + + val traces = { + import scala.jdk.CollectionConverters.* + Thread.getAllStackTraces.asScala.toMap + } + + val dump = CantonDump(cantonVersion, env, config, status(), metricsSnapshot, traces) + + val logFile = + File( + sys.env + .get("LOG_FILE_NAME") + .orElse(sys.props.get("LOG_FILE_NAME")) // This is set in Cli.installLogging + .getOrElse("log/canton.log") + ) + + val logLastErrorsFile = File( + sys.env + .get("LOG_LAST_ERRORS_FILE_NAME") + .orElse(sys.props.get("LOG_LAST_ERRORS_FILE_NAME")) + .getOrElse("log/canton_errors.log") + ) + + // This is a guess based on the default logback config as to what the rolling log files look like + // If we want to be more robust we'd have to access logback directly, extract the pattern from there, and use it to + // glob files. 
+ val rollingLogs = logFile.siblings + .filter { f => + f.name.contains(logFile.name) && f.extension.contains(".gz") + } + .toSeq + .sortBy(_.name) + .take(environment.config.monitoring.dumpNumRollingLogFiles.unwrap) + + File.usingTemporaryFile("canton-dump-", ".json") { tmpFile => + tmpFile.append(dump.asJson.spaces2) + val files = Iterator(logFile, logLastErrorsFile, tmpFile).filter(_.nonEmpty) + outputFile.zipIn(files ++ extraFilesToZip.iterator ++ rollingLogs) + } + + outputFile + } +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/Help.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/Help.scala new file mode 100644 index 0000000000..338c6cc6cc --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/Help.scala @@ -0,0 +1,349 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.console + +import cats.syntax.functor.* +import com.digitalasset.canton.version.ProtocolVersion + +import scala.annotation.StaticAnnotation +import scala.reflect.ClassTag +import scala.reflect.runtime.universe as ru + +/** User friendly help messages generator. 
+ */ +object Help { + + private val defaultTopLevelTopicStr = "Top-level Commands" + val defaultTopLevelTopic = Seq(defaultTopLevelTopicStr) + + /** A short summary of the method (to be displayed in a list) + * + * Note that the annotation parser is also hard-coded to the default flag Stable + */ + final case class Summary(s: String, flag: FeatureFlag = FeatureFlag.Stable) + extends StaticAnnotation { + override def toString: String = s + } + + /** A longer description of the method */ + final case class Description(s: String) extends StaticAnnotation { + override def toString: String = s + } + + /** Indicates that a command is only available for domain running at least the specified protocolVersion. */ + final case class AvailableFrom(protocolVersion: ProtocolVersion) extends StaticAnnotation { + override def toString: String = protocolVersion.toString + } + + /** A sequence of strings classifying the method breadcrumb style (e.g. Seq("Participant", "Diagnostics")). + * Used as headings in the displayed help. 
+ */ + final case class Topic(t: Seq[String]) extends StaticAnnotation { + override def toString(): String = t.mkString(": ") + } + + /** A tag to indicate nesting of items */ + final case class Group(name: String) extends StaticAnnotation { + override def toString: String = name + } + + final case class MethodSignature(argsWithTypes: Seq[(String, String)], retType: String) { + val argString = "(" + argsWithTypes.map(arg => s"${arg._1}: ${arg._2}").mkString(", ") + ")" + val retString = s": $retType" + override def toString(): String = argString + retString + def noUnits(): String = + (if (argsWithTypes.isEmpty) "" else argString) + + (if (retType == "Unit") "" else retString) + } + + final case class Item( + name: String, + signature: Option[MethodSignature], + summary: Summary, + description: Description, + topic: Topic, + subItems: Seq[Item] = Seq.empty, + ) + + /** Generate help messages from an object instance using reflection, using only the given summaries. + * + * ARGUMENTS OF THE ANNOTATIONS MUST BE LITERALS (CONSTANTS) (e.g., Topic(topicVariable) does not work). + * + * All methods with a [[Summary]] annotation will be included. [[Description]] or [[Topic]] + * are also included if present, and are set to the empty string otherwise. + * We attempt to make as friendly as possible: + * - Unit types are not displayed + * - Empty argument lists are dropped + * - Implicits are hidden + * See corresponding tests for examples. 
+ */ + def forInstance[T: ClassTag]( + instance: T, + baseTopic: Seq[String] = Seq(), + scope: Set[FeatureFlag] = FeatureFlag.all, + ): String = { + // type extractor + val items = getItems(instance, baseTopic, scope) + format(items: _*) + } + + def forMethod[T: ClassTag]( + instance: T, + methodName: String, + scope: Set[FeatureFlag] = FeatureFlag.all, + ): String = + forMethod(getItems(instance, scope = scope), methodName) + + def forMethod(items: Seq[Item], methodName: String): String = { + def expand(item: Item): Seq[(String, Item)] = { + (item.name, item) +: item.subItems.flatMap(expand).map { case (mn, itm) => + (item.name + "." + mn, itm) + } + } + val expanded = items.flatMap(expand) + val matching = expanded.filter { case (itemName, _) => itemName == methodName } + if (matching.nonEmpty) { + matching + .map { case (itemName, item) => + formatItem(item.copy(name = itemName)) + } + .mkString(System.lineSeparator()) + } else { + val similarItems = expanded.map(_._1).filter(_.contains(methodName)).sorted.take(10) + if (similarItems.isEmpty) + s"Error: method $methodName not found; check your spelling" + else { + s"Error: method $methodName not found; are you looking for one of the following?\n ${similarItems + .mkString("\n ")}" + } + } + } + + def formatItem(item: Item): String = item match { + case Item(name, optSignature, summary, description, topic, group) => + val sigString = optSignature.map(_.noUnits()).getOrElse("") + val text = if (description.s.nonEmpty) description.toString else summary.toString + Seq(name + sigString, text).mkString(System.lineSeparator) + } + + private def extractItem( + mirror: ru.Mirror, + member: ru.Symbol, + baseTopic: Seq[String], + scope: Set[FeatureFlag], + ): Option[Item] = + memberDescription(member) + .filter { case (summary, _, _, _) => scope.contains(summary.flag) } + .map { case (summary, description, topic, group) => + val methodName = member.name.toString + val info = member.info + val name = methodName + val 
myTopic = Topic(baseTopic ++ topic.t ++ group.toList) + val summaries = member.typeSignature.members + .flatMap(s => + extractItem(mirror, s, myTopic.t, scope).toList + ) // filter to members that have `@Help.Summary` applied + val signature = Some(methodSignature(info)) + val topicOrDefault = + if (summaries.isEmpty && topic.t.isEmpty && group.isEmpty) + Topic(baseTopic ++ defaultTopLevelTopic) + else myTopic + Item(name, signature, summary, description, topicOrDefault, summaries.toSeq) + } + + def getItems[T: ClassTag]( + instance: T, + baseTopic: Seq[String] = Seq(), + scope: Set[FeatureFlag] = FeatureFlag.all, + ): Seq[Item] = { + val mirror = ru.runtimeMirror(getClass.getClassLoader) + val mirroredType = mirror.reflect(instance) + mirroredType.symbol.typeSignature.members + .flatMap(m => extractItem(mirror, m, baseTopic, scope).toList) + .toSeq + } + + def flattenItem(path: Seq[String])(item: Item): Seq[Item] = { + val newPath = path :+ item.name + item.copy(name = newPath.mkString(".")) +: item.subItems.flatMap(flattenItem(newPath)) + } + + def flattenItemsForManual(items: Seq[Item]): Seq[Item] = + items + .flatMap(flattenItem(Seq())) + .filter(_.subItems.isEmpty) + // strip trailing default topic such that we don't have to add "Top-level Commands" to every group in the manual + .map { itm => + itm.copy( + topic = + if ( + itm.topic.t.lengthCompare(1) > 0 && itm.topic.t.lastOption + .contains(defaultTopLevelTopicStr) + ) + Topic(itm.topic.t.take(itm.topic.t.length - 1)) + else itm.topic + ) + } + + def getItemsFlattenedForManual[T: ClassTag]( + instance: T, + baseTopic: Seq[String] = Seq(), + ): Seq[Item] = + flattenItemsForManual(getItems(instance, baseTopic)) + + @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) + private def methodSignature[T](typ: ru.Type): MethodSignature = { + val methodType = typ.asInstanceOf[ru.TypeApi] + + def excludeImplicits(symbols: List[ru.Symbol]): List[ru.Symbol] = + symbols.filter(!_.isImplicit) + + def 
until[U](p: U => Boolean, f: U => U)(x: U): U = + if (p(x)) x else until(p, f)(f(x)) + + val args = + excludeImplicits(methodType.paramLists.flatten).map(symb => + (symb.name.toString, symb.typeSignature.toString) + ) + // return types can contain implicit parameter lists; ensure that these are excluded + val returnType = + until[ru.Type](typ => !typ.paramLists.flatten.exists(_.isImplicit), _.resultType)( + methodType.resultType + ) + MethodSignature(args, returnType.toString) + } + + def fromObject[T: ClassTag](instance: T): Seq[Item] = + getItems(instance) + + /** Format help for named items and their descriptions + * @param items Tuple of name and description + */ + def format(items: Item*): String = { + def underline(s: String) = s + System.lineSeparator + "-" * s.length + System.lineSeparator() + val grouped = items + .filter(_.subItems.nonEmpty) + .sortBy(_.name) + .map { case Item(name, signature, Summary(summary, flag), description, topic, _) => + s"$name - $summary" + } + .toList + + val topLevel = items + .filter(_.subItems.isEmpty) + .groupBy(_.topic) + .fmap(descs => + descs + .sortBy(_.name) // sort alphabetically + .map { case Item(name, signature, Summary(summary, flag), description, topic, _) => + s"$name - $summary" + } + .mkString(System.lineSeparator) + ) + .toList + .sortWith { (x, y) => + (x._1.t, y._1.t) match { + case (`defaultTopLevelTopic`, _) => true + case (_, `defaultTopLevelTopic`) => false + case (lft, rght) => lft.mkString(".") < rght.mkString(".") + } + } + .map({ case (tobj @ Topic(topic), h) => + (if (topic.nonEmpty) underline(tobj.toString) else "") + h + }) + .mkString(System.lineSeparator + System.lineSeparator) + if (grouped.nonEmpty) { + topLevel + System.lineSeparator + System.lineSeparator + underline("Command Groups") + grouped + .mkString(System.lineSeparator) + } else topLevel + } + + private def memberDescription( + member: ru.Symbol + ): Option[(Summary, Description, Topic, Option[String])] = { + ( + 
member.annotations.map(fromAnnotation(_, summaryParser)).collect { case Some(s) => s }, + member.annotations.map(fromAnnotation(_, descriptionParser)).collect { case Some(s) => s }, + member.annotations.map(fromAnnotation(_, tagParser)).collect { case Some(s) => s }, + member.annotations.map(fromAnnotation(_, groupParser)).collect { case Some(s) => s }, + ) match { + case (Nil, _, _, _) => None + case (summary :: _sums, l2, l3, g4) => + Some( + ( + summary, + l2.headOption.getOrElse(Description("")), + l3.headOption.getOrElse(Topic(Seq())), + g4.headOption.map(_.name), + ) + ) + } + } + + /** The following definitions (fromAnnotation and Xparser) are quite nasty. + * They hackily reconstruct the Scala values from annotations, which contain ASTs. + * It's possible to use a reflection Toolbox to eval the annotations, but this is very slow (as it entails compiling the ASTs + * from the annotations) and results in large latencies for the help command. + */ + private def fromAnnotation[T: ru.TypeTag]( + annotation: ru.Annotation, + parser: ru.Tree => T, + ): Option[T] = { + if (annotation.tree.tpe.typeSymbol == ru.typeOf[T].typeSymbol) { + Some(parser(annotation.tree)) + } else None + } + + @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) + private def grabStringTag(tree: ru.Tree): String = + tree + .children(1) + .asInstanceOf[ru.Literal] + .value + .value + .asInstanceOf[String] + + @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) + private def summaryParser(tree: ru.Tree): Summary = { + def grabFeatureFlagFromSummary(tree: ru.Tree): FeatureFlag = + if (tree.children.lengthCompare(2) > 0) { + val tmp = tree.children(2).asInstanceOf[ru.Select] + if (tmp.symbol.isModule) { + reflect.runtime.currentMirror + .reflectModule(tmp.symbol.asModule) + .instance + .asInstanceOf[FeatureFlag] + } else FeatureFlag.Stable + } else FeatureFlag.Stable + Summary(grabStringTag(tree), grabFeatureFlagFromSummary(tree)) + } + + 
  // Parses @Help.Description annotations; the argument must be a plain String literal
  // (a `.stripMargin` applied at the annotation site would not survive AST extraction).
  @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf"))
  private def descriptionParser(tree: ru.Tree): Description = {
    try {
      Description(grabStringTag(tree).stripMargin)
    } catch {
      case x: RuntimeException =>
        // leave a comment for the poor developer that might run into the same issue ...
        println(
          "Failed to process description (description needs to be a string. i.e. don't apply stripmargin here ...): " + tree.toString
        )
        throw x
    }
  }

  // Parses @Help.Topic(Seq(...)): children(1) is the Seq.apply call, drop(1) skips
  // its function select, leaving the string-literal elements.
  @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf"))
  private def tagParser(tree: ru.Tree): Topic = {
    val args = tree
      .children(1)
      .children
      .drop(1)
      .map(l => l.asInstanceOf[ru.Literal].value.value.asInstanceOf[String])
    Topic(args)
  }

  // Parses @Help.Group("name") — a single string literal argument.
  @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf"))
  private def groupParser(tree: ru.Tree): Group = Group(grabStringTag(tree))

}
diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/Helpful.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/Helpful.scala
new file mode 100644
index 0000000000..f79ab7140e
--- /dev/null
+++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/Helpful.scala
@@ -0,0 +1,23 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.console

/** Implementors will have a `help` method available that will be callable from the Console.
  * Implementors should annotate appropriate methods with `@Help.Summary` to have them included.
  */
trait Helpful {

  /** Print a summary of all annotated commands on this instance, filtered by the
    * console's currently enabled feature set.
    */
  def help()(implicit consoleEnvironment: ConsoleEnvironment): Unit = {
    val featureSet = consoleEnvironment.featureSet
    consoleEnvironment.consoleOutput.info(Help.forInstance(this, scope = featureSet))
  }

  @Help.Summary("Help for specific commands (use help() or help(\"method\") for more information)")
  @Help.Topic(Seq("Top-level Commands"))
  def help(methodName: String)(implicit consoleEnvironment: ConsoleEnvironment): Unit =
    consoleEnvironment.consoleOutput.info(
      Help.forMethod(this, methodName, scope = consoleEnvironment.featureSet)
    )

}
diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/InstanceReference.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/InstanceReference.scala
new file mode 100644
index 0000000000..673e8f2efd
--- /dev/null
+++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/InstanceReference.scala
@@ -0,0 +1,810 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.console

import com.daml.lf.data.Ref.PackageId
import com.digitalasset.canton.*
import com.digitalasset.canton.admin.api.client.commands.GrpcAdminCommand
import com.digitalasset.canton.config.RequireTypes.Port
import com.digitalasset.canton.config.*
import com.digitalasset.canton.console.CommandErrors.NodeNotStarted
import com.digitalasset.canton.console.commands.*
import com.digitalasset.canton.crypto.Crypto
import com.digitalasset.canton.domain.config.RemoteDomainConfig
import com.digitalasset.canton.domain.{Domain, DomainNodeBootstrap}
import com.digitalasset.canton.environment.*
import com.digitalasset.canton.health.admin.data.{DomainStatus, NodeStatus, ParticipantStatus}
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging, TracedLogger}
import com.digitalasset.canton.participant.config.{
  BaseParticipantConfig,
  LocalParticipantConfig,
  RemoteParticipantConfig,
}
import com.digitalasset.canton.participant.domain.DomainConnectionConfig
import com.digitalasset.canton.participant.{
  ParticipantNode,
  ParticipantNodeBootstrapX,
  ParticipantNodeCommon,
  ParticipantNodeX,
}
import com.digitalasset.canton.sequencing.{SequencerConnection, SequencerConnections}
import com.digitalasset.canton.topology.{DomainId, NodeIdentity, ParticipantId}
import com.digitalasset.canton.tracing.NoTracing
import com.digitalasset.canton.util.ErrorUtil

import scala.concurrent.ExecutionContext
import scala.util.hashing.MurmurHash3

/** Base of all console references to Canton nodes (local or remote, domain or
  * participant). Combines command execution, help rendering, logging and
  * feature-flag filtering.
  */
trait InstanceReferenceCommon
    extends AdminCommandRunner
    with Helpful
    with NamedLogging
    with FeatureFlagFilter
    with PrettyPrinting {

  // Console-visible name of the node (e.g. "participant1").
  val name: String
  // Human-readable kind string (e.g. "Domain", "Participant") used in pretty output.
  protected val instanceType: String

  protected[canton] def executionContext: ExecutionContext

  // Rendered as e.g. `Participant 'participant1'` in the console.
  override def pretty: Pretty[InstanceReferenceCommon] =
    prettyOfString(inst => show"${inst.instanceType.unquoted} ${inst.name.singleQuoted}")

  val consoleEnvironment: ConsoleEnvironment

  override protected[console] def tracedLogger: TracedLogger = logger

  // Hash derived from the owning environment and the node name, so references to
  // the same node within the same console hash alike (pairs with equals in subtypes).
  override def hashCode(): Int = {
    val init = this.getClass.hashCode()
    val t1 = MurmurHash3.mix(init, consoleEnvironment.hashCode())
    val t2 = MurmurHash3.mix(t1, name.hashCode)
    t2
  }

  // this is just testing, because the cached values should remain unchanged in operation
  @Help.Summary("Clear locally cached variables", FeatureFlag.Testing)
  @Help.Description(
    "Some commands cache values on the client side. Use this command to explicitly clear the caches of these values."
  )
  def clear_cache(): Unit = {
    topology.clearCache()
  }

  // Concrete node kinds narrow this to their specific status type.
  type Status <: NodeStatus.Status

  def id: NodeIdentity

  def health: HealthAdministrationCommon[Status]

  def keys: KeyAdministrationGroup

  def topology: TopologyAdministrationGroupCommon
}

/** Reference to "Old" daml 2.x nodes have:
  *   - parties admin commands
  *   - "old" topology admin commands based on "old" TopologyChangeOp
  */
trait InstanceReference extends InstanceReferenceCommon {
  def parties: PartiesAdministrationGroup
  override def topology: TopologyAdministrationGroup
}

/** InstanceReferenceX with different topology administration x
  */
trait InstanceReferenceX extends InstanceReferenceCommon {
  override def topology: TopologyAdministrationGroupX

  // Lazily constructed so `topology` is available at initialization time.
  private lazy val trafficControl_ =
    new TrafficControlAdministrationGroup(
      this,
      topology,
      this,
      consoleEnvironment,
      loggerFactory,
    )
  @Help.Summary("Traffic control related commands")
  @Help.Group("Traffic")
  def traffic_control: TrafficControlAdministrationGroup = trafficControl_
}

/** Pointer for a potentially running instance by instance type (domain/participant) and its id.
  * These methods define the REPL interface for these instances (e.g.
  * participant1 start)
  */
trait LocalInstanceReferenceCommon extends InstanceReferenceCommon with NoTracing {

  val name: String
  val consoleEnvironment: ConsoleEnvironment
  // Handle to the in-process node manager used to start/stop/migrate this node.
  private[console] val nodes: Nodes[CantonNode, CantonNodeBootstrap[CantonNode]]

  @Help.Summary("Database related operations")
  @Help.Group("Database")
  object db extends Helpful {

    @Help.Summary("Migrates the instance's database if using a database storage")
    def migrate(): Unit = consoleEnvironment.run(migrateDbCommand())

    @Help.Summary(
      "Only use when advised - repairs the database migration of the instance's database"
    )
    @Help.Description(
      """In some rare cases, we change already applied database migration files in a new release and the repair
        |command resets the checksums we use to ensure that in general already applied migration files have not been changed.
        |You should only use `db.repair_migration` when advised and otherwise use it at your own risk - in the worst case running
        |it may lead to data corruption when an incompatible database migration (one that should be rejected because
        |the already applied database migration files have changed) is subsequently falsely applied.
        |"""
    )
    def repair_migration(force: Boolean = false): Unit =
      consoleEnvironment.run(repairMigrationCommand(force))

  }

  @Help.Summary("Start the instance")
  def start(): Unit = consoleEnvironment.run(startCommand())

  @Help.Summary("Stop the instance")
  def stop(): Unit = consoleEnvironment.run(stopCommand())

  @Help.Summary("Check if the local instance is running")
  def is_running: Boolean = nodes.isRunning(name)

  @Help.Summary("Check if the local instance is running and is fully initialized")
  def is_initialized: Boolean = nodes.getRunning(name).exists(_.isInitialized)

  @Help.Summary("Config of node instance")
  def config: LocalNodeConfig

  @Help.Summary("Manage public and secret keys")
  @Help.Group("Keys")
  override def keys: LocalKeyAdministrationGroup = _keys

  private val _keys =
    new LocalKeyAdministrationGroup(this, this, consoleEnvironment, crypto, loggerFactory)(
      executionContext
    )

  // Adapters from Either[StartupError, _] results to console command results.
  private[console] def migrateDbCommand(): ConsoleCommandResult[Unit] =
    migrateInstanceDb().toResult(_.message, _ => ())

  private[console] def repairMigrationCommand(force: Boolean): ConsoleCommandResult[Unit] =
    repairMigrationOfInstance(force).toResult(_.message, _ => ())

  private[console] def startCommand(): ConsoleCommandResult[Unit] =
    startInstance()
      .toResult({
        // Enrich the pending-migration failure with the exact console command to run.
        case m: PendingDatabaseMigration =>
          s"${m.message} Please run `${m.name}.db.migrate` to apply pending migrations"
        case m => m.message
      })

  private[console] def stopCommand(): ConsoleCommandResult[Unit] =
    try {
      stopInstance().toResult(_.message)
    } finally {
      // Always drop client-side caches on stop; log (don't propagate) any failure doing so.
      ErrorUtil.withThrowableLogging(clear_cache())
    }

  protected def migrateInstanceDb(): Either[StartupError, _] = nodes.migrateDatabase(name)
  // Repair requires an explicit force=true acknowledgement before touching migration checksums.
  protected def repairMigrationOfInstance(force: Boolean): Either[StartupError, Unit] = {
    Either
      .cond(force, (), DidntUseForceOnRepairMigration(name))
      .flatMap(_ => nodes.repairDatabaseMigration(name))
  }

  protected def startInstance(): Either[StartupError, Unit] =
    nodes.startAndWait(name)
  protected def stopInstance(): Either[ShutdownError, Unit] = nodes.stopAndWait(name)
  protected[canton] def crypto: Crypto

  // Guard: commands against a stopped node fail fast with NodeNotStarted instead of a gRPC error.
  protected def runCommandIfRunning[Result](
      runner: => ConsoleCommandResult[Result]
  ): ConsoleCommandResult[Result] =
    if (is_running)
      runner
    else
      NodeNotStarted.ErrorCanton(this)

  override protected[console] def adminCommand[Result](
      grpcCommand: GrpcAdminCommand[_, _, Result]
  ): ConsoleCommandResult[Result] = {
    runCommandIfRunning(
      consoleEnvironment.grpcAdminCommandRunner
        .runCommand(name, grpcCommand, config.clientAdminApi, None)
    )
  }

}

trait LocalInstanceReference extends LocalInstanceReferenceCommon with InstanceReference
trait LocalInstanceReferenceX extends LocalInstanceReferenceCommon with InstanceReferenceX

/** Remote nodes can only manage keys through the admin API (no local crypto store). */
trait RemoteInstanceReference extends InstanceReferenceCommon {
  @Help.Summary("Manage public and secret keys")
  @Help.Group("Keys")
  override val keys: KeyAdministrationGroup =
    new KeyAdministrationGroup(this, this, consoleEnvironment, loggerFactory)
}

/** Routes admin commands to a remote node over gRPC using its client config. */
trait GrpcRemoteInstanceReference extends RemoteInstanceReference {

  def config: NodeConfig

  override protected[console] def adminCommand[Result](
      grpcCommand: GrpcAdminCommand[_, _, Result]
  ): ConsoleCommandResult[Result] =
    consoleEnvironment.grpcAdminCommandRunner.runCommand(
      name,
      grpcCommand,
      config.clientAdminApi,
      None,
    )
}

object DomainReference {
  val InstanceType = "Domain"
}

/** Console reference to a (2.x-style) domain node: topology, parties, sequencer
  * and mediator administration.
  */
trait DomainReference
    extends InstanceReference
    with DomainAdministration
    with InstanceReferenceWithSequencer {
  val consoleEnvironment: ConsoleEnvironment
  val name: String

  override protected val instanceType: String = DomainReference.InstanceType

  override type Status = DomainStatus

  @Help.Summary("Health and diagnostic related commands")
  @Help.Group("Health")
  override def health =
    new HealthAdministration[DomainStatus](
      this,
      consoleEnvironment,
      DomainStatus.fromProtoV0,
    )

  @Help.Summary(
    "Yields the globally unique id of this domain. " +
      "Throws an exception, if the id has not yet been allocated (e.g., the domain has not yet been started)."
  )
  def id: DomainId = topology.idHelper(DomainId(_))

  // The topology group carries the live topology-queue status from the health endpoint.
  private lazy val topology_ =
    new TopologyAdministrationGroup(
      this,
      this.health.status.successOption.map(_.topologyQueue),
      consoleEnvironment,
      loggerFactory,
    )
  @Help.Summary("Topology management related commands")
  @Help.Group("Topology")
  @Help.Description("This group contains access to the full set of topology management commands.")
  override def topology: TopologyAdministrationGroup = topology_

  override protected val loggerFactory: NamedLoggerFactory = NamedLoggerFactory("domain", name)

  // Two references are equal when they point at the same named node in the same console
  // environment (consistent with hashCode in InstanceReferenceCommon).
  override def equals(obj: Any): Boolean = {
    obj match {
      case x: DomainReference => x.consoleEnvironment == consoleEnvironment && x.name == name
      case _ => false
    }
  }

  @Help.Summary("Inspect configured parties")
  @Help.Group("Parties")
  override def parties: PartiesAdministrationGroup = partiesGroup

  // above command needs to be def such that `Help` works.
  lazy private val partiesGroup = new PartiesAdministrationGroup(this, consoleEnvironment)

  private lazy val sequencer_ =
    new SequencerAdministrationGroup(this, consoleEnvironment, loggerFactory)
  @Help.Summary("Manage the sequencer")
  @Help.Group("Sequencer")
  override def sequencer: SequencerAdministrationGroup = sequencer_

  private lazy val mediator_ =
    new MediatorAdministrationGroup(this, consoleEnvironment, loggerFactory)
  @Help.Summary("Manage the mediator")
  @Help.Group("Mediator")
  def mediator: MediatorAdministrationGroup = mediator_

  @Help.Summary(
    "Yields a domain connection config with default values except for the domain alias and the sequencer connection. " +
      "May throw an exception if the domain alias or sequencer connection is misconfigured."
  )
  def defaultDomainConnection: DomainConnectionConfig =
    DomainConnectionConfig(
      DomainAlias.tryCreate(name),
      SequencerConnections.single(sequencerConnection),
    )
}

/** Domain reference backed by a remote node; config comes from the console's
  * remote-domain configuration section.
  */
trait RemoteDomainReference extends DomainReference with GrpcRemoteInstanceReference {
  val consoleEnvironment: ConsoleEnvironment
  val name: String

  @Help.Summary("Returns the remote domain configuration")
  def config: RemoteDomainConfig =
    consoleEnvironment.environment.config.remoteDomainsByString(name)

  // A misconfigured public API is a hard error: the reference is unusable without it.
  override def sequencerConnection: SequencerConnection =
    config.publicApi.toConnection
      .fold(
        err => sys.error(s"Domain $name has invalid sequencer connection config: $err"),
        identity,
      )

}

// Marker self-type for community-edition domain references.
trait CommunityDomainReference {
  this: DomainReference =>
}

class CommunityRemoteDomainReference(val consoleEnvironment: ConsoleEnvironment, val name: String)
    extends DomainReference
    with CommunityDomainReference
    with RemoteDomainReference {

  override protected[canton] def executionContext: ExecutionContext =
    consoleEnvironment.environment.executionContext
}

trait InstanceReferenceWithSequencerConnection extends InstanceReferenceCommon {
  def sequencerConnection: SequencerConnection
}
trait InstanceReferenceWithSequencer extends InstanceReferenceWithSequencerConnection {
  def sequencer: SequencerAdministrationGroup
}

/** Domain reference backed by an in-process node; adds node inspection and
  * lifecycle management on top of DomainReference.
  */
trait LocalDomainReference
    extends DomainReference
    with BaseInspection[Domain]
    with LocalInstanceReference {
  override private[console] val nodes = consoleEnvironment.environment.domains

  @Help.Summary("Returns the domain configuration")
  def config: consoleEnvironment.environment.config.DomainConfigType =
    consoleEnvironment.environment.config.domainsByString(name)

  override def sequencerConnection: SequencerConnection =
    config.sequencerConnectionConfig.toConnection
      .fold(
        err => sys.error(s"Domain $name has invalid sequencer connection config: $err"),
        identity,
      )

  override protected[console] def
runningNode: Option[DomainNodeBootstrap] = + consoleEnvironment.environment.domains.getRunning(name) + + override protected[console] def startingNode: Option[DomainNodeBootstrap] = + consoleEnvironment.environment.domains.getStarting(name) +} + +class CommunityLocalDomainReference( + override val consoleEnvironment: ConsoleEnvironment, + val name: String, + override protected[canton] val executionContext: ExecutionContext, +) extends DomainReference + with CommunityDomainReference + with LocalDomainReference + +/** Bare, Canton agnostic parts of the ledger-api client + * + * This implementation allows to access any kind of ledger-api client, which does not need to be Canton based. + * However, this comes at some cost, as some of the synchronization between nodes during transaction submission + * is not supported + * + * @param hostname the hostname of the ledger api server + * @param port the port of the ledger api server + * @param tls the tls config to use on the client + * @param token the jwt token to use on the client + */ +class ExternalLedgerApiClient( + hostname: String, + port: Port, + tls: Option[TlsClientConfig], + val token: Option[String] = None, +)(implicit val consoleEnvironment: ConsoleEnvironment) + extends BaseLedgerApiAdministration + with LedgerApiCommandRunner + with FeatureFlagFilter + with NamedLogging { + + override protected val name: String = s"$hostname:${port.unwrap}" + + override val loggerFactory: NamedLoggerFactory = + consoleEnvironment.environment.loggerFactory.append("client", name) + + override protected def domainOfTransaction(transactionId: String): DomainId = + throw new NotImplementedError("domain_of is not implemented for external ledger api clients") + + override protected[console] def ledgerApiCommand[Result]( + command: GrpcAdminCommand[_, _, Result] + ): ConsoleCommandResult[Result] = + consoleEnvironment.grpcAdminCommandRunner + .runCommand("sourceLedger", command, ClientConfig(hostname, port, tls), token) + + override 
protected def optionallyAwait[Tx]( + tx: Tx, + txId: String, + optTimeout: Option[NonNegativeDuration], + ): Tx = tx + +} + +object ExternalLedgerApiClient { + + def forReference(participant: LocalParticipantReference, token: String)(implicit + env: ConsoleEnvironment + ): ExternalLedgerApiClient = { + val cc = participant.config.ledgerApi.clientConfig + new ExternalLedgerApiClient( + cc.address, + cc.port, + cc.tls, + Some(token), + ) + } +} + +object ParticipantReference { + val InstanceType = "Participant" +} + +sealed trait ParticipantReferenceCommon + extends ConsoleCommandGroup + with ParticipantAdministration + with LedgerApiAdministration + with LedgerApiCommandRunner + with AdminCommandRunner + with InstanceReferenceCommon { + + override type Status = ParticipantStatus + + override protected val loggerFactory: NamedLoggerFactory = + consoleEnvironment.environment.loggerFactory.append("participant", name) + + @Help.Summary( + "Yields the globally unique id of this participant. " + + "Throws an exception, if the id has not yet been allocated (e.g., the participant has not yet been started)." 
+ ) + override def id: ParticipantId = topology.idHelper(ParticipantId(_)) + + def config: BaseParticipantConfig + + @Help.Summary("Commands used for development and testing", FeatureFlag.Testing) + @Help.Group("Testing") + def testing: ParticipantTestingGroup + + @Help.Summary("Commands to prune the archive of the ledger", FeatureFlag.Preview) + @Help.Group("Ledger Pruning") + def pruning: ParticipantPruningAdministrationGroup = pruning_ + private lazy val pruning_ = + new ParticipantPruningAdministrationGroup(this, consoleEnvironment, loggerFactory) + + @Help.Summary("Manage participant replication") + @Help.Group("Replication") + def replication: ParticipantReplicationAdministrationGroup = replicationGroup + lazy private val replicationGroup = + new ParticipantReplicationAdministrationGroup(this, consoleEnvironment) + + @Help.Summary("Commands to repair the participant contract state", FeatureFlag.Repair) + @Help.Group("Repair") + def repair: ParticipantRepairAdministration + + override def health + : HealthAdministrationCommon[ParticipantStatus] & ParticipantHealthAdministrationCommon + +} + +abstract class ParticipantReference( + override val consoleEnvironment: ConsoleEnvironment, + val name: String, +) extends ParticipantReferenceCommon + with InstanceReference { + + protected def runner: AdminCommandRunner = this + + override protected val instanceType: String = ParticipantReference.InstanceType + + @Help.Summary("Health and diagnostic related commands") + @Help.Group("Health") + override def health: ParticipantHealthAdministration = + new ParticipantHealthAdministration(this, consoleEnvironment, loggerFactory) + + @Help.Summary("Inspect and manage parties") + @Help.Group("Parties") + def parties: ParticipantPartiesAdministrationGroup + + @Help.Summary( + "Yields the globally unique id of this participant. " + + "Throws an exception, if the id has not yet been allocated (e.g., the participant has not yet been started)." 
+ ) + override def id: ParticipantId = topology.idHelper(ParticipantId(_)) + + private lazy val topology_ = + new TopologyAdministrationGroup( + this, + health.status.successOption.map(_.topologyQueue), + consoleEnvironment, + loggerFactory, + ) + @Help.Summary("Topology management related commands") + @Help.Group("Topology") + @Help.Description("This group contains access to the full set of topology management commands.") + def topology: TopologyAdministrationGroup = topology_ + override protected def vettedPackagesOfParticipant(): Set[PackageId] = topology.vetted_packages + .list(filterStore = "Authorized", filterParticipant = id.filterString) + .flatMap(_.item.packageIds) + .toSet + + override protected def participantIsActiveOnDomain( + domainId: DomainId, + participantId: ParticipantId, + ): Boolean = topology.participant_domain_states.active(domainId, participantId) +} + +sealed trait RemoteParticipantReferenceCommon + extends LedgerApiCommandRunner + with ParticipantReferenceCommon { + + def config: RemoteParticipantConfig + + override protected[console] def ledgerApiCommand[Result]( + command: GrpcAdminCommand[_, _, Result] + ): ConsoleCommandResult[Result] = + consoleEnvironment.grpcAdminCommandRunner.runCommand( + name, + command, + config.clientLedgerApi, + config.token, + ) + + override protected[console] def token: Option[String] = config.token + + private lazy val testing_ = new ParticipantTestingGroup(this, consoleEnvironment, loggerFactory) + @Help.Summary("Commands used for development and testing", FeatureFlag.Testing) + @Help.Group("Testing") + override def testing: ParticipantTestingGroup = testing_ + + private lazy val repair_ = + new ParticipantRepairAdministration(consoleEnvironment, this, loggerFactory) + + @Help.Summary("Commands to repair the participant contract state", FeatureFlag.Repair) + @Help.Group("Repair") + def repair: ParticipantRepairAdministration = repair_ +} + +class RemoteParticipantReference(environment: ConsoleEnvironment, 
override val name: String) + extends ParticipantReference(environment, name) + with GrpcRemoteInstanceReference + with RemoteParticipantReferenceCommon { + + @Help.Summary("Inspect and manage parties") + @Help.Group("Parties") + def parties: ParticipantPartiesAdministrationGroup = partiesGroup + + // above command needs to be def such that `Help` works. + lazy private val partiesGroup = + new ParticipantPartiesAdministrationGroup(id, this, consoleEnvironment) + + @Help.Summary("Return remote participant config") + def config: RemoteParticipantConfig = + consoleEnvironment.environment.config.remoteParticipantsByString(name) + + override def equals(obj: Any): Boolean = { + obj match { + case x: RemoteParticipantReference => + x.consoleEnvironment == consoleEnvironment && x.name == name + case _ => false + } + } + +} + +sealed trait LocalParticipantReferenceCommon + extends LedgerApiCommandRunner + with ParticipantReferenceCommon + with LocalInstanceReferenceCommon { + + def config: LocalParticipantConfig + + def adminToken: Option[String] + + override protected[console] def ledgerApiCommand[Result]( + command: GrpcAdminCommand[_, _, Result] + ): ConsoleCommandResult[Result] = + runCommandIfRunning( + consoleEnvironment.grpcAdminCommandRunner + .runCommand(name, command, config.clientLedgerApi, adminToken) + ) + + override protected[console] def token: Option[String] = adminToken + + @Help.Summary("Commands to repair the local participant contract state", FeatureFlag.Repair) + @Help.Group("Repair") + def repair: LocalParticipantRepairAdministration +} + +class LocalParticipantReference( + override val consoleEnvironment: ConsoleEnvironment, + name: String, +) extends ParticipantReference(consoleEnvironment, name) + with LocalParticipantReferenceCommon + with LocalInstanceReference + with BaseInspection[ParticipantNode] { + + override private[console] val nodes = consoleEnvironment.environment.participants + + @Help.Summary("Return participant config") + def config: 
LocalParticipantConfig = + consoleEnvironment.environment.config.participantsByString(name) + + private lazy val testing_ = + new LocalParticipantTestingGroup(this, consoleEnvironment, loggerFactory) + @Help.Summary("Commands used for development and testing", FeatureFlag.Testing) + override def testing: LocalParticipantTestingGroup = testing_ + + private lazy val commitments_ = + new LocalCommitmentsAdministrationGroup(this, consoleEnvironment, loggerFactory) + @Help.Summary("Commands to inspect and extract bilateral commitments", FeatureFlag.Preview) + @Help.Group("Commitments") + def commitments: LocalCommitmentsAdministrationGroup = commitments_ + + private lazy val repair_ = + new LocalParticipantRepairAdministration(consoleEnvironment, this, loggerFactory) { + override protected def access[T](handler: ParticipantNodeCommon => T): T = + LocalParticipantReference.this.access(handler) + } + @Help.Summary("Commands to repair the local participant contract state", FeatureFlag.Repair) + @Help.Group("Repair") + def repair: LocalParticipantRepairAdministration = repair_ + + @Help.Summary("Inspect and manage parties") + @Help.Group("Parties") + override def parties: LocalParticipantPartiesAdministrationGroup = partiesGroup + // above command needs to be def such that `Help` works. 
+ lazy private val partiesGroup = + new LocalParticipantPartiesAdministrationGroup(this, this, consoleEnvironment, loggerFactory) + + /** secret, not publicly documented way to get the admin token */ + def adminToken: Option[String] = underlying.map(_.adminToken.secret) + + override def equals(obj: Any): Boolean = { + obj match { + case x: LocalParticipantReference => + x.consoleEnvironment == consoleEnvironment && x.name == name + case _ => false + } + } + + override def runningNode: Option[CantonNodeBootstrap[ParticipantNode]] = + consoleEnvironment.environment.participants.getRunning(name) + + override def startingNode: Option[CantonNodeBootstrap[ParticipantNode]] = + consoleEnvironment.environment.participants.getStarting(name) + +} + +abstract class ParticipantReferenceX( + override val consoleEnvironment: ConsoleEnvironment, + val name: String, +) extends ParticipantReferenceCommon + with InstanceReferenceX { + + override protected val instanceType: String = ParticipantReferenceX.InstanceType + override protected def runner: AdminCommandRunner = this + + @Help.Summary("Health and diagnostic related commands") + @Help.Group("Health") + override def health: ParticipantHealthAdministrationX = + new ParticipantHealthAdministrationX(this, consoleEnvironment, loggerFactory) + + @Help.Summary("Inspect and manage parties") + @Help.Group("Parties") + def parties: ParticipantPartiesAdministrationGroupX + + private lazy val topology_ = + new TopologyAdministrationGroupX( + this, + health.status.successOption.map(_.topologyQueue), + consoleEnvironment, + loggerFactory, + ) + @Help.Summary("Topology management related commands") + @Help.Group("Topology") + @Help.Description("This group contains access to the full set of topology management commands.") + override def topology: TopologyAdministrationGroupX = topology_ + override protected def vettedPackagesOfParticipant(): Set[PackageId] = topology.vetted_packages + .list(filterStore = "Authorized", filterParticipant = 
id.filterString) + .flatMap(_.item.packageIds) + .toSet + override protected def participantIsActiveOnDomain( + domainId: DomainId, + participantId: ParticipantId, + ): Boolean = topology.domain_trust_certificates.active(domainId, participantId) + +} +object ParticipantReferenceX { + val InstanceType = "ParticipantX" +} + +class RemoteParticipantReferenceX(environment: ConsoleEnvironment, override val name: String) + extends ParticipantReferenceX(environment, name) + with GrpcRemoteInstanceReference + with RemoteParticipantReferenceCommon { + + @Help.Summary("Inspect and manage parties") + @Help.Group("Parties") + override def parties: ParticipantPartiesAdministrationGroupX = partiesGroup + + // above command needs to be def such that `Help` works. + lazy private val partiesGroup = + new ParticipantPartiesAdministrationGroupX(id, this, consoleEnvironment) + + @Help.Summary("Return remote participant config") + def config: RemoteParticipantConfig = + consoleEnvironment.environment.config.remoteParticipantsByStringX(name) + + override def equals(obj: Any): Boolean = { + obj match { + case x: RemoteParticipantReference => + x.consoleEnvironment == consoleEnvironment && x.name == name + case _ => false + } + } + +} + +class LocalParticipantReferenceX( + override val consoleEnvironment: ConsoleEnvironment, + name: String, +) extends ParticipantReferenceX(consoleEnvironment, name) + with LocalParticipantReferenceCommon + with LocalInstanceReferenceX + with BaseInspection[ParticipantNodeX] { + + override private[console] val nodes = consoleEnvironment.environment.participantsX + + @Help.Summary("Return participant config") + def config: LocalParticipantConfig = + consoleEnvironment.environment.config.participantsByStringX(name) + + override def runningNode: Option[ParticipantNodeBootstrapX] = + consoleEnvironment.environment.participantsX.getRunning(name) + + override def startingNode: Option[ParticipantNodeBootstrapX] = + 
consoleEnvironment.environment.participantsX.getStarting(name) + + /** secret, not publicly documented way to get the admin token */ + def adminToken: Option[String] = underlying.map(_.adminToken.secret) + + // TODO(#14048) these are "remote" groups. the normal participant node has "local" versions. + // but rather than keeping this, we should make local == remote and add local methods separately + @Help.Summary("Inspect and manage parties") + @Help.Group("Parties") + def parties: LocalParticipantPartiesAdministrationGroupX = partiesGroup + // above command needs to be def such that `Help` works. + lazy private val partiesGroup = + new LocalParticipantPartiesAdministrationGroupX(this, this, consoleEnvironment, loggerFactory) + + private lazy val testing_ = new ParticipantTestingGroup(this, consoleEnvironment, loggerFactory) + @Help.Summary("Commands used for development and testing", FeatureFlag.Testing) + @Help.Group("Testing") + override def testing: ParticipantTestingGroup = testing_ + + private lazy val repair_ = + new LocalParticipantRepairAdministration(consoleEnvironment, this, loggerFactory) { + override protected def access[T](handler: ParticipantNodeCommon => T): T = + LocalParticipantReferenceX.this.access(handler) + } + + @Help.Summary("Commands to repair the local participant contract state", FeatureFlag.Repair) + @Help.Group("Repair") + def repair: LocalParticipantRepairAdministration = repair_ +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/InstancesExtensions.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/InstancesExtensions.scala new file mode 100644 index 0000000000..1c741bde45 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/InstancesExtensions.scala @@ -0,0 +1,75 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.console + +import com.digitalasset.canton.environment.{CantonNode, CantonNodeBootstrap, Nodes} +import com.digitalasset.canton.tracing.TraceContext + +/** Aliases to manage a sequence of instances in a REPL environment + */ +trait LocalInstancesExtensions extends Helpful { + + import ConsoleCommandResult.runAll + + def instances: Seq[LocalInstanceReferenceCommon] + + @Help.Summary("Database management related operations") + @Help.Group("Database") + object db extends Helpful { + + @Help.Summary("Migrate all databases") + def migrate()(implicit consoleEnvironment: ConsoleEnvironment): Unit = { + val _ = runAll(instances.sorted(consoleEnvironment.startupOrdering)) { + _.migrateDbCommand() + } + } + + @Help.Summary("Only use when advised - repair the database migration of all nodes") + @Help.Description( + """In some rare cases, we change already applied database migration files in a new release and the repair + |command resets the checksums we use to ensure that in general already applied migration files have not been changed. + |You should only use `db.repair_migration` when advised and otherwise use it at your own risk - in the worst case running + |it may lead to data corruption when an incompatible database migration (one that should be rejected because + |the already applied database migration files have changed) is subsequently falsely applied. 
+ |""" + ) + def repair_migration( + force: Boolean = false + )(implicit consoleEnvironment: ConsoleEnvironment): Unit = { + val _ = runAll(instances.sorted(consoleEnvironment.startupOrdering)) { + _.repairMigrationCommand(force) + } + } + + } + + private def runOnAllInstances[T]( + cmd: Seq[(String, Nodes[CantonNode, CantonNodeBootstrap[CantonNode]])] => Either[T, Unit] + )(implicit consoleEnvironment: ConsoleEnvironment): Unit = + consoleEnvironment.runE(cmd(instances.map(x => (x.name, x.nodes)))) + + @Help.Summary("Start all") + def start()(implicit consoleEnvironment: ConsoleEnvironment): Unit = + TraceContext.withNewTraceContext { implicit traceContext => + runOnAllInstances(consoleEnvironment.environment.startNodes(_)) + } + + @Help.Summary("Stop all") + def stop()(implicit consoleEnvironment: ConsoleEnvironment): Unit = + TraceContext.withNewTraceContext { implicit traceContext => + runOnAllInstances(consoleEnvironment.environment.stopNodes(_)) + } + +} + +object LocalInstancesExtensions { + class Impl(val instances: Seq[LocalInstanceReferenceCommon]) extends LocalInstancesExtensions {} +} + +class LocalDomainReferencesExtensions(domains: Seq[LocalDomainReference]) + extends LocalInstancesExtensions { + + override def instances: Seq[LocalDomainReference] = domains + +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/ParticipantReferencesExtensions.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/ParticipantReferencesExtensions.scala new file mode 100644 index 0000000000..0859e9a3a3 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/ParticipantReferencesExtensions.scala @@ -0,0 +1,146 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.console + +import cats.syntax.traverse.* +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.config.NonNegativeDuration +import com.digitalasset.canton.console.commands.ParticipantCommands +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.participant.domain.DomainConnectionConfig +import com.digitalasset.canton.{DomainAlias, SequencerAlias} + +class ParticipantReferencesExtensions(participants: Seq[ParticipantReferenceCommon])(implicit + override val consoleEnvironment: ConsoleEnvironment +) extends Helpful + with NamedLogging + with FeatureFlagFilter { + + protected override def loggerFactory: NamedLoggerFactory = + consoleEnvironment.environment.loggerFactory + + @Help.Summary("Manage dars on several participants at once") + @Help.Group("DAR Management") + object dars extends Helpful { + @Help.Summary("Upload DARs to participants") + @Help.Description( + """If vetAllPackages is true, the participants will vet the package on all domains they are registered. 
+ If synchronizeVetting is true, the command will block until the package vetting transaction has been registered with all connected domains.""" + ) + def upload( + darPath: String, + vetAllPackages: Boolean = true, + synchronizeVetting: Boolean = true, + ): Map[ParticipantReferenceCommon, String] = { + val res = ConsoleCommandResult.runAll(participants)( + ParticipantCommands.dars + .upload( + _, + darPath, + vetAllPackages = vetAllPackages, + synchronizeVetting = synchronizeVetting, + logger, + ) + ) + if (synchronizeVetting && vetAllPackages) { + participants.foreach(_.packages.synchronize_vetting()) + } + res + } + } + + @Help.Summary("Manage domain connections on several participants at once") + @Help.Group("Domains") + object domains extends Helpful { + + @Help.Summary("Disconnect from domain") + def disconnect(alias: DomainAlias): Unit = + ConsoleCommandResult + .runAll(participants)(ParticipantCommands.domains.disconnect(_, alias)) + .discard + + @Help.Summary("Disconnect from a local domain") + def disconnect_local(domain: LocalDomainReference): Unit = + ConsoleCommandResult + .runAll(participants)( + ParticipantCommands.domains.disconnect(_, DomainAlias.tryCreate(domain.name)) + ) + .discard + + @Help.Summary("Reconnect to domain") + @Help.Description( + "If retry is set to true (default), the command will return after the first attempt, but keep on trying in the background." + ) + def reconnect(alias: DomainAlias, retry: Boolean = true): Unit = + ConsoleCommandResult + .runAll(participants)( + ParticipantCommands.domains.reconnect(_, alias, retry) + ) + .discard + + @Help.Summary("Reconnect to all domains for which `manualStart` = false") + @Help.Description( + """If ignoreFailures is set to true (default), the reconnect all will succeed even if some domains are offline. 
+ | The participants will continue attempting to establish a domain connection.""" + ) + def reconnect_all(ignoreFailures: Boolean = true): Unit = { + val _ = ConsoleCommandResult.runAll(participants)( + ParticipantCommands.domains.reconnect_all(_, ignoreFailures = ignoreFailures) + ) + } + + @Help.Summary("Disconnect from all connected domains") + def disconnect_all(): Unit = + ConsoleCommandResult + .runAll(participants) { p => + ConsoleCommandResult.fromEither(for { + connected <- ParticipantCommands.domains.list_connected(p).toEither + _ <- connected + .traverse(d => ParticipantCommands.domains.disconnect(p, d.domainAlias).toEither) + } yield ()) + } + .discard + + @Help.Summary("Register and potentially connect to domain") + def register(config: DomainConnectionConfig): Unit = + ConsoleCommandResult + .runAll(participants)(ParticipantCommands.domains.register(_, config)) + .discard + + @Help.Summary("Register and potentially connect to new local domain") + @Help.Description(""" + The arguments are: + domain - A local domain or sequencer reference + manualConnect - Whether this connection should be handled manually and also excluded from automatic re-connect. + synchronize - A timeout duration indicating how long to wait for all topology changes to have been effected on all local nodes. 
+ """) + def connect_local( + domain: InstanceReferenceWithSequencerConnection, + manualConnect: Boolean = false, + alias: Option[DomainAlias] = None, + synchronize: Option[NonNegativeDuration] = Some( + consoleEnvironment.commandTimeouts.bounded + ), + ): Unit = { + val config = + ParticipantCommands.domains.referenceToConfig( + NonEmpty.mk(Seq, SequencerAlias.Default -> domain).toMap, + manualConnect, + alias, + ) + register(config) + synchronize.foreach { timeout => + ConsoleMacros.utils.synchronize_topology(Some(timeout))(consoleEnvironment) + } + } + } + +} + +class LocalParticipantReferencesExtensions(participants: Seq[LocalParticipantReference])(implicit + override val consoleEnvironment: ConsoleEnvironment +) extends ParticipantReferencesExtensions(participants) + with LocalInstancesExtensions { + override def instances: Seq[LocalInstanceReferenceCommon] = participants +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ConsoleCommandGroup.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ConsoleCommandGroup.scala new file mode 100644 index 0000000000..1ca5952e29 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ConsoleCommandGroup.scala @@ -0,0 +1,26 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.console.commands + +import com.digitalasset.canton.console.{ + AdminCommandRunner, + ConsoleEnvironment, + FeatureFlagFilter, + Helpful, +} +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} + +trait ConsoleCommandGroup extends Helpful with FeatureFlagFilter with NamedLogging { + protected def runner: AdminCommandRunner + protected def consoleEnvironment: ConsoleEnvironment + private[commands] def myLoggerFactory: NamedLoggerFactory = loggerFactory +} + +object ConsoleCommandGroup { + class Impl(parent: ConsoleCommandGroup) extends ConsoleCommandGroup { + override protected def consoleEnvironment: ConsoleEnvironment = parent.consoleEnvironment + override protected def runner: AdminCommandRunner = parent.runner + override protected def loggerFactory: NamedLoggerFactory = parent.myLoggerFactory + } +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/DomainAdministration.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/DomainAdministration.scala new file mode 100644 index 0000000000..26c57aaf02 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/DomainAdministration.scala @@ -0,0 +1,438 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.console.commands + +import com.digitalasset.canton.DiscardOps +import com.digitalasset.canton.admin.api.client.commands.DomainAdminCommands.GetDomainParameters +import com.digitalasset.canton.admin.api.client.commands.{ + DomainAdminCommands, + TopologyAdminCommands, +} +import com.digitalasset.canton.admin.api.client.data.{ + DynamicDomainParameters, + ListParticipantDomainStateResult, + StaticDomainParameters, +} +import com.digitalasset.canton.config.RequireTypes.NonNegativeInt +import com.digitalasset.canton.config.{ + ConsoleCommandTimeout, + NonNegativeDuration, + NonNegativeFiniteDuration, + PositiveDurationSeconds, +} +import com.digitalasset.canton.console.CommandErrors.GenericCommandError +import com.digitalasset.canton.console.{ + AdminCommandRunner, + ConsoleEnvironment, + FeatureFlagFilter, + Help, + Helpful, +} +import com.digitalasset.canton.domain.service.ServiceAgreementAcceptance +import com.digitalasset.canton.error.CantonError +import com.digitalasset.canton.health.admin.data.NodeStatus +import com.digitalasset.canton.logging.NamedLogging +import com.digitalasset.canton.time.EnrichedDurations.* +import com.digitalasset.canton.topology.TopologyManagerError.IncreaseOfLedgerTimeRecordTimeTolerance +import com.digitalasset.canton.topology.* +import com.digitalasset.canton.topology.admin.grpc.BaseQuery +import com.digitalasset.canton.topology.store.{TimeQuery, TopologyStoreId} +import com.digitalasset.canton.topology.transaction.* +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.ShowUtil.* +import com.google.protobuf.ByteString + +import java.time.Duration +import scala.concurrent.{ExecutionContext, Future} +import scala.math.Ordering.Implicits.infixOrderingOps +import scala.util.chaining.scalaUtilChainingOps + +trait DomainAdministration { + this: AdminCommandRunner with FeatureFlagFilter with NamedLogging => + protected val 
consoleEnvironment: ConsoleEnvironment + + def id: DomainId + def topology: TopologyAdministrationGroup + protected def timeouts: ConsoleCommandTimeout = consoleEnvironment.commandTimeouts + + type Status <: NodeStatus.Status + def health: HealthAdministration[Status] + + // The DomainTopologyTransactionMessage is about 2500 bytes and each recipient about 100 bytes. + // with this minimum we can have up to 275 recipients for a domain transaction change. + private val minimumMaxRequestSizeBytes = NonNegativeInt.tryCreate(30000) + + @Help.Summary("Manage participant permissions") + @Help.Group("Participants") + object participants extends Helpful { + + @Help.Summary("List participant states") + @Help.Description( + """This command will list the currently valid state as stored in the authorized store. + | For a deep inspection of the identity management history, use the `topology.participant_domain_states.list` command.""" + ) + def list(): Seq[ListParticipantDomainStateResult] = { + consoleEnvironment + .run { + adminCommand( + TopologyAdminCommands.Read.ListParticipantDomainState( + BaseQuery( + filterStore = TopologyStoreId.AuthorizedStore.filterName, + useStateStore = false, + ops = None, + timeQuery = TimeQuery.HeadState, + filterSigningKey = "", + protocolVersion = None, + ), + filterDomain = "", + filterParticipant = "", + ) + ) + } + .filter(_.item.side != RequestSide.To) + } + + @Help.Summary("Change state and trust level of participant") + @Help.Description("""Set the state of the participant within the domain. + Valid permissions are 'Submission', 'Confirmation', 'Observation' and 'Disabled'. + Valid trust levels are 'Vip' and 'Ordinary'. 
+ Synchronize timeout can be used to ensure that the state has been propagated into the node + """) + def set_state( + participant: ParticipantId, + permission: ParticipantPermission, + trustLevel: TrustLevel = TrustLevel.Ordinary, + synchronize: Option[NonNegativeDuration] = Some(timeouts.bounded), + ): Unit = { + val _ = consoleEnvironment.run { + adminCommand( + TopologyAdminCommands.Write.AuthorizeParticipantDomainState( + TopologyChangeOp.Add, + None, + RequestSide.From, + id, + participant, + permission, + trustLevel, + replaceExisting = true, + ) + ) + } + synchronize.foreach(topology.synchronisation.await_idle) + } + + @Help.Summary("Test whether a participant is permissioned on this domain") + def active(participantId: ParticipantId): Boolean = + topology.participant_domain_states.active(id, participantId) + } + + @Help.Summary("Domain service commands") + @Help.Group("Service") + object service extends Helpful { + + @Help.Summary("List the accepted service agreements") + def list_accepted_agreements(): Seq[ServiceAgreementAcceptance] = + consoleEnvironment.run(adminCommand(DomainAdminCommands.ListAcceptedServiceAgreements)) + + @Help.Summary("Get the Static Domain Parameters configured for the domain") + def get_static_domain_parameters: StaticDomainParameters = + consoleEnvironment.run( + adminCommand(GetDomainParameters()) + ) + + @Help.Summary("Get the Dynamic Domain Parameters configured for the domain") + def get_dynamic_domain_parameters: DynamicDomainParameters = topology.domain_parameters_changes + .list("Authorized") + .sortBy(_.context.validFrom)(implicitly[Ordering[java.time.Instant]].reverse) + .headOption + .map(_.item) + .getOrElse( + throw new IllegalStateException("No dynamic domain parameters found in the domain") + ) + + @Help.Summary("Get the reconciliation interval configured for the domain") + @Help.Description("""Depending on the protocol version used on the domain, the value will be + read either from the static domain parameters or 
the dynamic ones.""") + def get_reconciliation_interval: PositiveDurationSeconds = + get_dynamic_domain_parameters.reconciliationInterval + + @Help.Summary("Get the max rate per participant") + @Help.Description("""Depending on the protocol version used on the domain, the value will be + read either from the static domain parameters or the dynamic ones.""") + def get_max_rate_per_participant: NonNegativeInt = + get_dynamic_domain_parameters.maxRatePerParticipant + + @Help.Summary("Get the max request size") + @Help.Description("""Depending on the protocol version used on the domain, the value will be + read either from the static domain parameters or the dynamic ones. + This value is not necessarily the one used by the sequencer node because it requires a restart + of the server to be taken into account.""") + def get_max_request_size: NonNegativeInt = + TraceContext.withNewTraceContext { implicit tc => + get_dynamic_domain_parameters.maxRequestSize.tap { res => + logger.info( + s"This value ($res) is not necessarily the one used by the sequencer node because it requires a restart of the server to be taken into account" + ) + } + } + + @Help.Summary("Get the mediator deduplication timeout") + @Help.Description( + "The method will fail, if the domain does not support the mediatorDeduplicationTimeout." 
+ ) + def get_mediator_deduplication_timeout: NonNegativeFiniteDuration = + get_dynamic_domain_parameters.mediatorDeduplicationTimeout + + @Help.Summary("Update the mediator deduplication timeout") + @Help.Description( + """The method will fail: + | + |- if the domain does not support the ``mediatorDeduplicationTimeout`` parameter, + |- if the new value of ``mediatorDeduplicationTimeout`` is less than twice the value of ``ledgerTimeRecordTimeTolerance.``""" + ) + def set_mediator_deduplication_timeout( + newMediatorDeduplicationTimeout: NonNegativeFiniteDuration + ): Unit = + update_dynamic_domain_parameters( + _.copy(mediatorDeduplicationTimeout = newMediatorDeduplicationTimeout) + ) + + @Help.Summary("Set the Dynamic Domain Parameters configured for the domain") + @Help.Description( + """force: Enable potentially dangerous changes. Required to increase ``ledgerTimeRecordTimeTolerance``. + |Use ``set_ledger_time_record_time_tolerance`` to securely increase ``ledgerTimeRecordTimeTolerance``.""" + ) + def set_dynamic_domain_parameters( + dynamicDomainParameters: DynamicDomainParameters, + force: Boolean = false, + ): Unit = { + val protocolVersion = get_static_domain_parameters.protocolVersion + topology.domain_parameters_changes + .authorize(id, dynamicDomainParameters, protocolVersion, force = force) + .discard[ByteString] + } + + @Help.Summary("Update the Dynamic Domain Parameters for the domain") + @Help.Description( + """force: Enable potentially dangerous changes. Required to increase ``ledgerTimeRecordTimeTolerance``. 
+ |Use ``set_ledger_time_record_time_tolerance_securely`` to securely increase ``ledgerTimeRecordTimeTolerance``.""" + ) + def update_dynamic_domain_parameters( + modifier: DynamicDomainParameters => DynamicDomainParameters, + force: Boolean = false, + ): Unit = { + val currentDomainParameters = get_dynamic_domain_parameters + val protocolVersion = get_static_domain_parameters.protocolVersion + + val newDomainParameters = modifier(currentDomainParameters) + topology.domain_parameters_changes + .authorize(id, newDomainParameters, protocolVersion, force = force) + .discard[ByteString] + } + + @Help.Summary("Try to update the reconciliation interval for the domain") + @Help.Description("""If the reconciliation interval is dynamic, update the value. + If the reconciliation interval is not dynamic (i.e., if the domain is running + on protocol version lower than `4`), then it will throw an error. + """) + def set_reconciliation_interval( + newReconciliationInterval: PositiveDurationSeconds + ): Unit = + update_dynamic_domain_parameters( + _.copy(reconciliationInterval = newReconciliationInterval) + ) + + @Help.Summary("Try to update the max rate per participant for the domain") + @Help.Description("""If the max rate per participant is dynamic, update the value. + If the max rate per participant is not dynamic (i.e., if the domain is running + on protocol version lower than `4`), then it will throw an error. + """) + def set_max_rate_per_participant( + maxRatePerParticipant: NonNegativeInt + ): Unit = + update_dynamic_domain_parameters(_.copy(maxRatePerParticipant = maxRatePerParticipant)) + + @Help.Summary("Try to update the max rate per participant for the domain") + @Help.Description("""If the max request size is dynamic, update the value. + The update won't have any effect unless the sequencer server is restarted. + If the max request size is not dynamic (i.e., if the domain is running + on protocol version lower than `4`), then it will throw an error. 
+ """) + def set_max_request_size( + maxRequestSize: NonNegativeInt, + force: Boolean = false, + ): Unit = + TraceContext.withNewTraceContext { implicit tc => + if (maxRequestSize < minimumMaxRequestSizeBytes && !force) + logger.warn( + s"""|The maxRequestSize requested is lower than the minimum advised value ($minimumMaxRequestSizeBytes) which can crash Canton. + |To set this value anyway, set force to true.""".stripMargin + ) + else + update_dynamic_domain_parameters(_.copy(maxRequestSize = maxRequestSize)) + logger.info( + "Please restart the sequencer node to take into account the new value for max-request-size." + ) + } + + @Help.Summary( + "Update the `ledgerTimeRecordTimeTolerance` in the dynamic domain parameters." + ) + @Help.Description( + """If it would be insecure to perform the change immediately, + |the command will block and wait until it is secure to perform the change. + |The command will block for at most twice of ``newLedgerTimeRecordTimeTolerance``. + | + |If the domain does not support ``mediatorDeduplicationTimeout``, + |the method will update ``ledgerTimeRecordTimeTolerance`` immediately without blocking. + | + |The method will fail if ``mediatorDeduplicationTimeout`` is less than twice of ``newLedgerTimeRecordTimeTolerance``. + | + |Do not modify domain parameters concurrently while running this command, + |because the command may override concurrent changes. + | + |force: update ``ledgerTimeRecordTimeTolerance`` immediately without blocking. 
+ |This is safe to do during domain bootstrapping and in test environments, but should not be done in operational production systems..""" + ) + def set_ledger_time_record_time_tolerance( + newLedgerTimeRecordTimeTolerance: NonNegativeFiniteDuration, + force: Boolean = false, + ): Unit = { + TraceContext.withNewTraceContext { implicit tc => + get_dynamic_domain_parameters match { + case oldDomainParameters: DynamicDomainParameters if !force => + securely_set_ledger_time_record_time_tolerance( + oldDomainParameters, + newLedgerTimeRecordTimeTolerance, + ) + + case _: DynamicDomainParameters => + logger.info( + s"Immediately updating ledgerTimeRecordTimeTolerance to $newLedgerTimeRecordTimeTolerance..." + ) + update_dynamic_domain_parameters( + _.update(ledgerTimeRecordTimeTolerance = newLedgerTimeRecordTimeTolerance), + force = true, + ) + } + } + } + + private def securely_set_ledger_time_record_time_tolerance( + oldDomainParameters: DynamicDomainParameters, + newLedgerTimeRecordTimeTolerance: NonNegativeFiniteDuration, + )(implicit traceContext: TraceContext): Unit = { + implicit val ec: ExecutionContext = consoleEnvironment.environment.executionContext + + // See i9028 for a detailed design. + // https://docs.google.com/document/d/1tpPbzv2s6bjbekVGBn6X5VZuw0oOTHek5c30CBo4UkI/edit#bookmark=id.1dzc6dxxlpca + // We wait until the antecedent of Lemma 2 Item 2 is falsified for all changes that violate the conclusion. 
+ + // Compute new parameters + val oldLedgerTimeRecordTimeTolerance = oldDomainParameters.ledgerTimeRecordTimeTolerance + + val minMediatorDeduplicationTimeout = newLedgerTimeRecordTimeTolerance * 2 + + if (oldDomainParameters.mediatorDeduplicationTimeout < minMediatorDeduplicationTimeout) { + val err = IncreaseOfLedgerTimeRecordTimeTolerance + .PermanentlyInsecure( + newLedgerTimeRecordTimeTolerance.toInternal, + oldDomainParameters.mediatorDeduplicationTimeout.toInternal, + ) + val msg = CantonError.stringFromContext(err) + consoleEnvironment.run(GenericCommandError(msg)) + } + + logger.info( + s"Securely updating ledgerTimeRecordTimeTolerance to $newLedgerTimeRecordTimeTolerance..." + ) + + // Poll until it is safe to increase ledgerTimeRecordTimeTolerance + def checkPreconditions(): Future[Unit] = { + val startTs = consoleEnvironment.environment.clock.now + + // Update mediatorDeduplicationTimeout for several reasons: + // 1. Make sure it is big enough. + // 2. The resulting topology transaction gives us a meaningful lower bound on the sequencer clock. + logger.info( + s"Do a no-op update of ledgerTimeRecordTimeTolerance to $oldLedgerTimeRecordTimeTolerance..." + ) + update_dynamic_domain_parameters( + _.copy(ledgerTimeRecordTimeTolerance = oldLedgerTimeRecordTimeTolerance) + ) + + logger.debug(s"Check for incompatible past domain parameters...") + + val allTransactions = topology.domain_parameters_changes.list( + id.filterString, + useStateStore = false, + // We can't specify a lower bound in range because that would be compared against validFrom. + // (But we need to compare to validUntil). + TimeQuery.Range(None, None), + ) + + // This serves as a lower bound of validFrom for the next topology transaction. 
+ val lastSequencerTs = + allTransactions + .map(_.context.validFrom) + .maxOption + .getOrElse(throw new NoSuchElementException("Missing domain parameters!")) + + logger.debug(s"Last sequencer timestamp is $lastSequencerTs.") + + // Determine how long we need to wait until all incompatible domainParameters have become + // invalid for at least minMediatorDeduplicationTimeout. + val waitDuration = allTransactions + .filterNot( + _.item.compatibleWithNewLedgerTimeRecordTimeTolerance(newLedgerTimeRecordTimeTolerance) + ) + .map { tx => + val elapsedForAtLeast = tx.context.validUntil match { + case Some(validUntil) => Duration.between(validUntil, lastSequencerTs) + case None => Duration.ZERO + } + minMediatorDeduplicationTimeout.asJava minus elapsedForAtLeast + } + .maxOption + .getOrElse(Duration.ZERO) + + if (waitDuration > Duration.ZERO) { + logger.info( + show"Found incompatible past domain parameters. Waiting for $waitDuration..." + ) + + // Use the clock instead of Threading.sleep to support sim clock based tests. + val delayF = consoleEnvironment.environment.clock + .scheduleAt( + _ => (), + startTs.plus(waitDuration), + ) // avoid scheduleAfter, because that causes a race condition in integration tests + .onShutdown( + throw new IllegalStateException( + "Update of ledgerTimeRecordTimeTolerance interrupted due to shutdown." + ) + ) + // Do not submit checkPreconditions() to the clock because it is blocking and would therefore block the clock. + delayF.flatMap(_ => checkPreconditions()) + } else { + Future.unit + } + } + + timeouts.unbounded.await("Wait until ledgerTimeRecordTimeTolerance can be increased.")( + checkPreconditions() + ) + + // Now that past values of mediatorDeduplicationTimeout have been large enough, + // we can change ledgerTimeRecordTimeTolerance. + logger.info( + s"Now changing ledgerTimeRecordTimeTolerance to $newLedgerTimeRecordTimeTolerance..." 
+ ) + update_dynamic_domain_parameters( + _.copy(ledgerTimeRecordTimeTolerance = newLedgerTimeRecordTimeTolerance), + force = true, + ) + } + } +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/GrpcByteChunksToFileObserver.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/GrpcByteChunksToFileObserver.scala new file mode 100644 index 0000000000..ac10b041f3 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/GrpcByteChunksToFileObserver.scala @@ -0,0 +1,47 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.console.commands + +import better.files.File +import com.digitalasset.canton.DiscardOps +import com.digitalasset.canton.util.ResourceUtil +import com.google.protobuf.ByteString +import io.grpc.stub.StreamObserver + +import java.io.FileOutputStream +import scala.concurrent.Promise +import scala.language.reflectiveCalls +import scala.util.{Failure, Success, Try} + +private[commands] class GrpcByteChunksToFileObserver[ + T <: GrpcByteChunksToFileObserver.ByteStringChunk +]( + inputFile: File, + requestComplete: Promise[String], +) extends StreamObserver[T] { + private val os: FileOutputStream = inputFile.newFileOutputStream(append = false) + + override def onNext(value: T): Unit = { + Try(os.write(value.chunk.toByteArray)) match { + case Failure(exception) => + ResourceUtil.closeAndAddSuppressed(Some(exception), os) + throw exception + case Success(_) => // all good + } + } + + override def onError(t: Throwable): Unit = { + requestComplete.tryFailure(t).discard + ResourceUtil.closeAndAddSuppressed(None, os) + } + + override def onCompleted(): Unit = { + requestComplete.trySuccess(inputFile.pathAsString).discard + ResourceUtil.closeAndAddSuppressed(None, os) + } +} + +private[commands] object 
GrpcByteChunksToFileObserver { + type ByteStringChunk = { val chunk: ByteString } +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/HealthAdministration.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/HealthAdministration.scala new file mode 100644 index 0000000000..70caa7d737 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/HealthAdministration.scala @@ -0,0 +1,180 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.console.commands + +import better.files.File +import com.digitalasset.canton.admin.api.client.commands.{ + StatusAdminCommands, + TopologyAdminCommands, + TopologyAdminCommandsX, +} +import com.digitalasset.canton.config.{ConsoleCommandTimeout, NonNegativeDuration} +import com.digitalasset.canton.console.CommandErrors.{CommandError, GenericCommandError} +import com.digitalasset.canton.console.ConsoleMacros.utils +import com.digitalasset.canton.console.{ + AdminCommandRunner, + CantonHealthAdministration, + CommandErrors, + CommandSuccessful, + ConsoleCommandResult, + ConsoleEnvironment, + Help, + Helpful, +} +import com.digitalasset.canton.health.admin.data.NodeStatus +import com.digitalasset.canton.health.admin.v0.HealthDumpChunk +import com.digitalasset.canton.health.admin.{data, v0} +import com.digitalasset.canton.networking.grpc.GrpcError +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.util.ResourceUtil +import io.grpc.StatusRuntimeException + +import java.util.concurrent.atomic.AtomicReference +import scala.concurrent.{Await, Promise, TimeoutException} + +abstract class HealthAdministrationCommon[S <: data.NodeStatus.Status]( + runner: AdminCommandRunner, + consoleEnvironment: ConsoleEnvironment, + deserialize: 
v0.NodeStatus.Status => ParsingResult[S], +) extends Helpful { + private val initializedCache = new AtomicReference[Boolean](false) + private def timeouts: ConsoleCommandTimeout = consoleEnvironment.commandTimeouts + + import runner.* + + @Help.Summary("Get human (and machine) readable status info") + def status: data.NodeStatus[S] = consoleEnvironment.run { + CommandSuccessful(adminCommand(new StatusAdminCommands.GetStatus[S](deserialize)) match { + case CommandSuccessful(success) => success + case err: CommandError => data.NodeStatus.Failure(err.cause) + }) + } + + @Help.Summary("Returns true if the node has an identity") + def has_identity(): Boolean + + @Help.Summary("Wait for the node to have an identity") + @Help.Description( + """This is specifically useful for the Domain Manager which needs its identity to be ready for bootstrapping, + | but for which we can't rely on wait_for_initialized() because it will be initialized only after being bootstrapped.""" + ) + def wait_for_identity(): Unit = waitFor(has_identity()) + + @Help.Summary( + "Creates a zip file containing diagnostic information about the canton process running this node" + ) + def dump( + outputFile: File = CantonHealthAdministration.defaultHealthDumpName, + timeout: NonNegativeDuration = timeouts.ledgerCommand, + chunkSize: Option[Int] = None, + ): String = consoleEnvironment.run { + val requestComplete = Promise[String]() + val responseObserver = + new GrpcByteChunksToFileObserver[HealthDumpChunk](outputFile, requestComplete) + + def call = consoleEnvironment.run { + adminCommand(new StatusAdminCommands.GetHealthDump(responseObserver, chunkSize)) + } + + try { + ResourceUtil.withResource(call) { _ => + CommandSuccessful( + Await.result(requestComplete.future, timeout.duration) + ) + } + } catch { + case sre: StatusRuntimeException => + GenericCommandError(GrpcError("Generating health dump file", "dump", sre).toString) + case _: TimeoutException => + outputFile.delete(swallowIOExceptions = true) 
+ CommandErrors.ConsoleTimeout.Error(timeout.asJavaApproximation) + } + } + + private def runningCommand = + adminCommand( + StatusAdminCommands.IsRunning + ) + private def initializedCommand = + adminCommand( + StatusAdminCommands.IsInitialized + ) + + def falseIfUnreachable(command: ConsoleCommandResult[Boolean]): Boolean = + consoleEnvironment.run(CommandSuccessful(command match { + case CommandSuccessful(result) => result + case _: CommandError => false + })) + + @Help.Summary("Check if the node is running") + def running(): Boolean = + // in case the node is not reachable, we assume it is not running + falseIfUnreachable(runningCommand) + + @Help.Summary("Check if the node is running and is the active instance (mediator, participant)") + def active: Boolean = status match { + case NodeStatus.Success(status) => status.active + case NodeStatus.NotInitialized(active) => active + case _ => false + } + + @Help.Summary("Returns true if node has been initialized.") + def initialized(): Boolean = initializedCache.updateAndGet { + case false => + // in case the node is not reachable, we cannot assume it is not initialized, because it could have been initialized in the past + // and it's simply not running at the moment. so we'll allow the command to throw an error here + consoleEnvironment.run(initializedCommand) + case x => x + } + + @Help.Summary("Wait for the node to be running") + def wait_for_running(): Unit = waitFor(running()) + + @Help.Summary("Wait for the node to be initialized") + def wait_for_initialized(): Unit = { + waitFor(initializedCache.updateAndGet { + case false => + // in case the node is not reachable, we return false instead of throwing an error in order to keep retrying + falseIfUnreachable(initializedCommand) + case x => x + }) + } + + protected def waitFor(condition: => Boolean): Unit = { + // all calls here are potentially unbounded. we do not know how long it takes + // for a node to start or for a node to become initialised. 
so we use the unbounded + // timeout + utils.retry_until_true(timeout = consoleEnvironment.commandTimeouts.unbounded)(condition) + } +} + +class HealthAdministration[S <: data.NodeStatus.Status]( + runner: AdminCommandRunner, + consoleEnvironment: ConsoleEnvironment, + deserialize: v0.NodeStatus.Status => ParsingResult[S], +) extends HealthAdministrationCommon[S](runner, consoleEnvironment, deserialize) { + + override def has_identity(): Boolean = runner + .adminCommand( + TopologyAdminCommands.Init.GetId() + ) + .toEither + .isRight + +} + +class HealthAdministrationX[S <: data.NodeStatus.Status]( + runner: AdminCommandRunner, + consoleEnvironment: ConsoleEnvironment, + deserialize: v0.NodeStatus.Status => ParsingResult[S], +) extends HealthAdministrationCommon[S](runner, consoleEnvironment, deserialize) { + + override def has_identity(): Boolean = runner + .adminCommand( + TopologyAdminCommandsX.Init.GetId() + ) + .toEither + .isRight + +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/LedgerApiAdministration.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/LedgerApiAdministration.scala new file mode 100644 index 0000000000..1d367d0ad4 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/LedgerApiAdministration.scala @@ -0,0 +1,3282 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.console.commands + +import cats.syntax.foldable.* +import cats.syntax.functorFilter.* +import cats.syntax.traverse.* +import com.daml.jwt.JwtDecoder +import com.daml.jwt.domain.Jwt +import com.daml.ledger.api.v1.admin.package_management_service.PackageDetails +import com.daml.ledger.api.v1.admin.party_management_service.PartyDetails as ProtoPartyDetails +import com.daml.ledger.api.v1.command_completion_service.Checkpoint +import com.daml.ledger.api.v1.commands.{Command, DisclosedContract} +import com.daml.ledger.api.v1.completion.Completion +import com.daml.ledger.api.v1.event.CreatedEvent +import com.daml.ledger.api.v1.event_query_service.{ + GetEventsByContractIdResponse, + GetEventsByContractKeyResponse, +} +import com.daml.ledger.api.v1.ledger_configuration_service.LedgerConfiguration +import com.daml.ledger.api.v1.ledger_offset.LedgerOffset +import com.daml.ledger.api.v1.transaction.{Transaction, TransactionTree} +import com.daml.ledger.api.v1.transaction_filter.{Filters, TransactionFilter} +import com.daml.ledger.api.v1.value.Value +import com.daml.ledger.api.v1.{EventQueryServiceOuterClass, ValueOuterClass} +import com.daml.ledger.api.v2.event_query_service.GetEventsByContractIdResponse as GetEventsByContractIdResponseV2 +import com.daml.ledger.api.v2.participant_offset.ParticipantOffset +import com.daml.ledger.api.v2.state_service.{ + GetActiveContractsResponse, + GetConnectedDomainsResponse, +} +import com.daml.ledger.api.v2.transaction.{ + Transaction as TransactionV2, + TransactionTree as TransactionTreeV2, +} +import com.daml.ledger.api.v2.transaction_filter.TransactionFilter as TransactionFilterV2 +import com.daml.ledger.{api, javaapi as javab} +import com.daml.lf.data.Ref +import com.daml.metrics.api.MetricHandle.{Histogram, Meter} +import com.daml.metrics.api.{MetricHandle, MetricName, MetricsContext} +import com.daml.scalautil.Statement.discard +import 
com.digitalasset.canton.admin.api.client.commands.LedgerApiTypeWrappers.WrappedCreatedEvent +import com.digitalasset.canton.admin.api.client.commands.LedgerApiV2Commands.CompletionWrapper +import com.digitalasset.canton.admin.api.client.commands.LedgerApiV2Commands.UpdateService.{ + AssignedWrapper, + ReassignmentWrapper, + TransactionTreeWrapper, + TransactionWrapper, + UnassignedWrapper, + UpdateTreeWrapper, + UpdateWrapper, +} +import com.digitalasset.canton.admin.api.client.commands.{ + LedgerApiCommands, + LedgerApiV2Commands, + ParticipantAdminCommands, +} +import com.digitalasset.canton.admin.api.client.data.* +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.config.{ConsoleCommandTimeout, NonNegativeDuration} +import com.digitalasset.canton.console.CommandErrors.GenericCommandError +import com.digitalasset.canton.console.{ + AdminCommandRunner, + CommandSuccessful, + ConsoleEnvironment, + ConsoleMacros, + FeatureFlag, + FeatureFlagFilter, + Help, + Helpful, + LedgerApiCommandRunner, + LocalParticipantReferenceCommon, + ParticipantReferenceCommon, + RemoteParticipantReferenceCommon, +} +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.ledger.api.auth.{ + AuthServiceJWTCodec, + CustomDamlJWTPayload, + StandardJWTPayload, +} +import com.digitalasset.canton.ledger.api.domain.{ + IdentityProviderConfig, + IdentityProviderId, + JwksUrl, +} +import com.digitalasset.canton.ledger.api.{DeduplicationPeriod, domain} +import com.digitalasset.canton.ledger.client.services.admin.IdentityProviderConfigClient +import com.digitalasset.canton.logging.NamedLogging +import com.digitalasset.canton.networking.grpc.{GrpcError, RecordingStreamObserver} +import com.digitalasset.canton.participant.ledger.api.client.JavaDecodeUtil +import com.digitalasset.canton.protocol.LfContractId +import com.digitalasset.canton.topology.{DomainId, ParticipantId, PartyId} +import 
com.digitalasset.canton.tracing.NoTracing +import com.digitalasset.canton.util.ResourceUtil +import com.digitalasset.canton.{LedgerTransactionId, LfPartyId, config} +import com.google.protobuf.field_mask.FieldMask +import io.grpc.StatusRuntimeException +import io.grpc.stub.StreamObserver + +import java.time.Instant +import java.util.UUID +import java.util.concurrent.TimeoutException +import java.util.concurrent.atomic.AtomicReference +import scala.annotation.nowarn +import scala.concurrent.{Await, ExecutionContext} +import scala.util.{Failure, Success, Try} + +trait BaseLedgerApiAdministration extends NoTracing { + thisAdministration: LedgerApiCommandRunner & NamedLogging & FeatureFlagFilter => + + implicit protected[canton] lazy val executionContext: ExecutionContext = + consoleEnvironment.environment.executionContext + + implicit protected val consoleEnvironment: ConsoleEnvironment + + protected val name: String + + protected lazy val applicationId: String = token + .flatMap { encodedToken => JwtDecoder.decode(Jwt(encodedToken)).toOption } + .flatMap(decodedToken => AuthServiceJWTCodec.readFromString(decodedToken.payload).toOption) + .map { + case s: StandardJWTPayload => s.userId + case c: CustomDamlJWTPayload => + c.applicationId.getOrElse(LedgerApiCommands.defaultApplicationId) + } + .getOrElse(LedgerApiCommands.defaultApplicationId) + + protected def domainOfTransaction(transactionId: String): DomainId + protected def optionallyAwait[Tx]( + tx: Tx, + txId: String, + optTimeout: Option[config.NonNegativeDuration], + ): Tx + private def timeouts: ConsoleCommandTimeout = consoleEnvironment.commandTimeouts + protected def defaultLimit: PositiveInt = + consoleEnvironment.environment.config.parameters.console.defaultLimit + + // TODO(#15280) after no usage of ledger_api, that should be deleted and this should be renamed to ledger_api, additionally summary in following line needs to change + @Help.Summary("Group of commands that access the ledger-api V2", 
FeatureFlag.Testing) + @Help.Group("Ledger Api") + object ledger_api_v2 extends Helpful { + + @Help.Summary("Read from update stream", FeatureFlag.Testing) + @Help.Group("Updates") + object updates extends Helpful { + + @Help.Summary("Get update trees", FeatureFlag.Testing) + @Help.Description( + """This function connects to the update tree stream for the given parties and collects update trees + |until either `completeAfter` update trees have been received or `timeout` has elapsed. + |The returned update trees can be filtered to be between the given offsets (default: no filtering). + |If the participant has been pruned via `pruning.prune` and if `beginOffset` is lower than the pruning offset, + |this command fails with a `NOT_FOUND` error.""" + ) + def trees( + partyIds: Set[PartyId], + completeAfter: Int, + beginOffset: ParticipantOffset = new ParticipantOffset().withBoundary( + ParticipantOffset.ParticipantBoundary.PARTICIPANT_BEGIN + ), + endOffset: Option[ParticipantOffset] = None, + verbose: Boolean = true, + timeout: config.NonNegativeDuration = timeouts.ledgerCommand, + resultFilter: UpdateTreeWrapper => Boolean = _ => true, + ): Seq[UpdateTreeWrapper] = check(FeatureFlag.Testing)({ + val observer = new RecordingStreamObserver[UpdateTreeWrapper](completeAfter, resultFilter) + val filter = TransactionFilterV2(partyIds.map(_.toLf -> Filters()).toMap) + mkResult( + subscribe_trees(observer, filter, beginOffset, endOffset, verbose), + "getUpdateTrees", + observer, + timeout, + ) + }) + + private def mkResult[Res]( + call: => AutoCloseable, + requestDescription: String, + observer: RecordingStreamObserver[Res], + timeout: config.NonNegativeDuration, + ): Seq[Res] = consoleEnvironment.run { + try { + ResourceUtil.withResource(call) { _ => + // Not doing noisyAwaitResult here, because we don't want to log warnings in case of a timeout. 
+ CommandSuccessful(Await.result(observer.result, timeout.duration)) + } + } catch { + case sre: StatusRuntimeException => + GenericCommandError(GrpcError(requestDescription, name, sre).toString) + case _: TimeoutException => CommandSuccessful(observer.responses) + } + } + + @Help.Summary("Subscribe to the update tree stream", FeatureFlag.Testing) + @Help.Description( + """This function connects to the update tree stream and passes update trees to `observer` until + |the stream is completed. + |Only update trees for parties in `filter.filterByParty.keys` will be returned. + |Use `filter = TransactionFilter(Map(myParty.toLf -> Filters()))` to return all trees for `myParty: PartyId`. + |The returned updates can be filtered to be between the given offsets (default: no filtering). + |If the participant has been pruned via `pruning.prune` and if `beginOffset` is lower than the pruning offset, + |this command fails with a `NOT_FOUND` error.""" + ) + def subscribe_trees( + observer: StreamObserver[UpdateTreeWrapper], + filter: TransactionFilterV2, + beginOffset: ParticipantOffset = new ParticipantOffset().withBoundary( + ParticipantOffset.ParticipantBoundary.PARTICIPANT_BEGIN + ), + endOffset: Option[ParticipantOffset] = None, + verbose: Boolean = true, + ): AutoCloseable = { + check(FeatureFlag.Testing)( + consoleEnvironment.run { + ledgerApiCommand( + LedgerApiV2Commands.UpdateService.SubscribeTrees( + observer, + beginOffset, + endOffset, + filter, + verbose, + ) + ) + } + ) + } + + @Help.Summary("Get flat updates", FeatureFlag.Testing) + @Help.Description( + """This function connects to the flat update stream for the given parties and collects updates + |until either `completeAfter` flat updates have been received or `timeout` has elapsed. + |The returned updates can be filtered to be between the given offsets (default: no filtering). 
+ |If the participant has been pruned via `pruning.prune` and if `beginOffset` is lower than the pruning offset, + |this command fails with a `NOT_FOUND` error. If you need to specify filtering conditions for template IDs and + |including create event blobs for explicit disclosure, consider using `flat_with_tx_filter`.""" + ) + def flat( + partyIds: Set[PartyId], + completeAfter: Int, + beginOffset: ParticipantOffset = new ParticipantOffset().withBoundary( + ParticipantOffset.ParticipantBoundary.PARTICIPANT_BEGIN + ), + endOffset: Option[ParticipantOffset] = None, + verbose: Boolean = true, + timeout: config.NonNegativeDuration = timeouts.ledgerCommand, + resultFilter: UpdateWrapper => Boolean = _ => true, + ): Seq[UpdateWrapper] = check(FeatureFlag.Testing)({ + val observer = new RecordingStreamObserver[UpdateWrapper](completeAfter, resultFilter) + val filter = TransactionFilterV2(partyIds.map(_.toLf -> Filters()).toMap) + mkResult( + subscribe_flat(observer, filter, beginOffset, endOffset, verbose), + "getUpdates", + observer, + timeout, + ) + }) + + @Help.Summary("Get flat updates", FeatureFlag.Testing) + @Help.Description( + """This function connects to the flat update stream for the given transaction filter and collects updates + |until either `completeAfter` transactions have been received or `timeout` has elapsed. + |The returned transactions can be filtered to be between the given offsets (default: no filtering). + |If the participant has been pruned via `pruning.prune` and if `beginOffset` is lower than the pruning offset, + |this command fails with a `NOT_FOUND` error. 
If you only need to filter by a set of parties, consider using + |`flat` instead.""" + ) + def flat_with_tx_filter( + filter: TransactionFilterV2, + completeAfter: Int, + beginOffset: ParticipantOffset = new ParticipantOffset().withBoundary( + ParticipantOffset.ParticipantBoundary.PARTICIPANT_BEGIN + ), + endOffset: Option[ParticipantOffset] = None, + verbose: Boolean = true, + timeout: config.NonNegativeDuration = timeouts.ledgerCommand, + resultFilter: UpdateWrapper => Boolean = _ => true, + ): Seq[UpdateWrapper] = check(FeatureFlag.Testing)({ + val observer = new RecordingStreamObserver[UpdateWrapper](completeAfter, resultFilter) + mkResult( + subscribe_flat(observer, filter, beginOffset, endOffset, verbose), + "getUpdates", + observer, + timeout, + ) + }) + + @Help.Summary("Subscribe to the flat update stream", FeatureFlag.Testing) + @Help.Description("""This function connects to the flat update stream and passes updates to `observer` until + |the stream is completed. + |Only updates for parties in `filter.filterByParty.keys` will be returned. + |Use `filter = TransactionFilter(Map(myParty.toLf -> Filters()))` to return all updates for `myParty: PartyId`. + |The returned updates can be filtered to be between the given offsets (default: no filtering). 
+ |If the participant has been pruned via `pruning.prune` and if `beginOffset` is lower than the pruning offset, + |this command fails with a `NOT_FOUND` error.""") + def subscribe_flat( + observer: StreamObserver[UpdateWrapper], + filter: TransactionFilterV2, + beginOffset: ParticipantOffset = new ParticipantOffset().withBoundary( + ParticipantOffset.ParticipantBoundary.PARTICIPANT_BEGIN + ), + endOffset: Option[ParticipantOffset] = None, + verbose: Boolean = true, + ): AutoCloseable = { + check(FeatureFlag.Testing)( + consoleEnvironment.run { + ledgerApiCommand( + LedgerApiV2Commands.UpdateService.SubscribeFlat( + observer, + beginOffset, + endOffset, + filter, + verbose, + ) + ) + } + ) + } + + @Help.Summary("Starts measuring throughput at the update service", FeatureFlag.Testing) + @Help.Description( + """This function will subscribe on behalf of `parties` to the update tree stream and + |notify various metrics: + |The metric `.` counts the number of update trees emitted. + |The metric `.-tx-node-count` tracks the number of root events emitted as part of update trees. + |The metric `.-tx-size` tracks the number of bytes emitted as part of update trees. + | + |To stop measuring, you need to close the returned `AutoCloseable`. 
+ |Use the `onUpdate` parameter to register a callback that is called on every update tree.""" + ) + def start_measuring( + parties: Set[PartyId], + metricSuffix: String, + onUpdate: UpdateTreeWrapper => Unit = _ => (), + )(implicit consoleEnvironment: ConsoleEnvironment): AutoCloseable = + check(FeatureFlag.Testing) { + + val metricName = MetricName(name, metricSuffix) + + val observer: StreamObserver[UpdateTreeWrapper] = new StreamObserver[UpdateTreeWrapper] { + + @nowarn("cat=deprecation") + val metricsFactory: MetricHandle.MetricsFactory = + consoleEnvironment.environment.metricsFactory.metricsFactory + + val metric: Meter = metricsFactory.meter(metricName) + val nodeCount: Histogram = metricsFactory.histogram(metricName :+ "tx-node-count") + val transactionSize: Histogram = metricsFactory.histogram(metricName :+ "tx-size") + + override def onNext(tree: UpdateTreeWrapper): Unit = { + val (s, serializedSize) = tree match { + case TransactionTreeWrapper(transactionTree) => + transactionTree.rootEventIds.size.toLong -> transactionTree.serializedSize + case reassignmentWrapper: ReassignmentWrapper => + 1L -> reassignmentWrapper.reassignment.serializedSize + } + metric.mark(s)(MetricsContext.Empty) + nodeCount.update(s) + transactionSize.update(serializedSize)(MetricsContext.Empty) + onUpdate(tree) + } + + override def onError(t: Throwable): Unit = t match { + case t: StatusRuntimeException => + val err = GrpcError("start_measuring", name, t) + err match { + case gaveUp: GrpcError.GrpcClientGaveUp if gaveUp.isClientCancellation => + logger.info(s"Client cancelled measuring throughput (metric: $metricName).") + case _ => + logger.warn( + s"An error occurred while measuring throughput (metric: $metricName). Stop measuring. $err" + ) + } + case _: Throwable => + logger.warn( + s"An exception occurred while measuring throughput (metric: $metricName). 
Stop measuring.", + t, + ) + } + + override def onCompleted(): Unit = + logger.info(s"Stop measuring throughput (metric: $metricName).") + } + + val filterParty = TransactionFilterV2(parties.map(_.toLf -> Filters()).toMap) + + logger.info(s"Start measuring throughput (metric: $metricName).") + subscribe_trees(observer, filterParty, state.end(), verbose = false) + } + + @Help.Summary("Get a (tree) transaction by its ID", FeatureFlag.Testing) + @Help.Description( + """Get a transaction tree from the update stream by its ID. Returns None if the transaction is not (yet) + |known at the participant or if the transaction has been pruned via `pruning.prune`.""" + ) + def by_id(parties: Set[PartyId], id: String): Option[TransactionTreeV2] = + check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiV2Commands.UpdateService.GetTransactionById(parties.map(_.toLf), id)( + consoleEnvironment.environment.executionContext + ) + ) + }) + } + + @Help.Summary("Submit commands", FeatureFlag.Testing) + @Help.Group("Command Submission") + object commands extends Helpful { + + @Help.Summary( + "Submit command and wait for the resulting transaction, returning the transaction tree or failing otherwise", + FeatureFlag.Testing, + ) + @Help.Description( + """Submits a command on behalf of the `actAs` parties, waits for the resulting transaction to commit and returns it. + | If the timeout is set, it also waits for the transaction to appear at all other configured + | participants who were involved in the transaction. The call blocks until the transaction commits or fails; + | the timeout only specifies how long to wait at the other participants. + | Fails if the transaction doesn't commit, or if it doesn't become visible to the involved participants in + | the allotted time. 
+ | Note that if the optTimeout is set and the involved parties are concurrently enabled/disabled or their + | participants are connected/disconnected, the command may currently result in spurious timeouts or may + | return before the transaction appears at all the involved participants.""" + ) + def submit( + actAs: Seq[PartyId], + commands: Seq[Command], + domainId: DomainId, + workflowId: String = "", + commandId: String = "", + // TODO(#15280) This feature wont work after V1 is removed. Also after witness blinding is implemented, the underlying algorith will be broken. Idea: drop this feature and wait explicitly with some additional tooling. + optTimeout: Option[NonNegativeDuration] = Some(timeouts.ledgerCommand), + deduplicationPeriod: Option[DeduplicationPeriod] = None, + submissionId: String = "", + minLedgerTimeAbs: Option[Instant] = None, + readAs: Seq[PartyId] = Seq.empty, + disclosedContracts: Seq[DisclosedContract] = Seq.empty, + applicationId: String = applicationId, + ): TransactionTreeV2 = check(FeatureFlag.Testing) { + val tx = consoleEnvironment.run { + ledgerApiCommand( + LedgerApiV2Commands.CommandService.SubmitAndWaitTransactionTree( + actAs.map(_.toLf), + readAs.map(_.toLf), + commands, + workflowId, + commandId, + deduplicationPeriod, + submissionId, + minLedgerTimeAbs, + disclosedContracts, + domainId, + applicationId, + ) + ) + } + optionallyAwait(tx, tx.updateId, optTimeout) + } + + @Help.Summary( + "Submit command and wait for the resulting transaction, returning the flattened transaction or failing otherwise", + FeatureFlag.Testing, + ) + @Help.Description( + """Submits a command on behalf of the `actAs` parties, waits for the resulting transaction to commit, and returns the "flattened" transaction. + | If the timeout is set, it also waits for the transaction to appear at all other configured + | participants who were involved in the transaction. 
The call blocks until the transaction commits or fails; + | the timeout only specifies how long to wait at the other participants. + | Fails if the transaction doesn't commit, or if it doesn't become visible to the involved participants in + | the allotted time. + | Note that if the optTimeout is set and the involved parties are concurrently enabled/disabled or their + | participants are connected/disconnected, the command may currently result in spurious timeouts or may + | return before the transaction appears at all the involved participants.""" + ) + def submit_flat( + actAs: Seq[PartyId], + commands: Seq[Command], + domainId: DomainId, + workflowId: String = "", + commandId: String = "", + // TODO(#15280) This feature wont work after V1 is removed. Also after witness blinding is implemented, the underlying algorith will be broken. Idea: drop this feature and wait explicitly with some additional tooling. + optTimeout: Option[NonNegativeDuration] = Some(timeouts.ledgerCommand), + deduplicationPeriod: Option[DeduplicationPeriod] = None, + submissionId: String = "", + minLedgerTimeAbs: Option[Instant] = None, + readAs: Seq[PartyId] = Seq.empty, + disclosedContracts: Seq[DisclosedContract] = Seq.empty, + applicationId: String = applicationId, + ): TransactionV2 = check(FeatureFlag.Testing) { + val tx = consoleEnvironment.run { + ledgerApiCommand( + LedgerApiV2Commands.CommandService.SubmitAndWaitTransaction( + actAs.map(_.toLf), + readAs.map(_.toLf), + commands, + workflowId, + commandId, + deduplicationPeriod, + submissionId, + minLedgerTimeAbs, + disclosedContracts, + domainId, + applicationId, + ) + ) + } + optionallyAwait(tx, tx.updateId, optTimeout) + } + + @Help.Summary("Submit command asynchronously", FeatureFlag.Testing) + @Help.Description( + """Provides access to the command submission service of the Ledger API. 
+ |See https://docs.daml.com/app-dev/services.html for documentation of the parameters.""" + ) + def submit_async( + actAs: Seq[PartyId], + commands: Seq[Command], + domainId: DomainId, + workflowId: String = "", + commandId: String = "", + deduplicationPeriod: Option[DeduplicationPeriod] = None, + submissionId: String = "", + minLedgerTimeAbs: Option[Instant] = None, + readAs: Seq[PartyId] = Seq.empty, + disclosedContracts: Seq[DisclosedContract] = Seq.empty, + applicationId: String = applicationId, + ): Unit = check(FeatureFlag.Testing) { + consoleEnvironment.run { + ledgerApiCommand( + LedgerApiV2Commands.CommandSubmissionService.Submit( + actAs.map(_.toLf), + readAs.map(_.toLf), + commands, + workflowId, + commandId, + deduplicationPeriod, + submissionId, + minLedgerTimeAbs, + disclosedContracts, + domainId, + applicationId, + ) + ) + } + } + + @Help.Summary( + "Submit assign command and wait for the resulting reassignment, returning the reassignment or failing otherwise", + FeatureFlag.Testing, + ) + @Help.Description( + """Submits a assignment command on behalf of `submitter` party, waits for the resulting assignment to commit, and returns the reassignment. + | If waitForParticipants is set, it also waits for the reassignment(s) to appear at all other configured + | participants who were involved in the assignment. The call blocks until the assignment commits or fails. + | Fails if the assignment doesn't commit, or if it doesn't become visible to the involved participants in time. + | Timout specifies the time how long to wait until the reassignment appears in the update stream for the submitting and all the specified participants. 
+ | The unassignId should be the one returned by the corresponding submit_unassign command.""" + ) + def submit_assign( + submitter: PartyId, + unassignId: String, + source: DomainId, + target: DomainId, + workflowId: String = "", + applicationId: String = applicationId, + submissionId: String = UUID.randomUUID().toString, + waitForParticipants: Map[ParticipantReferenceCommon, PartyId] = Map.empty, + timeout: config.NonNegativeDuration = timeouts.ledgerCommand, + ): AssignedWrapper = + submitReassignment(submitter, waitForParticipants, timeout)(commandId => + submit_assign_async( + submitter = submitter, + unassignId = unassignId, + source = source, + target = target, + workflowId = workflowId, + applicationId = applicationId, + commandId = commandId, + submissionId = submissionId, + ) + ) match { + case assigned: AssignedWrapper => assigned + case invalid => + throw new IllegalStateException(s"AssignedWrapper expected, but got: $invalid") + } + + @Help.Summary( + "Submit assign command and wait for the resulting reassignment, returning the reassignment or failing otherwise", + FeatureFlag.Testing, + ) + @Help.Description( + """Submits an unassignment command on behalf of `submitter` party, waits for the resulting unassignment to commit, and returns the reassignment. + | If waitForParticipants is set, it also waits for the reassignment(s) to appear at all other configured + | participants who were involved in the unassignment. The call blocks until the unassignment commits or fails. + | Fails if the unassignment doesn't commit, or if it doesn't become visible to the involved participants in time. 
+ | Timout specifies the time how long to wait until the reassignment appears in the update stream for the submitting and all the specified participants.""" + ) + def submit_unassign( + submitter: PartyId, + contractId: LfContractId, + source: DomainId, + target: DomainId, + workflowId: String = "", + applicationId: String = applicationId, + submissionId: String = UUID.randomUUID().toString, + waitForParticipants: Map[ParticipantReferenceCommon, PartyId] = Map.empty, + timeout: config.NonNegativeDuration = timeouts.ledgerCommand, + ): UnassignedWrapper = + submitReassignment(submitter, waitForParticipants, timeout)(commandId => + submit_unassign_async( + submitter = submitter, + contractId = contractId, + source = source, + target = target, + workflowId = workflowId, + applicationId = applicationId, + commandId = commandId, + submissionId = submissionId, + ) + ) match { + case unassigned: UnassignedWrapper => unassigned + case invalid => + throw new IllegalStateException(s"UnassignedWrapper expected, but got: $invalid") + } + + // TODO(#15429) this could be improved to use pointwise lookups similarly to submit as soon as the pointwise lookups + // for reassignments are available over the Ladger API. 
+ private def submitReassignment( + submitter: PartyId, + waitForParticipants: Map[ParticipantReferenceCommon, PartyId] = Map.empty, + timeout: config.NonNegativeDuration, + )(submit: String => Unit): ReassignmentWrapper = { + val commandId = UUID.randomUUID().toString + val ledgerEndBefore = state.end() + val participants = waitForParticipants.view.map { case (participant, partyId) => + participant -> (partyId, participant.ledger_api_v2.state.end()) + }.toMap + submit(commandId) + val completionUpdateId = completions + .list( + partyId = submitter, + atLeastNumCompletions = 1, + beginOffset = ledgerEndBefore, + filter = _.completion.commandId == commandId, + )(0) + .completion + .updateId + participants.foreach { case (participant, (queryingParty, from)) => + discard(waitForUpdateId(participant, from, queryingParty, completionUpdateId, timeout)) + } + waitForUpdateId( + thisAdministration, + ledgerEndBefore, + submitter, + completionUpdateId, + timeout, + ) match { + case result: ReassignmentWrapper => result + case _ => throw new IllegalStateException("ReassignmentWrapper expected") + } + } + + @Help.Summary("Submit assign command asynchronously", FeatureFlag.Testing) + @Help.Description( + """Provides access to the command submission service of the Ledger API. 
+ |See https://docs.daml.com/app-dev/services.html for documentation of the parameters.""" + ) + def submit_assign_async( + submitter: PartyId, + unassignId: String, + source: DomainId, + target: DomainId, + workflowId: String = "", + applicationId: String = applicationId, + commandId: String = UUID.randomUUID().toString, + submissionId: String = UUID.randomUUID().toString, + ): Unit = check(FeatureFlag.Testing) { + consoleEnvironment.run { + ledgerApiCommand( + LedgerApiV2Commands.CommandSubmissionService.SubmitAssignCommand( + workflowId = workflowId, + applicationId = applicationId, + commandId = commandId, + submitter = submitter.toLf, + submissionId = submissionId, + unassignId = unassignId, + source = source, + target = target, + ) + ) + } + } + + @Help.Summary("Submit unassign command asynchronously", FeatureFlag.Testing) + @Help.Description( + """Provides access to the command submission service of the Ledger API. + |See https://docs.daml.com/app-dev/services.html for documentation of the parameters.""" + ) + def submit_unassign_async( + submitter: PartyId, + contractId: LfContractId, + source: DomainId, + target: DomainId, + workflowId: String = "", + applicationId: String = applicationId, + commandId: String = UUID.randomUUID().toString, + submissionId: String = UUID.randomUUID().toString, + ): Unit = check(FeatureFlag.Testing) { + consoleEnvironment.run { + ledgerApiCommand( + LedgerApiV2Commands.CommandSubmissionService.SubmitUnassignCommand( + workflowId = workflowId, + applicationId = applicationId, + commandId = commandId, + submitter = submitter.toLf, + submissionId = submissionId, + contractId = contractId, + source = source, + target = target, + ) + ) + } + } + } + + @Help.Summary("Collection of Ledger API state endpoints", FeatureFlag.Testing) + @Help.Group("State") + object state extends Helpful { + + @Help.Summary("Read the current ledger end offset", FeatureFlag.Testing) + def end(): ParticipantOffset = + 
check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiV2Commands.StateService.LedgerEnd() + ) + }) + + @Help.Summary("Read the current connected domains for a party", FeatureFlag.Testing) + def connectedDomains(partyId: PartyId): GetConnectedDomainsResponse = + check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiV2Commands.StateService.GetConnectedDomains(partyId.toLf) + ) + }) + + @Help.Summary("Read active contracts", FeatureFlag.Testing) + @Help.Group("Active Contracts") + object acs extends Helpful { + @Help.Summary("List the set of active contracts of a given party", FeatureFlag.Testing) + @Help.Description( + """This command will return the current set of active contracts and incomplete reassignments for the given party. + | + |Supported arguments: + |- party: for which party you want to load the acs + |- limit: limit (default set via canton.parameter.console) + |- verbose: whether the resulting events should contain detailed type information + |- filterTemplate: list of templates ids to filter for, empty sequence acts as a wildcard + |- timeout: the maximum wait time for the complete acs to arrive + |- includeCreatedEventBlob: whether the result should contain the createdEventBlobs, it works only + | if the filterTemplate is non-empty""" + ) + def of_party( + party: PartyId, + limit: PositiveInt = defaultLimit, + verbose: Boolean = true, + filterTemplates: Seq[TemplateId] = Seq.empty, + activeAtOffset: String = "", + timeout: config.NonNegativeDuration = timeouts.unbounded, + includeCreatedEventBlob: Boolean = false, + ): Seq[GetActiveContractsResponse] = + check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiV2Commands.StateService + .GetActiveContracts( + Set(party.toLf), + limit, + filterTemplates, + activeAtOffset, + verbose, + timeout.asFiniteApproximation, + includeCreatedEventBlob, + )(consoleEnvironment.environment.scheduler) + ) + }) + + @Help.Summary( + "List 
the set of active contracts for all parties hosted on this participant", + FeatureFlag.Testing, + ) + @Help.Description( + """This command will return the current set of active contracts for all parties. + + Supported arguments: + - limit: limit (default set via canton.parameter.console) + - verbose: whether the resulting events should contain detailed type information + - filterTemplate: list of templates ids to filter for, empty sequence acts as a wildcard + - timeout: the maximum wait time for the complete acs to arrive + - identityProviderId: limit the response to parties governed by the given identity provider + - includeCreatedEventBlob: whether the result should contain the createdEventBlobs, it works only + if the filterTemplate is non-empty + """ + ) + def of_all( + limit: PositiveInt = defaultLimit, + verbose: Boolean = true, + filterTemplates: Seq[TemplateId] = Seq.empty, + activeAtOffset: String = "", + timeout: config.NonNegativeDuration = timeouts.unbounded, + identityProviderId: String = "", + includeCreatedEventBlob: Boolean = false, + ): Seq[GetActiveContractsResponse] = check(FeatureFlag.Testing)( + consoleEnvironment.runE { + for { + parties <- ledgerApiCommand( + LedgerApiCommands.PartyManagementService.ListKnownParties( + identityProviderId = identityProviderId + ) + ).toEither + localParties <- parties.filter(_.isLocal).map(_.party).traverse(LfPartyId.fromString) + res <- { + if (localParties.isEmpty) Right(Seq.empty) + else { + ledgerApiCommand( + LedgerApiV2Commands.StateService.GetActiveContracts( + localParties.toSet, + limit, + filterTemplates, + activeAtOffset, + verbose, + timeout.asFiniteApproximation, + includeCreatedEventBlob, + )(consoleEnvironment.environment.scheduler) + ).toEither + } + } + } yield res + } + ) + + @Help.Summary( + "Wait until the party sees the given contract in the active contract service", + FeatureFlag.Testing, + ) + @Help.Description( + "Will throw an exception if the contract is not found to be active within 
the given timeout" + ) + def await_active_contract( + party: PartyId, + contractId: LfContractId, + timeout: config.NonNegativeDuration = timeouts.ledgerCommand, + ): Unit = check(FeatureFlag.Testing) { + ConsoleMacros.utils.retry_until_true(timeout) { + of_party(party, verbose = false) + .exists( + _.contractEntry.activeContract.exists( + _.getCreatedEvent.contractId == contractId.coid + ) + ) + } + } + + @Help.Summary("Generic search for contracts", FeatureFlag.Testing) + @Help.Description( + """This search function returns an untyped ledger-api event. + |The find will wait until the contract appears or throw an exception once it times out.""" + ) + def find_generic( + partyId: PartyId, + filter: GetActiveContractsResponse => Boolean, + timeout: config.NonNegativeDuration = timeouts.ledgerCommand, + ): GetActiveContractsResponse = check(FeatureFlag.Testing) { + def scan: Option[GetActiveContractsResponse] = of_party(partyId).find(filter(_)) + + ConsoleMacros.utils.retry_until_true(timeout)(scan.isDefined) + consoleEnvironment.runE { + scan.toRight(s"Failed to find contract for $partyId.") + } + } + } + } + + @Help.Summary("Manage parties through the Ledger API", FeatureFlag.Testing) + @Help.Group("Party Management") + object parties extends Helpful { + + @Help.Summary("Allocate a new party", FeatureFlag.Testing) + @Help.Description( + """Allocates a new party on the ledger. 
+ party: a hint for generating the party identifier + displayName: a human-readable name of this party + annotations: key-value pairs associated with this party and stored locally on this Ledger API server + identityProviderId: identity provider id""" + ) + def allocate( + party: String, + displayName: String, + annotations: Map[String, String] = Map.empty, + identityProviderId: String = "", + ): PartyDetails = { + val proto = check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.PartyManagementService.AllocateParty( + partyIdHint = party, + displayName = displayName, + annotations = annotations, + identityProviderId = identityProviderId, + ) + ) + }) + PartyDetails.fromProtoPartyDetails(proto) + } + + @Help.Summary("List parties known by the Ledger API server", FeatureFlag.Testing) + @Help.Description( + """Lists parties known by the Ledger API server. + identityProviderId: identity provider id""" + ) + def list(identityProviderId: String = ""): Seq[PartyDetails] = { + val proto = check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.PartyManagementService.ListKnownParties( + identityProviderId = identityProviderId + ) + ) + }) + proto.map(PartyDetails.fromProtoPartyDetails) + } + + @Help.Summary("Update participant-local party details") + @Help.Description( + """Currently you can update only the annotations. + |You cannot update other user attributes. 
+ party: party to be updated, + modifier: a function to modify the party details, e.g.: `partyDetails => { partyDetails.copy(annotations = partyDetails.annotations.updated("a", "b").removed("c")) }` + identityProviderId: identity provider id""" + ) + def update( + party: PartyId, + modifier: PartyDetails => PartyDetails, + identityProviderId: String = "", + ): PartyDetails = { + val rawDetails = get(party = party) + val srcDetails = PartyDetails.fromProtoPartyDetails(rawDetails) + val modifiedDetails = modifier(srcDetails) + verifyOnlyModifiableFieldsWhereModified(srcDetails, modifiedDetails) + val annotationsUpdate = makeAnnotationsUpdate( + original = srcDetails.annotations, + modified = modifiedDetails.annotations, + ) + val rawUpdatedDetails = check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.PartyManagementService.Update( + party = party, + annotationsUpdate = Some(annotationsUpdate), + resourceVersionO = Some(rawDetails.localMetadata.fold("")(_.resourceVersion)), + identityProviderId = identityProviderId, + ) + ) + }) + PartyDetails.fromProtoPartyDetails(rawUpdatedDetails) + } + + @Help.Summary("Update party's identity provider id", FeatureFlag.Testing) + @Help.Description( + """Updates party's identity provider id. 
+ party: party to be updated + sourceIdentityProviderId: source identity provider id + targetIdentityProviderId: target identity provider id + """ + ) + def update_idp( + party: PartyId, + sourceIdentityProviderId: String, + targetIdentityProviderId: String, + ): Unit = + check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.PartyManagementService.UpdateIdp( + party = party, + sourceIdentityProviderId = sourceIdentityProviderId, + targetIdentityProviderId = targetIdentityProviderId, + ) + ) + }) + + private def verifyOnlyModifiableFieldsWhereModified( + srcDetails: PartyDetails, + modifiedDetails: PartyDetails, + ): Unit = { + val withAllowedUpdatesReverted = modifiedDetails.copy(annotations = srcDetails.annotations) + if (withAllowedUpdatesReverted != srcDetails) { + throw ModifyingNonModifiablePartyDetailsPropertiesError() + } + } + + private def get(party: PartyId, identityProviderId: String = ""): ProtoPartyDetails = { + check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.PartyManagementService.GetParty( + party = party, + identityProviderId = identityProviderId, + ) + ) + }) + } + + } + + @Help.Summary("Manage packages", FeatureFlag.Testing) + @Help.Group("Package Management") + object packages extends Helpful { + + @Help.Summary("Upload packages from Dar file", FeatureFlag.Testing) + @Help.Description("""Uploading the Dar can be done either through the ledger Api server or through the Canton admin Api. + |The Ledger Api is the portable method across ledgers. The Canton admin Api is more powerful as it allows for + |controlling Canton specific behaviour. + |In particular, a Dar uploaded using the ledger Api will not be available in the Dar store and can not be downloaded again. + |Additionally, Dars uploaded using the ledger Api will be vetted, but the system will not wait + |for the Dars to be successfully registered with all connected domains. 
As such, if a Dar is uploaded and then + |used immediately thereafter, a command might bounce due to missing package vettings.""") + def upload_dar(darPath: String): Unit = check(FeatureFlag.Testing) { + consoleEnvironment.run { + ledgerApiCommand(LedgerApiCommands.PackageService.UploadDarFile(darPath)) + } + } + + @Help.Summary("List Daml Packages", FeatureFlag.Testing) + def list(limit: PositiveInt = defaultLimit): Seq[PackageDetails] = + check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand(LedgerApiCommands.PackageService.ListKnownPackages(limit)) + }) + + } + + @Help.Summary("Monitor progress of commands", FeatureFlag.Testing) + @Help.Group("Command Completions") + object completions extends Helpful { + + @Help.Summary("Lists command completions following the specified offset", FeatureFlag.Testing) + @Help.Description( + """If the participant has been pruned via `pruning.prune` and if `beginOffset` is lower than + |the pruning offset, this command fails with a `NOT_FOUND` error.""" + ) + def list( + partyId: PartyId, + atLeastNumCompletions: Int, + beginOffset: ParticipantOffset, + applicationId: String = applicationId, + timeout: NonNegativeDuration = timeouts.ledgerCommand, + filter: CompletionWrapper => Boolean = _ => true, + ): Seq[CompletionWrapper] = + check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiV2Commands.CommandCompletionService.CompletionRequest( + partyId.toLf, + beginOffset, + atLeastNumCompletions, + timeout.asJavaApproximation, + applicationId, + )(filter, consoleEnvironment.environment.scheduler) + ) + }) + + @Help.Summary("Subscribe to the command completion stream", FeatureFlag.Testing) + @Help.Description( + """This function connects to the command completion stream and passes command completions to `observer` until + |the stream is completed. + |Only completions for parties in `parties` will be returned. + |The returned completions start at `beginOffset` (default: `PARTICIPANT_BEGIN`). 
+ |If the participant has been pruned via `pruning.prune` and if `beginOffset` is lower than the pruning offset, + |this command fails with a `NOT_FOUND` error.""" + ) + def subscribe( + observer: StreamObserver[CompletionWrapper], + parties: Seq[PartyId], + beginOffset: ParticipantOffset = new ParticipantOffset().withBoundary( + ParticipantOffset.ParticipantBoundary.PARTICIPANT_BEGIN + ), + applicationId: String = applicationId, + ): AutoCloseable = { + check(FeatureFlag.Testing)( + consoleEnvironment.run { + ledgerApiCommand( + LedgerApiV2Commands.CommandCompletionService.Subscribe( + observer, + parties.map(_.toLf), + Some(beginOffset), + applicationId, + ) + ) + } + ) + } + } + + @Help.Summary("Identity Provider Configuration Management", FeatureFlag.Testing) + @Help.Group("Ledger Api Identity Provider Configuration Management") + object identity_provider_config extends Helpful { + @Help.Summary("Create a new identity provider configuration", FeatureFlag.Testing) + @Help.Description( + """Create an identity provider configuration. 
The request will fail if the maximum allowed number of separate configurations is reached.""" + ) + def create( + identityProviderId: String, + isDeactivated: Boolean = false, + jwksUrl: String, + issuer: String, + audience: Option[String], + ): IdentityProviderConfig = { + val config = check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.IdentityProviderConfigs.Create( + identityProviderId = + IdentityProviderId.Id(Ref.LedgerString.assertFromString(identityProviderId)), + isDeactivated = isDeactivated, + jwksUrl = JwksUrl.assertFromString(jwksUrl), + issuer = issuer, + audience = audience, + ) + ) + }) + IdentityProviderConfigClient.fromProtoConfig(config) + } + + @Help.Summary("Update an identity provider", FeatureFlag.Testing) + @Help.Description("""Update identity provider""") + def update( + identityProviderId: String, + isDeactivated: Boolean = false, + jwksUrl: String, + issuer: String, + audience: Option[String], + updateMask: FieldMask, + ): IdentityProviderConfig = { + val config = check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.IdentityProviderConfigs.Update( + domain.IdentityProviderConfig( + IdentityProviderId.Id(Ref.LedgerString.assertFromString(identityProviderId)), + isDeactivated, + JwksUrl(jwksUrl), + issuer, + audience, + ), + updateMask, + ) + ) + }) + IdentityProviderConfigClient.fromProtoConfig(config) + } + + @Help.Summary("Delete an identity provider configuration", FeatureFlag.Testing) + @Help.Description("""Delete an existing identity provider configuration""") + def delete(identityProviderId: String): Unit = { + check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.IdentityProviderConfigs.Delete( + IdentityProviderId.Id(Ref.LedgerString.assertFromString(identityProviderId)) + ) + ) + }) + } + + @Help.Summary("Get an identity provider configuration", FeatureFlag.Testing) + @Help.Description("""Get identity provider 
configuration by id""") + def get(identityProviderId: String): IdentityProviderConfig = { + val config = check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.IdentityProviderConfigs.Get( + IdentityProviderId.Id(Ref.LedgerString.assertFromString(identityProviderId)) + ) + ) + }) + IdentityProviderConfigClient.fromProtoConfig(config) + } + + @Help.Summary("List identity provider configurations", FeatureFlag.Testing) + @Help.Description("""List all existing identity provider configurations""") + def list(): Seq[IdentityProviderConfig] = { + val configs = check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.IdentityProviderConfigs.List() + ) + }) + configs.map(IdentityProviderConfigClient.fromProtoConfig) + } + } + + @Help.Summary("Manage Ledger Api Users", FeatureFlag.Testing) + @Help.Group("Ledger Api Users") + object users extends Helpful { + + @Help.Summary("Create a user with the given id", FeatureFlag.Testing) + @Help.Description( + """Users are used to dynamically managing the rights given to Daml applications. + |They allow us to link a stable local identifier (of an application) with a set of parties. 
+ id: the id used to identify the given user + actAs: the set of parties this user is allowed to act as + primaryParty: the optional party that should be linked to this user by default + readAs: the set of parties this user is allowed to read as + participantAdmin: flag (default false) indicating if the user is allowed to use the admin commands of the Ledger Api + isActive: flag (default true) indicating if the user is active + annotations: the set of key-value pairs linked to this user + identityProviderId: identity provider id + """ + ) + def create( + id: String, + actAs: Set[PartyId] = Set(), + primaryParty: Option[PartyId] = None, + readAs: Set[PartyId] = Set(), + participantAdmin: Boolean = false, + isActive: Boolean = true, + annotations: Map[String, String] = Map.empty, + identityProviderId: String = "", + ): User = { + val lapiUser = check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.Users.Create( + id = id, + actAs = actAs.map(_.toLf), + primaryParty = primaryParty.map(_.toLf), + readAs = readAs.map(_.toLf), + participantAdmin = participantAdmin, + isDeactivated = !isActive, + annotations = annotations, + identityProviderId = identityProviderId, + ) + ) + }) + User.fromLapiUser(lapiUser) + } + + @Help.Summary("Update a user", FeatureFlag.Testing) + @Help.Description( + """Currently you can update the annotations, active status and primary party. + |You cannot update other user attributes. 
+ id: id of the user to be updated + modifier: a function for modifying the user; e.g: `user => { user.copy(isActive = false, primaryParty = None, annotations = user.annotations.updated("a", "b").removed("c")) }` + identityProviderId: identity provider id + """ + ) + def update( + id: String, + modifier: User => User, + identityProviderId: String = "", + ): User = { + val rawUser = doGet( + id = id, + identityProviderId = identityProviderId, + ) + val srcUser = User.fromLapiUser(rawUser) + val modifiedUser = modifier(srcUser) + verifyOnlyModifiableFieldsWhereModified(srcUser, modifiedUser) + val annotationsUpdate = + makeAnnotationsUpdate(original = srcUser.annotations, modified = modifiedUser.annotations) + val rawUpdatedUser = check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.Users.Update( + id = id, + annotationsUpdate = Some(annotationsUpdate), + primaryPartyUpdate = Some(modifiedUser.primaryParty), + isDeactivatedUpdate = Some(!modifiedUser.isActive), + resourceVersionO = Some(rawUser.metadata.resourceVersion), + identityProviderId = identityProviderId, + ) + ) + }) + User.fromLapiUser(rawUpdatedUser) + } + + @Help.Summary("Get the user data of the user with the given id", FeatureFlag.Testing) + @Help.Description( + """Fetch the data associated with the given user id failing if there is no such user. + |You will get the user's primary party, active status and annotations. + |If you need the user rights, use rights.list instead. + id: user id + identityProviderId: identity provider id""" + ) + def get(id: String, identityProviderId: String = ""): User = User.fromLapiUser( + doGet( + id = id, + identityProviderId = identityProviderId, + ) + ) + + @Help.Summary("Delete a user", FeatureFlag.Testing) + @Help.Description("""Delete a user by id. 
+ id: user id + identityProviderId: identity provider id""") + def delete(id: String, identityProviderId: String = ""): Unit = + check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.Users.Delete( + id = id, + identityProviderId = identityProviderId, + ) + ) + }) + + @Help.Summary("List users", FeatureFlag.Testing) + @Help.Description("""List users of this participant node + filterUser: filter results using the given filter string + pageToken: used for pagination (the result contains a page token if there are further pages) + pageSize: default page size before the filter is applied + identityProviderId: identity provider id""") + def list( + filterUser: String = "", + pageToken: String = "", + pageSize: Int = 100, + identityProviderId: String = "", + ): UsersPage = { + val page: ListLedgerApiUsersResult = check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.Users.List( + filterUser = filterUser, + pageToken = pageToken, + pageSize = pageSize, + identityProviderId = identityProviderId, + ) + ) + }) + UsersPage( + users = page.users.map(User.fromLapiUser), + nextPageToken = page.nextPageToken, + ) + } + + @Help.Summary("Update user's identity provider id", FeatureFlag.Testing) + @Help.Description( + """Updates user's identity provider id. 
+ id: the id used to identify the given user + sourceIdentityProviderId: source identity provider id + targetIdentityProviderId: target identity provider id + """ + ) + def update_idp( + id: String, + sourceIdentityProviderId: String, + targetIdentityProviderId: String, + ): Unit = + check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.Users.UpdateIdp( + id = id, + sourceIdentityProviderId = sourceIdentityProviderId, + targetIdentityProviderId = targetIdentityProviderId, + ) + ) + }) + + private def verifyOnlyModifiableFieldsWhereModified( + srcUser: User, + modifiedUser: User, + ): Unit = { + val withAllowedUpdatesReverted = modifiedUser.copy( + primaryParty = srcUser.primaryParty, + isActive = srcUser.isActive, + annotations = srcUser.annotations, + ) + if (withAllowedUpdatesReverted != srcUser) { + throw ModifyingNonModifiableUserPropertiesError() + } + } + + private def doGet(id: String, identityProviderId: String): LedgerApiUser = { + check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.Users.Get( + id = id, + identityProviderId = identityProviderId, + ) + ) + }) + } + + @Help.Summary("Manage Ledger Api User Rights", FeatureFlag.Testing) + @Help.Group("Ledger Api User Rights") + object rights extends Helpful { + + @Help.Summary("Grant new rights to a user", FeatureFlag.Testing) + @Help.Description("""Users are used to dynamically managing the rights given to Daml applications. + |This function is used to grant new rights to an existing user. 
+ id: the id used to identify the given user + actAs: the set of parties this user is allowed to act as + readAs: the set of parties this user is allowed to read as + participantAdmin: flag (default false) indicating if the user is allowed to use the admin commands of the Ledger Api + identityProviderId: identity provider id + """) + def grant( + id: String, + actAs: Set[PartyId], + readAs: Set[PartyId] = Set(), + participantAdmin: Boolean = false, + identityProviderId: String = "", + ): UserRights = + check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.Users.Rights.Grant( + id = id, + actAs = actAs.map(_.toLf), + readAs = readAs.map(_.toLf), + participantAdmin = participantAdmin, + identityProviderId = identityProviderId, + ) + ) + }) + + @Help.Summary("Revoke user rights", FeatureFlag.Testing) + @Help.Description("""Use to revoke specific rights from a user. + id: the id used to identify the given user + actAs: the set of parties this user should not be allowed to act as + readAs: the set of parties this user should not be allowed to read as + participantAdmin: if set to true, the participant admin rights will be removed + identityProviderId: identity provider id + """) + def revoke( + id: String, + actAs: Set[PartyId], + readAs: Set[PartyId] = Set(), + participantAdmin: Boolean = false, + identityProviderId: String = "", + ): UserRights = + check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.Users.Rights.Revoke( + id = id, + actAs = actAs.map(_.toLf), + readAs = readAs.map(_.toLf), + participantAdmin = participantAdmin, + identityProviderId = identityProviderId, + ) + ) + }) + + @Help.Summary("List rights of a user", FeatureFlag.Testing) + @Help.Description("""Lists the rights of a user, or the rights of the current user. 
+ id: user id + identityProviderId: identity provider id""") + def list(id: String, identityProviderId: String = ""): UserRights = + check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.Users.Rights.List( + id = id, + identityProviderId = identityProviderId, + ) + ) + }) + + } + + } + + @Help.Summary("Retrieve the ledger metering", FeatureFlag.Testing) + @Help.Group("Metering") + object metering extends Helpful { + + @Help.Summary("Get the ledger metering report", FeatureFlag.Testing) + @Help.Description("""Returns the current ledger metering report + from: required from timestamp (inclusive) + to: optional to timestamp + application_id: optional application id to which we want to restrict the report + """) + def get_report( + from: CantonTimestamp, + to: Option[CantonTimestamp] = None, + applicationId: Option[String] = None, + ): String = + check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.Metering.GetReport( + from, + to, + applicationId, + ) + ) + }) + } + + @Help.Summary("Interact with the time service", FeatureFlag.Testing) + @Help.Group("Time") + object time { + @Help.Summary("Get the participants time", FeatureFlag.Testing) + @Help.Description("""Returns the current timestamp of the participant which is either the + system clock or the static time""") + def get(): CantonTimestamp = + check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiV2Commands.Time.Get + ) + }) + + @Help.Summary("Set the participants time", FeatureFlag.Testing) + @Help.Description( + """Sets the participants time if the participant is running in static time mode""" + ) + def set(currentTime: CantonTimestamp, nextTime: CantonTimestamp): Unit = + check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand(LedgerApiV2Commands.Time.Set(currentTime, nextTime)) + }) + + } + + @Help.Summary("Query event details", FeatureFlag.Testing) + @Help.Group("EventQuery") + object 
event_query extends Helpful { + + @Help.Summary("Get events by contract Id", FeatureFlag.Testing) + @Help.Description("""Return events associated with the given contract Id""") + def by_contract_id( + contractId: String, + requestingParties: Seq[PartyId], + ): GetEventsByContractIdResponseV2 = + check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiV2Commands.QueryService + .GetEventsByContractId(contractId, requestingParties.map(_.toLf)) + ) + }) + } + + // TODO(#15274) + @Help.Summary("Group of commands that utilize java bindings", FeatureFlag.Testing) + @Help.Group("Ledger Api (Java bindings)") + object javaapi extends Helpful + + private def waitForUpdateId( + administration: BaseLedgerApiAdministration, + from: ParticipantOffset, + queryPartyId: PartyId, + updateId: String, + timeout: config.NonNegativeDuration, + ): UpdateWrapper = { + def logPrefix: String = + s"As waiting for update-id:$updateId at participant:$administration with querying party:$queryPartyId starting from $from: " + Try( + administration.ledger_api_v2.updates + .flat( + partyIds = Set(queryPartyId), + beginOffset = from, + completeAfter = 1, + resultFilter = { + case reassignmentW: ReassignmentWrapper => + reassignmentW.reassignment.updateId == updateId + case TransactionWrapper(transaction) => transaction.updateId == updateId + }, + timeout = timeout, + ) + ) match { + case Success(values) if values.size == 1 => values(0) + case Success(values) => + throw new IllegalStateException( + s"$logPrefix Exactely one update expected, but received #${values.size}" + ) + case Failure(t) => throw new IllegalStateException(s"$logPrefix an exception occurred.", t) + } + } + } + + @Help.Summary("Group of commands that access the ledger-api", FeatureFlag.Testing) + @Help.Group("Ledger Api") + object ledger_api extends Helpful { + + @Help.Summary("Read from transaction stream", FeatureFlag.Testing) + @Help.Group("Transactions") + object transactions extends Helpful { + + 
@Help.Summary("Get ledger end", FeatureFlag.Testing) + def end(): LedgerOffset = + check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand(LedgerApiCommands.TransactionService.GetLedgerEnd()) + }) + + @Help.Summary("Get transaction trees", FeatureFlag.Testing) + @Help.Description( + """This function connects to the transaction tree stream for the given parties and collects transaction trees + |until either `completeAfter` transaction trees have been received or `timeout` has elapsed. + |The returned transaction trees can be filtered to be between the given offsets (default: no filtering). + |If the participant has been pruned via `pruning.prune` and if `beginOffset` is lower than the pruning offset, + |this command fails with a `NOT_FOUND` error.""" + ) + def trees( + partyIds: Set[PartyId], + completeAfter: Int, + beginOffset: LedgerOffset = + new LedgerOffset().withBoundary(LedgerOffset.LedgerBoundary.LEDGER_BEGIN), + endOffset: Option[LedgerOffset] = None, + verbose: Boolean = true, + timeout: config.NonNegativeDuration = timeouts.ledgerCommand, + ): Seq[TransactionTree] = check(FeatureFlag.Testing)({ + val observer = new RecordingStreamObserver[TransactionTree](completeAfter) + val filter = TransactionFilter(partyIds.map(_.toLf -> Filters()).toMap) + mkResult( + subscribe_trees(observer, filter, beginOffset, endOffset, verbose), + "getTransactionTrees", + observer, + timeout, + ) + }) + + private def mkResult[Res]( + call: => AutoCloseable, + requestDescription: String, + observer: RecordingStreamObserver[Res], + timeout: config.NonNegativeDuration, + ): Seq[Res] = consoleEnvironment.run { + try { + ResourceUtil.withResource(call) { _ => + // Not doing noisyAwaitResult here, because we don't want to log warnings in case of a timeout. 
+ CommandSuccessful(Await.result(observer.result, timeout.duration)) + } + } catch { + case sre: StatusRuntimeException => + GenericCommandError(GrpcError(requestDescription, name, sre).toString) + case _: TimeoutException => CommandSuccessful(observer.responses) + } + } + + @Help.Summary("Subscribe to the transaction tree stream", FeatureFlag.Testing) + @Help.Description( + """This function connects to the transaction tree stream and passes transaction trees to `observer` until + |the stream is completed. + |Only transaction trees for parties in `filter.filterByParty.keys` will be returned. + |Use `filter = TransactionFilter(Map(myParty.toLf -> Filters()))` to return all trees for `myParty: PartyId`. + |The returned transactions can be filtered to be between the given offsets (default: no filtering). + |If the participant has been pruned via `pruning.prune` and if `beginOffset` is lower than the pruning offset, + |this command fails with a `NOT_FOUND` error.""" + ) + def subscribe_trees( + observer: StreamObserver[TransactionTree], + filter: TransactionFilter, + beginOffset: LedgerOffset = + new LedgerOffset().withBoundary(LedgerOffset.LedgerBoundary.LEDGER_BEGIN), + endOffset: Option[LedgerOffset] = None, + verbose: Boolean = true, + ): AutoCloseable = { + check(FeatureFlag.Testing)( + consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.TransactionService.SubscribeTrees( + observer, + beginOffset, + endOffset, + filter, + verbose, + ) + ) + } + ) + } + + @Help.Summary("Get flat transactions", FeatureFlag.Testing) + @Help.Description( + """This function connects to the flat transaction stream for the given parties and collects transactions + |until either `completeAfter` transaction trees have been received or `timeout` has elapsed. + |The returned transactions can be filtered to be between the given offsets (default: no filtering). 
+ |If the participant has been pruned via `pruning.prune` and if `beginOffset` is lower than the pruning offset, + |this command fails with a `NOT_FOUND` error. If you need to specify filtering conditions for template IDs and + |including create event blobs for explicit disclosure, consider using `ledger_api.transactions.flat_with_tx_filter`.""" + ) + def flat( + partyIds: Set[PartyId], + completeAfter: Int, + beginOffset: LedgerOffset = + new LedgerOffset().withBoundary(LedgerOffset.LedgerBoundary.LEDGER_BEGIN), + endOffset: Option[LedgerOffset] = None, + verbose: Boolean = true, + timeout: config.NonNegativeDuration = timeouts.ledgerCommand, + ): Seq[Transaction] = check(FeatureFlag.Testing)({ + val filter = TransactionFilter(partyIds.map(_.toLf -> Filters()).toMap) + flat_with_tx_filter(filter, completeAfter, beginOffset, endOffset, verbose, timeout) + }) + + @Help.Summary("Get flat transactions", FeatureFlag.Testing) + @Help.Description( + """This function connects to the flat transaction stream for the given transaction filter and collects transactions + |until either `completeAfter` transactions have been received or `timeout` has elapsed. + |The returned transactions can be filtered to be between the given offsets (default: no filtering). + |If the participant has been pruned via `pruning.prune` and if `beginOffset` is lower than the pruning offset, + |this command fails with a `NOT_FOUND` error. 
If you only need to filter by a set of parties, consider using + |`ledger_api.transactions.flat` instead.""" + ) + def flat_with_tx_filter( + filter: TransactionFilter, + completeAfter: Int, + beginOffset: LedgerOffset = + new LedgerOffset().withBoundary(LedgerOffset.LedgerBoundary.LEDGER_BEGIN), + endOffset: Option[LedgerOffset] = None, + verbose: Boolean = true, + timeout: config.NonNegativeDuration = timeouts.ledgerCommand, + ): Seq[Transaction] = check(FeatureFlag.Testing)({ + val observer = new RecordingStreamObserver[Transaction](completeAfter) + mkResult( + subscribe_flat(observer, filter, beginOffset, endOffset, verbose), + "getTransactions", + observer, + timeout, + ) + }) + + @Help.Summary("Subscribe to the flat transaction stream", FeatureFlag.Testing) + @Help.Description("""This function connects to the flat transaction stream and passes transactions to `observer` until + |the stream is completed. + |Only transactions for parties in `filter.filterByParty.keys` will be returned. + |Use `filter = TransactionFilter(Map(myParty.toLf -> Filters()))` to return all transactions for `myParty: PartyId`. + |The returned transactions can be filtered to be between the given offsets (default: no filtering). 
+ |If the participant has been pruned via `pruning.prune` and if `beginOffset` is lower than the pruning offset, + |this command fails with a `NOT_FOUND` error.""") + def subscribe_flat( + observer: StreamObserver[Transaction], + filter: TransactionFilter, + beginOffset: LedgerOffset = + new LedgerOffset().withBoundary(LedgerOffset.LedgerBoundary.LEDGER_BEGIN), + endOffset: Option[LedgerOffset] = None, + verbose: Boolean = true, + ): AutoCloseable = { + check(FeatureFlag.Testing)( + consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.TransactionService.SubscribeFlat( + observer, + beginOffset, + endOffset, + filter, + verbose, + ) + ) + } + ) + } + + @Help.Summary("Starts measuring throughput at the transaction service", FeatureFlag.Testing) + @Help.Description( + """This function will subscribe on behalf of `parties` to the transaction tree stream and + |notify various metrics: + |The metric `.` counts the number of transaction trees emitted. + |The metric `.-tx-node-count` tracks the number of root events emitted as part of transaction trees. + |The metric `.-tx-size` tracks the number of bytes emitted as part of transaction trees. + | + |To stop measuring, you need to close the returned `AutoCloseable`. 
+ |Use the `onTransaction` parameter to register a callback that is called on every transaction tree.""" + ) + def start_measuring( + parties: Set[PartyId], + metricSuffix: String, + onTransaction: TransactionTree => Unit = _ => (), + )(implicit consoleEnvironment: ConsoleEnvironment): AutoCloseable = + check(FeatureFlag.Testing) { + + val metricName = MetricName(name, metricSuffix) + + val observer: StreamObserver[TransactionTree] = new StreamObserver[TransactionTree] { + + @nowarn("cat=deprecation") + val metricsFactory: MetricHandle.MetricsFactory = + consoleEnvironment.environment.metricsFactory.metricsFactory + + val metric: Meter = metricsFactory.meter(metricName) + val nodeCount: Histogram = metricsFactory.histogram(metricName :+ "tx-node-count") + val transactionSize: Histogram = metricsFactory.histogram(metricName :+ "tx-size") + + override def onNext(tree: TransactionTree): Unit = { + val s = tree.rootEventIds.size.toLong + metric.mark(s)(MetricsContext.Empty) + nodeCount.update(s) + transactionSize.update(tree.serializedSize)(MetricsContext.Empty) + onTransaction(tree) + } + + override def onError(t: Throwable): Unit = t match { + case t: StatusRuntimeException => + val err = GrpcError("start_measuring", name, t) + err match { + case gaveUp: GrpcError.GrpcClientGaveUp if gaveUp.isClientCancellation => + logger.info(s"Client cancelled measuring throughput (metric: $metricName).") + case _ => + logger.warn( + s"An error occurred while measuring throughput (metric: $metricName). Stop measuring. $err" + ) + } + case _: Throwable => + logger.warn( + s"An exception occurred while measuring throughput (metric: $metricName). 
Stop measuring.", + t, + ) + } + + override def onCompleted(): Unit = + logger.info(s"Stop measuring throughput (metric: $metricName).") + } + + val filterParty = TransactionFilter(parties.map(_.toLf -> Filters()).toMap) + + logger.info(s"Start measuring throughput (metric: $metricName).") + subscribe_trees(observer, filterParty, end(), verbose = false) + } + + @Help.Summary("Get a (tree) transaction by its ID", FeatureFlag.Testing) + @Help.Description( + """Get a transaction tree from the transaction stream by its ID. Returns None if the transaction is not (yet) + |known at the participant or if the transaction has been pruned via `pruning.prune`.""" + ) + def by_id(parties: Set[PartyId], id: String): Option[TransactionTree] = + check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.TransactionService.GetTransactionById(parties.map(_.toLf), id)( + consoleEnvironment.environment.executionContext + ) + ) + }) + + @Help.Summary("Get the domain that a transaction was committed over.", FeatureFlag.Testing) + @Help.Description( + """Get the domain that a transaction was committed over. Throws an error if the transaction is not (yet) known + |to the participant or if the transaction has been pruned via `pruning.prune`.""" + ) + def domain_of(transactionId: String): DomainId = + check(FeatureFlag.Testing)(domainOfTransaction(transactionId)) + } + + @Help.Summary("Submit commands", FeatureFlag.Testing) + @Help.Group("Command Submission") + object commands extends Helpful { + + @Help.Summary( + "Submit command and wait for the resulting transaction, returning the transaction tree or failing otherwise", + FeatureFlag.Testing, + ) + @Help.Description( + """Submits a command on behalf of the `actAs` parties, waits for the resulting transaction to commit and returns it. + | If the timeout is set, it also waits for the transaction to appear at all other configured + | participants who were involved in the transaction. 
The call blocks until the transaction commits or fails; + | the timeout only specifies how long to wait at the other participants. + | Fails if the transaction doesn't commit, or if it doesn't become visible to the involved participants in + | the allotted time. + | Note that if the optTimeout is set and the involved parties are concurrently enabled/disabled or their + | participants are connected/disconnected, the command may currently result in spurious timeouts or may + | return before the transaction appears at all the involved participants.""" + ) + def submit( + actAs: Seq[PartyId], + commands: Seq[Command], + workflowId: String = "", + commandId: String = "", + optTimeout: Option[NonNegativeDuration] = Some(timeouts.ledgerCommand), + deduplicationPeriod: Option[DeduplicationPeriod] = None, + submissionId: String = "", + minLedgerTimeAbs: Option[Instant] = None, + readAs: Seq[PartyId] = Seq.empty, + disclosedContracts: Seq[DisclosedContract] = Seq.empty, + applicationId: String = applicationId, + ): TransactionTree = check(FeatureFlag.Testing) { + val tx = consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.CommandService.SubmitAndWaitTransactionTree( + actAs.map(_.toLf), + readAs.map(_.toLf), + commands, + workflowId, + commandId, + deduplicationPeriod, + submissionId, + minLedgerTimeAbs, + disclosedContracts, + applicationId, + ) + ) + } + optionallyAwait(tx, tx.transactionId, optTimeout) + } + + @Help.Summary( + "Submit command and wait for the resulting transaction, returning the flattened transaction or failing otherwise", + FeatureFlag.Testing, + ) + @Help.Description( + """Submits a command on behalf of the `actAs` parties, waits for the resulting transaction to commit, and returns the "flattened" transaction. + | If the timeout is set, it also waits for the transaction to appear at all other configured + | participants who were involved in the transaction. 
The call blocks until the transaction commits or fails; + | the timeout only specifies how long to wait at the other participants. + | Fails if the transaction doesn't commit, or if it doesn't become visible to the involved participants in + | the allotted time. + | Note that if the optTimeout is set and the involved parties are concurrently enabled/disabled or their + | participants are connected/disconnected, the command may currently result in spurious timeouts or may + | return before the transaction appears at all the involved participants.""" + ) + def submit_flat( + actAs: Seq[PartyId], + commands: Seq[Command], + workflowId: String = "", + commandId: String = "", + optTimeout: Option[config.NonNegativeDuration] = Some(timeouts.ledgerCommand), + deduplicationPeriod: Option[DeduplicationPeriod] = None, + submissionId: String = "", + minLedgerTimeAbs: Option[Instant] = None, + readAs: Seq[PartyId] = Seq.empty, + disclosedContracts: Seq[DisclosedContract] = Seq.empty, + applicationId: String = applicationId, + ): Transaction = check(FeatureFlag.Testing) { + val tx = consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.CommandService.SubmitAndWaitTransaction( + actAs.map(_.toLf), + readAs.map(_.toLf), + commands, + workflowId, + commandId, + deduplicationPeriod, + submissionId, + minLedgerTimeAbs, + disclosedContracts, + applicationId, + ) + ) + } + optionallyAwait(tx, tx.transactionId, optTimeout) + } + + @Help.Summary("Submit command asynchronously", FeatureFlag.Testing) + @Help.Description( + """Provides access to the command submission service of the Ledger API. 
+ |See https://docs.daml.com/app-dev/services.html for documentation of the parameters.""" + ) + def submit_async( + actAs: Seq[PartyId], + commands: Seq[Command], + workflowId: String = "", + commandId: String = "", + deduplicationPeriod: Option[DeduplicationPeriod] = None, + submissionId: String = "", + minLedgerTimeAbs: Option[Instant] = None, + readAs: Seq[PartyId] = Seq.empty, + disclosedContracts: Seq[DisclosedContract] = Seq.empty, + applicationId: String = applicationId, + ): Unit = check(FeatureFlag.Testing) { + consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.CommandSubmissionService.Submit( + actAs.map(_.toLf), + readAs.map(_.toLf), + commands, + workflowId, + commandId, + deduplicationPeriod, + submissionId, + minLedgerTimeAbs, + disclosedContracts, + applicationId, + ) + ) + } + } + + } + + @Help.Summary("Read active contracts", FeatureFlag.Testing) + @Help.Group("Active Contracts") + object acs extends Helpful { + @Help.Summary("List the set of active contracts of a given party", FeatureFlag.Testing) + @Help.Description( + """This command will return the current set of active contracts for the given party. 
+ + Supported arguments: + - party: for which party you want to load the acs + - limit: limit (default set via canton.parameter.console) + - verbose: whether the resulting events should contain detailed type information + - filterTemplate: list of templates ids to filter for, empty sequence acts as a wildcard + - timeout: the maximum wait time for the complete acs to arrive + - includeCreatedEventBlob: whether the result should contain the createdEventBlobs, it works only + if the filterTemplate is non-empty + """ + ) + def of_party( + party: PartyId, + limit: PositiveInt = defaultLimit, + verbose: Boolean = true, + filterTemplates: Seq[TemplateId] = Seq.empty, + timeout: config.NonNegativeDuration = timeouts.unbounded, + includeCreatedEventBlob: Boolean = false, + ): Seq[WrappedCreatedEvent] = + check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.AcsService + .GetActiveContracts( + Set(party.toLf), + limit, + filterTemplates, + verbose, + timeout.asFiniteApproximation, + includeCreatedEventBlob, + )(consoleEnvironment.environment.scheduler) + ) + }) + + @Help.Summary( + "List the set of active contracts for all parties hosted on this participant", + FeatureFlag.Testing, + ) + @Help.Description( + """This command will return the current set of active contracts for all parties. 
+ + Supported arguments: + - limit: limit (default set via canton.parameter.console) + - verbose: whether the resulting events should contain detailed type information + - filterTemplate: list of templates ids to filter for, empty sequence acts as a wildcard + - timeout: the maximum wait time for the complete acs to arrive + - identityProviderId: limit the response to parties governed by the given identity provider + - includeCreatedEventBlob: whether the result should contain the createdEventBlobs, it works only + if the filterTemplate is non-empty + """ + ) + def of_all( + limit: PositiveInt = defaultLimit, + verbose: Boolean = true, + filterTemplates: Seq[TemplateId] = Seq.empty, + timeout: config.NonNegativeDuration = timeouts.unbounded, + identityProviderId: String = "", + includeCreatedEventBlob: Boolean = false, + ): Seq[WrappedCreatedEvent] = check(FeatureFlag.Testing)( + consoleEnvironment.runE { + for { + parties <- ledgerApiCommand( + LedgerApiCommands.PartyManagementService.ListKnownParties( + identityProviderId = identityProviderId + ) + ).toEither + localParties <- parties.filter(_.isLocal).map(_.party).traverse(LfPartyId.fromString) + res <- { + if (localParties.isEmpty) Right(Seq.empty) + else { + ledgerApiCommand( + LedgerApiCommands.AcsService.GetActiveContracts( + localParties.toSet, + limit, + filterTemplates, + verbose, + timeout.asFiniteApproximation, + includeCreatedEventBlob, + )(consoleEnvironment.environment.scheduler) + ).toEither + } + } + } yield res + } + ) + + @Help.Summary( + "Wait until the party sees the given contract in the active contract service", + FeatureFlag.Testing, + ) + @Help.Description( + "Will throw an exception if the contract is not found to be active within the given timeout" + ) + def await_active_contract( + party: PartyId, + contractId: LfContractId, + timeout: config.NonNegativeDuration = timeouts.ledgerCommand, + ): Unit = check(FeatureFlag.Testing) { + ConsoleMacros.utils.retry_until_true(timeout) { + 
of_party(party, verbose = false) + .exists(_.event.contractId == contractId.coid) + } + } + + @Help.Summary("Generic search for contracts", FeatureFlag.Testing) + @Help.Description( + """This search function returns an untyped ledger-api event. + |The find will wait until the contract appears or throw an exception once it times out.""" + ) + def find_generic( + partyId: PartyId, + filter: WrappedCreatedEvent => Boolean, + timeout: config.NonNegativeDuration = timeouts.ledgerCommand, + ): WrappedCreatedEvent = check(FeatureFlag.Testing) { + def scan: Option[WrappedCreatedEvent] = of_party(partyId).find(filter(_)) + ConsoleMacros.utils.retry_until_true(timeout)(scan.isDefined) + consoleEnvironment.runE { + scan.toRight(s"Failed to find contract for $partyId.") + } + } + } + + @Help.Summary("Manage parties through the Ledger API", FeatureFlag.Testing) + @Help.Group("Party Management") + object parties extends Helpful { + + @Help.Summary("Allocate a new party", FeatureFlag.Testing) + @Help.Description( + """Allocates a new party on the ledger. + party: a hint for generating the party identifier + displayName: a human-readable name of this party + annotations: key-value pairs associated with this party and stored locally on this Ledger API server + identityProviderId: identity provider id""" + ) + def allocate( + party: String, + displayName: String, + annotations: Map[String, String] = Map.empty, + identityProviderId: String = "", + ): PartyDetails = { + val proto = check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.PartyManagementService.AllocateParty( + partyIdHint = party, + displayName = displayName, + annotations = annotations, + identityProviderId = identityProviderId, + ) + ) + }) + PartyDetails.fromProtoPartyDetails(proto) + } + + @Help.Summary("List parties known by the Ledger API server", FeatureFlag.Testing) + @Help.Description( + """Lists parties known by the Ledger API server. 
+ identityProviderId: identity provider id""" + ) + def list(identityProviderId: String = ""): Seq[PartyDetails] = { + val proto = check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.PartyManagementService.ListKnownParties( + identityProviderId = identityProviderId + ) + ) + }) + proto.map(PartyDetails.fromProtoPartyDetails) + } + + @Help.Summary("Update participant-local party details") + @Help.Description( + """Currently you can update only the annotations. + |You cannot update other user attributes. + party: party to be updated, + modifier: a function to modify the party details, e.g.: `partyDetails => { partyDetails.copy(annotations = partyDetails.annotations.updated("a", "b").removed("c")) }` + identityProviderId: identity provider id""" + ) + def update( + party: PartyId, + modifier: PartyDetails => PartyDetails, + identityProviderId: String = "", + ): PartyDetails = { + val rawDetails = get(party = party) + val srcDetails = PartyDetails.fromProtoPartyDetails(rawDetails) + val modifiedDetails = modifier(srcDetails) + verifyOnlyModifiableFieldsWhereModified(srcDetails, modifiedDetails) + val annotationsUpdate = makeAnnotationsUpdate( + original = srcDetails.annotations, + modified = modifiedDetails.annotations, + ) + val rawUpdatedDetails = check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.PartyManagementService.Update( + party = party, + annotationsUpdate = Some(annotationsUpdate), + resourceVersionO = Some(rawDetails.localMetadata.fold("")(_.resourceVersion)), + identityProviderId = identityProviderId, + ) + ) + }) + PartyDetails.fromProtoPartyDetails(rawUpdatedDetails) + } + + @Help.Summary("Update party's identity provider id", FeatureFlag.Testing) + @Help.Description( + """Updates party's identity provider id. 
+ party: party to be updated + sourceIdentityProviderId: source identity provider id + targetIdentityProviderId: target identity provider id + """ + ) + def update_idp( + party: PartyId, + sourceIdentityProviderId: String, + targetIdentityProviderId: String, + ): Unit = + check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.PartyManagementService.UpdateIdp( + party = party, + sourceIdentityProviderId = sourceIdentityProviderId, + targetIdentityProviderId = targetIdentityProviderId, + ) + ) + }) + + private def verifyOnlyModifiableFieldsWhereModified( + srcDetails: PartyDetails, + modifiedDetails: PartyDetails, + ): Unit = { + val withAllowedUpdatesReverted = modifiedDetails.copy(annotations = srcDetails.annotations) + if (withAllowedUpdatesReverted != srcDetails) { + throw ModifyingNonModifiablePartyDetailsPropertiesError() + } + } + + private def get(party: PartyId, identityProviderId: String = ""): ProtoPartyDetails = { + check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.PartyManagementService.GetParty( + party = party, + identityProviderId = identityProviderId, + ) + ) + }) + } + + } + + @Help.Summary("Manage packages", FeatureFlag.Testing) + @Help.Group("Package Management") + object packages extends Helpful { + + @Help.Summary("Upload packages from Dar file", FeatureFlag.Testing) + @Help.Description("""Uploading the Dar can be done either through the ledger Api server or through the Canton admin Api. + |The Ledger Api is the portable method across ledgers. The Canton admin Api is more powerful as it allows for + |controlling Canton specific behaviour. + |In particular, a Dar uploaded using the ledger Api will not be available in the Dar store and can not be downloaded again. + |Additionally, Dars uploaded using the ledger Api will be vetted, but the system will not wait + |for the Dars to be successfully registered with all connected domains. 
As such, if a Dar is uploaded and then + |used immediately thereafter, a command might bounce due to missing package vettings.""") + def upload_dar(darPath: String): Unit = check(FeatureFlag.Testing) { + consoleEnvironment.run { + ledgerApiCommand(LedgerApiCommands.PackageService.UploadDarFile(darPath)) + } + } + + @Help.Summary("List Daml Packages", FeatureFlag.Testing) + def list(limit: PositiveInt = defaultLimit): Seq[PackageDetails] = + check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand(LedgerApiCommands.PackageService.ListKnownPackages(limit)) + }) + + } + + @Help.Summary("Monitor progress of commands", FeatureFlag.Testing) + @Help.Group("Command Completions") + object completions extends Helpful { + + @Help.Summary("Read the current command completion offset", FeatureFlag.Testing) + def end(): LedgerOffset = + check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.CommandCompletionService.CompletionEnd() + ) + }) + + @Help.Summary("Lists command completions following the specified offset", FeatureFlag.Testing) + @Help.Description( + """If the participant has been pruned via `pruning.prune` and if `offset` is lower than + |the pruning offset, this command fails with a `NOT_FOUND` error.""" + ) + def list( + partyId: PartyId, + atLeastNumCompletions: Int, + offset: LedgerOffset, + applicationId: String = applicationId, + timeout: NonNegativeDuration = timeouts.ledgerCommand, + filter: Completion => Boolean = _ => true, + ): Seq[Completion] = + check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.CommandCompletionService.CompletionRequest( + partyId.toLf, + offset, + atLeastNumCompletions, + timeout.asJavaApproximation, + applicationId, + )(filter, consoleEnvironment.environment.scheduler) + ) + }) + + @Help.Summary( + "Lists command completions following the specified offset along with the checkpoints included in the completions", + FeatureFlag.Testing, + ) + 
@Help.Description( + """If the participant has been pruned via `pruning.prune` and if `offset` is lower than + |the pruning offset, this command fails with a `NOT_FOUND` error.""" + ) + def list_with_checkpoint( + partyId: PartyId, + atLeastNumCompletions: Int, + offset: LedgerOffset, + applicationId: String = applicationId, + timeout: NonNegativeDuration = timeouts.ledgerCommand, + filter: Completion => Boolean = _ => true, + ): Seq[(Completion, Option[Checkpoint])] = + check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.CommandCompletionService.CompletionCheckpointRequest( + partyId.toLf, + offset, + atLeastNumCompletions, + timeout, + applicationId, + )(filter, consoleEnvironment.environment.scheduler) + ) + }) + + @Help.Summary("Subscribe to the command completion stream", FeatureFlag.Testing) + @Help.Description( + """This function connects to the command completion stream and passes command completions to `observer` until + |the stream is completed. + |Only completions for parties in `parties` will be returned. + |The returned completions start at `beginOffset` (default: `LEDGER_BEGIN`). 
the expected number of configs has been retrieved or the timeout has elapsed.
The request will fail if the maximum allowed number of separate configurations is reached.""" + ) + def create( + identityProviderId: String, + isDeactivated: Boolean = false, + jwksUrl: String, + issuer: String, + audience: Option[String], + ): IdentityProviderConfig = { + val config = check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.IdentityProviderConfigs.Create( + identityProviderId = + IdentityProviderId.Id(Ref.LedgerString.assertFromString(identityProviderId)), + isDeactivated = isDeactivated, + jwksUrl = JwksUrl.assertFromString(jwksUrl), + issuer = issuer, + audience = audience, + ) + ) + }) + IdentityProviderConfigClient.fromProtoConfig(config) + } + + @Help.Summary("Update an identity provider", FeatureFlag.Testing) + @Help.Description("""Update identity provider""") + def update( + identityProviderId: String, + isDeactivated: Boolean = false, + jwksUrl: String, + issuer: String, + audience: Option[String], + updateMask: FieldMask, + ): IdentityProviderConfig = { + val config = check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.IdentityProviderConfigs.Update( + domain.IdentityProviderConfig( + IdentityProviderId.Id(Ref.LedgerString.assertFromString(identityProviderId)), + isDeactivated, + JwksUrl(jwksUrl), + issuer, + audience, + ), + updateMask, + ) + ) + }) + IdentityProviderConfigClient.fromProtoConfig(config) + } + + @Help.Summary("Delete an identity provider configuration", FeatureFlag.Testing) + @Help.Description("""Delete an existing identity provider configuration""") + def delete(identityProviderId: String): Unit = { + check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.IdentityProviderConfigs.Delete( + IdentityProviderId.Id(Ref.LedgerString.assertFromString(identityProviderId)) + ) + ) + }) + } + + @Help.Summary("Get an identity provider configuration", FeatureFlag.Testing) + @Help.Description("""Get identity provider 
configuration by id""") + def get(identityProviderId: String): IdentityProviderConfig = { + val config = check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.IdentityProviderConfigs.Get( + IdentityProviderId.Id(Ref.LedgerString.assertFromString(identityProviderId)) + ) + ) + }) + IdentityProviderConfigClient.fromProtoConfig(config) + } + + @Help.Summary("List identity provider configurations", FeatureFlag.Testing) + @Help.Description("""List all existing identity provider configurations""") + def list(): Seq[IdentityProviderConfig] = { + val configs = check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.IdentityProviderConfigs.List() + ) + }) + configs.map(IdentityProviderConfigClient.fromProtoConfig) + } + } + + @Help.Summary("Manage Ledger Api Users", FeatureFlag.Testing) + @Help.Group("Ledger Api Users") + object users extends Helpful { + + @Help.Summary("Create a user with the given id", FeatureFlag.Testing) + @Help.Description( + """Users are used to dynamically managing the rights given to Daml applications. + |They allow us to link a stable local identifier (of an application) with a set of parties. 
Users are used to dynamically manage the rights given to Daml applications.
modifier: a function for modifying the user; e.g.:
+ id: user id + identityProviderId: identity provider id""") + def delete(id: String, identityProviderId: String = ""): Unit = + check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.Users.Delete( + id = id, + identityProviderId = identityProviderId, + ) + ) + }) + + @Help.Summary("List users", FeatureFlag.Testing) + @Help.Description("""List users of this participant node + filterUser: filter results using the given filter string + pageToken: used for pagination (the result contains a page token if there are further pages) + pageSize: default page size before the filter is applied + identityProviderId: identity provider id""") + def list( + filterUser: String = "", + pageToken: String = "", + pageSize: Int = 100, + identityProviderId: String = "", + ): UsersPage = { + val page: ListLedgerApiUsersResult = check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.Users.List( + filterUser = filterUser, + pageToken = pageToken, + pageSize = pageSize, + identityProviderId = identityProviderId, + ) + ) + }) + UsersPage( + users = page.users.map(User.fromLapiUser), + nextPageToken = page.nextPageToken, + ) + } + + @Help.Summary("Update user's identity provider id", FeatureFlag.Testing) + @Help.Description( + """Updates user's identity provider id. 
Users are used to dynamically manage the rights given to Daml applications.
Use this to revoke specific rights from a user.
applicationId: optional application id to which we want to restrict the report
@Help.Summary("Query event details", FeatureFlag.Testing) + @Help.Group("EventQuery") + object event_query extends Helpful { + + @Help.Summary("Get events by contract Id", FeatureFlag.Testing) + @Help.Description("""Return events associated with the given contract Id""") + def by_contract_id( + contractId: String, + requestingParties: Seq[PartyId], + ): GetEventsByContractIdResponse = + check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.QueryService + .GetEventsByContractId(contractId, requestingParties.map(_.toLf)) + ) + }) + + @Help.Summary("Get events by contract key", FeatureFlag.Testing) + @Help.Description("""Return events associated with the given contract key""") + def by_contract_key( + contractKey: api.v1.value.Value, + requestingParties: Seq[PartyId], + templateId: TemplateId, + continuationToken: Option[String] = None, + ): GetEventsByContractKeyResponse = + check(FeatureFlag.Testing)(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.QueryService + .GetEventsByContractKey( + contractKey, + requestingParties.map(_.toLf), + templateId, + continuationToken, + ) + ) + }) + } + + @Help.Summary("Group of commands that utilize java bindings", FeatureFlag.Testing) + @Help.Group("Ledger Api (Java bindings)") + object javaapi extends Helpful { + + @Help.Summary("Submit commands (Java bindings)", FeatureFlag.Testing) + @Help.Group("Command Submission (Java bindings)") + object commands extends Helpful { + @Help.Summary( + "Submit java codegen commands and wait for the resulting transaction, returning the transaction tree or failing otherwise", + FeatureFlag.Testing, + ) + @Help.Description( + """Submits a command on behalf of the `actAs` parties, waits for the resulting transaction to commit and returns it. + | If the timeout is set, it also waits for the transaction to appear at all other configured + | participants who were involved in the transaction. 
The call blocks until the transaction commits or fails; + | the timeout only specifies how long to wait at the other participants. + | Fails if the transaction doesn't commit, or if it doesn't become visible to the involved participants in + | the allotted time. + | Note that if the optTimeout is set and the involved parties are concurrently enabled/disabled or their + | participants are connected/disconnected, the command may currently result in spurious timeouts or may + | return before the transaction appears at all the involved participants.""" + ) + def submit( + actAs: Seq[PartyId], + commands: Seq[javab.data.Command], + workflowId: String = "", + commandId: String = "", + optTimeout: Option[NonNegativeDuration] = Some(timeouts.ledgerCommand), + deduplicationPeriod: Option[DeduplicationPeriod] = None, + submissionId: String = "", + minLedgerTimeAbs: Option[Instant] = None, + readAs: Seq[PartyId] = Seq.empty, + disclosedContracts: Seq[javab.data.DisclosedContract] = Seq.empty, + applicationId: String = applicationId, + ): javab.data.TransactionTree = check(FeatureFlag.Testing) { + val tx = consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.CommandService.SubmitAndWaitTransactionTree( + actAs.map(_.toLf), + readAs.map(_.toLf), + commands.map(c => Command.fromJavaProto(c.toProtoCommand)), + workflowId, + commandId, + deduplicationPeriod, + submissionId, + minLedgerTimeAbs, + disclosedContracts.map(c => DisclosedContract.fromJavaProto(c.toProto)), + applicationId, + ) + ) + } + javab.data.TransactionTree.fromProto( + TransactionTree.toJavaProto(optionallyAwait(tx, tx.transactionId, optTimeout)) + ) + } + + @Help.Summary( + "Submit java codegen command and wait for the resulting transaction, returning the flattened transaction or failing otherwise", + FeatureFlag.Testing, + ) + @Help.Description( + """Submits a command on behalf of the `actAs` parties, waits for the resulting transaction to commit, and returns the "flattened" transaction. 
+ | If the timeout is set, it also waits for the transaction to appear at all other configured + | participants who were involved in the transaction. The call blocks until the transaction commits or fails; + | the timeout only specifies how long to wait at the other participants. + | Fails if the transaction doesn't commit, or if it doesn't become visible to the involved participants in + | the allotted time. + | Note that if the optTimeout is set and the involved parties are concurrently enabled/disabled or their + | participants are connected/disconnected, the command may currently result in spurious timeouts or may + | return before the transaction appears at all the involved participants.""" + ) + def submit_flat( + actAs: Seq[PartyId], + commands: Seq[javab.data.Command], + workflowId: String = "", + commandId: String = "", + optTimeout: Option[config.NonNegativeDuration] = Some(timeouts.ledgerCommand), + deduplicationPeriod: Option[DeduplicationPeriod] = None, + submissionId: String = "", + minLedgerTimeAbs: Option[Instant] = None, + readAs: Seq[PartyId] = Seq.empty, + disclosedContracts: Seq[javab.data.DisclosedContract] = Seq.empty, + applicationId: String = applicationId, + ): javab.data.Transaction = check(FeatureFlag.Testing) { + val tx = consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.CommandService.SubmitAndWaitTransaction( + actAs.map(_.toLf), + readAs.map(_.toLf), + commands.map(c => Command.fromJavaProto(c.toProtoCommand)), + workflowId, + commandId, + deduplicationPeriod, + submissionId, + minLedgerTimeAbs, + disclosedContracts.map(c => DisclosedContract.fromJavaProto(c.toProto)), + applicationId, + ) + ) + } + javab.data.Transaction.fromProto( + Transaction.toJavaProto(optionallyAwait(tx, tx.transactionId, optTimeout)) + ) + } + + @Help.Summary("Submit java codegen command asynchronously", FeatureFlag.Testing) + @Help.Description( + """Provides access to the command submission service of the Ledger API. 
+ |See https://docs.daml.com/app-dev/services.html for documentation of the parameters.""" + ) + def submit_async( + actAs: Seq[PartyId], + commands: Seq[javab.data.Command], + workflowId: String = "", + commandId: String = "", + deduplicationPeriod: Option[DeduplicationPeriod] = None, + submissionId: String = "", + minLedgerTimeAbs: Option[Instant] = None, + readAs: Seq[PartyId] = Seq.empty, + disclosedContracts: Seq[javab.data.DisclosedContract] = Seq.empty, + applicationId: String = applicationId, + ): Unit = + ledger_api.commands.submit_async( + actAs, + commands.map(c => Command.fromJavaProto(c.toProtoCommand)), + workflowId, + commandId, + deduplicationPeriod, + submissionId, + minLedgerTimeAbs, + readAs, + disclosedContracts.map(c => DisclosedContract.fromJavaProto(c.toProto)), + applicationId, + ) + + } + + @Help.Summary("Read from transaction stream (Java bindings)", FeatureFlag.Testing) + @Help.Group("Transactions (Java bindings)") + object transactions extends Helpful { + + @Help.Summary( + "Get transaction trees in the format expected by the Java bindings", + FeatureFlag.Testing, + ) + @Help.Description( + """This function connects to the transaction tree stream for the given parties and collects transaction trees + |until either `completeAfter` transaction trees have been received or `timeout` has elapsed. + |The returned transaction trees can be filtered to be between the given offsets (default: no filtering). 
+ |If the participant has been pruned via `pruning.prune` and if `beginOffset` is lower than the pruning offset, + |this command fails with a `NOT_FOUND` error.""" + ) + def trees( + partyIds: Set[PartyId], + completeAfter: Int, + beginOffset: LedgerOffset = + new LedgerOffset().withBoundary(LedgerOffset.LedgerBoundary.LEDGER_BEGIN), + endOffset: Option[LedgerOffset] = None, + verbose: Boolean = true, + timeout: config.NonNegativeDuration = timeouts.ledgerCommand, + ): Seq[javab.data.TransactionTree] = check(FeatureFlag.Testing)({ + ledger_api.transactions + .trees(partyIds, completeAfter, beginOffset, endOffset, verbose, timeout) + .map(t => javab.data.TransactionTree.fromProto(TransactionTree.toJavaProto(t))) + }) + + @Help.Summary( + "Get flat transactions in the format expected by the Java bindings", + FeatureFlag.Testing, + ) + @Help.Description( + """This function connects to the flat transaction stream for the given parties and collects transactions + |until either `completeAfter` transactions have been received or `timeout` has elapsed. + |The returned transactions can be filtered to be between the given offsets (default: no filtering).
+ |If the participant has been pruned via `pruning.prune` and if `beginOffset` is lower than the pruning offset, + |this command fails with a `NOT_FOUND` error. If you need to specify filtering conditions for template IDs and + |including create event blobs for explicit disclosure, consider using `ledger_api.javaapi.transactions.flat_with_tx_filter` instead.""" + ) + def flat( + partyIds: Set[PartyId], + completeAfter: Int, + beginOffset: LedgerOffset = + new LedgerOffset().withBoundary(LedgerOffset.LedgerBoundary.LEDGER_BEGIN), + endOffset: Option[LedgerOffset] = None, + verbose: Boolean = true, + timeout: config.NonNegativeDuration = timeouts.ledgerCommand, + ): Seq[javab.data.Transaction] = check(FeatureFlag.Testing)({ + ledger_api.transactions + .flat(partyIds, completeAfter, beginOffset, endOffset, verbose, timeout) + .map(t => javab.data.Transaction.fromProto(Transaction.toJavaProto(t))) + }) + + @Help.Summary( + "Get flat transactions in the format expected by the Java bindings", + FeatureFlag.Testing, + ) + @Help.Description( + """This function connects to the flat transaction stream for the given transaction filter and collects transactions + |until either `completeAfter` transactions have been received or `timeout` has elapsed. + |The returned transactions can be filtered to be between the given offsets (default: no filtering). + |If the participant has been pruned via `pruning.prune` and if `beginOffset` is lower than the pruning offset, + |this command fails with a `NOT_FOUND` error.
If you only need to filter + |by a set of parties, consider using `ledger_api.javaapi.transactions.flat` instead.""" + ) + def flat_with_tx_filter( + filter: javab.data.TransactionFilter, + completeAfter: Int, + beginOffset: LedgerOffset = + new LedgerOffset().withBoundary(LedgerOffset.LedgerBoundary.LEDGER_BEGIN), + endOffset: Option[LedgerOffset] = None, + verbose: Boolean = true, + timeout: config.NonNegativeDuration = timeouts.ledgerCommand, + ): Seq[javab.data.Transaction] = check(FeatureFlag.Testing)({ + ledger_api.transactions + .flat_with_tx_filter( + TransactionFilter.fromJavaProto(filter.toProto), + completeAfter, + beginOffset, + endOffset, + verbose, + timeout, + ) + .map(t => javab.data.Transaction.fromProto(Transaction.toJavaProto(t))) + }) + + } + + @Help.Summary("Read active contracts (Java bindings)", FeatureFlag.Testing) + @Help.Group("Active Contracts (Java bindings)") + object acs extends Helpful { + + @Help.Summary( + "Wait until a contract becomes available and return the Java codegen contract", + FeatureFlag.Testing, + ) + @Help.Description( + """This function can be used for contracts with a code-generated Java model. + |You can refine your search using the `predicate` function argument.
+ |The command will wait until the contract appears or throw an exception once it times out.""" + ) + def await[ + TC <: javab.data.codegen.Contract[TCid, T], + TCid <: javab.data.codegen.ContractId[T], + T <: javab.data.Template, + ](companion: javab.data.codegen.ContractCompanion[TC, TCid, T])( + partyId: PartyId, + predicate: TC => Boolean = (_: TC) => true, + timeout: config.NonNegativeDuration = timeouts.ledgerCommand, + ): TC = check(FeatureFlag.Testing)({ + val result = new AtomicReference[Option[TC]](None) + ConsoleMacros.utils.retry_until_true(timeout) { + val tmp = filter(companion)(partyId, predicate) + result.set(tmp.headOption) + tmp.nonEmpty + } + consoleEnvironment.runE { + result + .get() + .toRight(s"Failed to find contract of type ${companion.TEMPLATE_ID} after $timeout") + } + }) + + @Help.Summary( + "Filter the ACS for contracts of a particular Java code-generated template", + FeatureFlag.Testing, + ) + @Help.Description( + """To use this function, ensure a code-generated Java model for the target template exists. 
+ |You can refine your search using the `predicate` function argument.""" + ) + def filter[ + TC <: javab.data.codegen.Contract[TCid, T], + TCid <: javab.data.codegen.ContractId[T], + T <: javab.data.Template, + ](templateCompanion: javab.data.codegen.ContractCompanion[TC, TCid, T])( + partyId: PartyId, + predicate: TC => Boolean = (_: TC) => true, + ): Seq[TC] = check(FeatureFlag.Testing) { + val javaTemplateId = templateCompanion.TEMPLATE_ID + val templateId = TemplateId( + javaTemplateId.getPackageId, + javaTemplateId.getModuleName, + javaTemplateId.getEntityName, + ) + ledger_api.acs + .of_party(partyId, filterTemplates = Seq(templateId)) + .map(_.event) + .flatMap(ev => + JavaDecodeUtil + .decodeCreated(templateCompanion)( + javab.data.CreatedEvent.fromProto(CreatedEvent.toJavaProto(ev)) + ) + .toList + ) + .filter(predicate) + } + + } + + @Help.Summary("Query event details", FeatureFlag.Testing) + @Help.Group("EventQuery") + object event_query extends Helpful { + + @Help.Summary("Get events in java codegen by contract Id", FeatureFlag.Testing) + @Help.Description("""Return events associated with the given contract Id""") + def by_contract_id( + contractId: String, + requestingParties: Seq[PartyId], + ): EventQueryServiceOuterClass.GetEventsByContractIdResponse = + check(FeatureFlag.Testing)( + GetEventsByContractIdResponse.toJavaProto(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.QueryService + .GetEventsByContractId(contractId, requestingParties.map(_.toLf)) + ) + }) + ) + + @Help.Summary("Get events in java codegen format by contract key", FeatureFlag.Testing) + @Help.Description("""Return events associated with the given contract key""") + def by_contract_key( + contractKey: ValueOuterClass.Value, + requestingParties: Seq[PartyId], + templateId: TemplateId, + continuationToken: Option[String] = None, + ): EventQueryServiceOuterClass.GetEventsByContractKeyResponse = + check(FeatureFlag.Testing)( + 
GetEventsByContractKeyResponse.toJavaProto(consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.QueryService + .GetEventsByContractKey( + Value.fromJavaProto(contractKey), + requestingParties.map(_.toLf), + templateId, + continuationToken, + ) + ) + }) + ) + + } + + } + + } + + /** @return The modified map where deletion from the original are represented as keys with empty values + */ + private def makeAnnotationsUpdate( + original: Map[String, String], + modified: Map[String, String], + ): Map[String, String] = { + val deletions = original.removedAll(modified.keys).view.mapValues(_ => "").toMap + modified.concat(deletions) + } + +} + +trait LedgerApiAdministration extends BaseLedgerApiAdministration { + this: LedgerApiCommandRunner & AdminCommandRunner & NamedLogging & FeatureFlagFilter => + + implicit protected val consoleEnvironment: ConsoleEnvironment + protected val name: String + + override protected def domainOfTransaction(transactionId: String): DomainId = { + val txId = LedgerTransactionId.assertFromString(transactionId) + consoleEnvironment.run { + adminCommand(ParticipantAdminCommands.Inspection.LookupTransactionDomain(txId)) + } + } + + import com.digitalasset.canton.util.ShowUtil.* + + private def awaitTransaction( + transactionId: String, + at: Map[ParticipantReferenceCommon, PartyId], + timeout: config.NonNegativeDuration, + ): Unit = { + def scan() = { + at.map { case (participant, party) => + ( + participant, + party, + participant.ledger_api.transactions.by_id(Set(party), transactionId).isDefined, + ) + } + } + ConsoleMacros.utils.retry_until_true(timeout)( + scan().forall(_._3), { + val res = scan().map { case (participant, party, res) => + s"${party.toString}@${participant.toString}: ${if (res) "observed" else "not observed"}" + } + s"Failed to observe transaction on all nodes: ${res.mkString(", ")}" + }, + ) + + } + + private[console] def involvedParticipants( + transactionId: String + ): Map[ParticipantReferenceCommon, PartyId] = { 
+ val txDomain = ledger_api.transactions.domain_of(transactionId) + // TODO(#6317) + // There's a race condition here, in the unlikely circumstance that the party->participant mapping on the domain + // changes during the command's execution. We'll have to live with it for the moment, as there's no convenient + // way to get the record time of the transaction to pass to the parties.list call. + val domainPartiesAndParticipants = { + val pNodes = consoleEnvironment.participants.all.iterator + .filter(x => x.health.running() && x.health.initialized() && x.name == name) + .flatMap(_.parties.list(filterDomain = txDomain.filterString)) + .toSet + val pXNodes = consoleEnvironment.participantsX.all.iterator + .filter(x => x.health.running() && x.health.initialized() && x.name == name) + .flatMap(_.parties.list(filterDomain = txDomain.filterString)) + .toSet + pNodes ++ pXNodes + } + + val domainParties = domainPartiesAndParticipants.map(_.party) + // WARNING! this logic will become highly problematic if we introduce witness blinding based on topology events + // Read the transaction under the authority of all parties on the domain, in order to get the witness_parties + // to be all the actual witnesses of the transaction. There's no other convenient way to get the full witnesses, + // as the Exercise events don't contain the informees of the Exercise action. + val tree = ledger_api.transactions + .by_id(domainParties, transactionId) + .getOrElse( + throw new IllegalStateException( + s"Can't find transaction by ID: $transactionId. 
Queried parties: $domainParties" + ) + ) + val witnesses = tree.eventsById.values + .flatMap { ev => + ev.kind.created.fold(Seq.empty[String])(ev => ev.witnessParties) ++ + ev.kind.exercised.fold(Seq.empty[String])(ev => ev.witnessParties) + } + .map(PartyId.tryFromProtoPrimitive) + .toSet + + // A participant identity equality check that doesn't blow up if the participant isn't running + def identityIs(pRef: ParticipantReferenceCommon, id: ParticipantId): Boolean = pRef match { + case lRef: LocalParticipantReferenceCommon => + lRef.is_running && lRef.health.initialized() && lRef.id == id + case rRef: RemoteParticipantReferenceCommon => + rRef.health.initialized() && rRef.id == id + case _ => false + } + + // Map each involved participant to some party that witnessed the transaction (it doesn't matter which one) + domainPartiesAndParticipants.toList.foldMapK { cand => + if (witnesses.contains(cand.party)) { + val involvedConsoleParticipants = cand.participants.mapFilter { pd => + for { + participantReference <- + (consoleEnvironment.participants.all ++ consoleEnvironment.participantsX.all) + .filter(x => x.health.running() && x.health.initialized()) + .find(identityIs(_, pd.participant)) + _ <- pd.domains.find(_.domain == txDomain) + } yield participantReference + } + involvedConsoleParticipants + .map(_ -> cand.party) + .toMap + } else Map.empty + } + } + + protected def optionallyAwait[Tx]( + tx: Tx, + txId: String, + optTimeout: Option[config.NonNegativeDuration], + ): Tx = { + optTimeout match { + case None => tx + case Some(timeout) => + val involved = involvedParticipants(txId) + logger.debug(show"Awaiting transaction ${txId.unquoted} at ${involved.keys.mkShow()}") + awaitTransaction(txId, involved, timeout) + tx + } + } +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/MediatorAdministrationGroup.scala 
b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/MediatorAdministrationGroup.scala new file mode 100644 index 0000000000..0b04cf4bb0 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/MediatorAdministrationGroup.scala @@ -0,0 +1,232 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.console.commands + +import com.digitalasset.canton.admin.api.client.commands.EnterpriseMediatorAdministrationCommands.{ + Initialize, + InitializeX, + LocatePruningTimestampCommand, + Prune, +} +import com.digitalasset.canton.admin.api.client.commands.{ + DomainTimeCommands, + PruningSchedulerCommands, +} +import com.digitalasset.canton.admin.api.client.data.StaticDomainParameters +import com.digitalasset.canton.config.NonNegativeDuration +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.console.{ + AdminCommandRunner, + ConsoleEnvironment, + FeatureFlag, + FeatureFlagFilter, + Help, + Helpful, +} +import com.digitalasset.canton.crypto.{Fingerprint, PublicKey} +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.domain.admin.v0.EnterpriseMediatorAdministrationServiceGrpc +import com.digitalasset.canton.domain.admin.v0.EnterpriseMediatorAdministrationServiceGrpc.EnterpriseMediatorAdministrationServiceStub +import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.sequencing.{SequencerConnection, SequencerConnections} +import com.digitalasset.canton.time.NonNegativeFiniteDuration +import com.digitalasset.canton.topology.store.StoredTopologyTransactions +import com.digitalasset.canton.topology.transaction.TopologyChangeOp +import com.digitalasset.canton.topology.{DomainId, MediatorId} + +import scala.concurrent.duration.FiniteDuration + +class MediatorTestingGroup( + 
runner: AdminCommandRunner, + val consoleEnvironment: ConsoleEnvironment, + val loggerFactory: NamedLoggerFactory, +) extends FeatureFlagFilter + with Helpful { + + @Help.Summary("Fetch the current time from the domain", FeatureFlag.Testing) + def fetch_domain_time( + timeout: NonNegativeDuration = consoleEnvironment.commandTimeouts.ledgerCommand + ): CantonTimestamp = + check(FeatureFlag.Testing) { + consoleEnvironment.run { + runner.adminCommand( + DomainTimeCommands.FetchTime(None, NonNegativeFiniteDuration.Zero, timeout) + ) + }.timestamp + } + + @Help.Summary("Await for the given time to be reached on the domain", FeatureFlag.Testing) + def await_domain_time(time: CantonTimestamp, timeout: NonNegativeDuration): Unit = + check(FeatureFlag.Testing) { + consoleEnvironment.run { + runner.adminCommand( + DomainTimeCommands.AwaitTime(None, time, timeout) + ) + } + } +} + +class MediatorPruningAdministrationGroup( + runner: AdminCommandRunner, + consoleEnvironment: ConsoleEnvironment, + loggerFactory: NamedLoggerFactory, +) extends PruningSchedulerAdministration( + runner, + consoleEnvironment, + new PruningSchedulerCommands[EnterpriseMediatorAdministrationServiceStub]( + EnterpriseMediatorAdministrationServiceGrpc.stub, + _.setSchedule(_), + _.clearSchedule(_), + _.setCron(_), + _.setMaxDuration(_), + _.setRetention(_), + _.getSchedule(_), + ), + loggerFactory, + ) + with Helpful { + + @Help.Summary( + "Prune the mediator of unnecessary data while keeping data for the default retention period" + ) + @Help.Description( + """Removes unnecessary data from the Mediator that is earlier than the default retention period. 
+ |The default retention period is set in the configuration of the canton node running this + |command under `parameters.retention-period-defaults.mediator`.""" + ) + def prune(): Unit = { + val defaultRetention = + consoleEnvironment.environment.config.parameters.retentionPeriodDefaults.mediator + prune_with_retention_period(defaultRetention.underlying) + } + + @Help.Summary( + "Prune the mediator of unnecessary data while keeping data for the provided retention period" + ) + def prune_with_retention_period(retentionPeriod: FiniteDuration): Unit = { + import scala.jdk.DurationConverters.* + val pruneUpTo = consoleEnvironment.environment.clock.now.minus(retentionPeriod.toJava) + prune_at(pruneUpTo) + } + + @Help.Summary("Prune the mediator of unnecessary data up to and including the given timestamp") + def prune_at(timestamp: CantonTimestamp): Unit = consoleEnvironment.run { + runner.adminCommand(Prune(timestamp)) + } + + @Help.Summary("Obtain a timestamp at or near the beginning of mediator state") + @Help.Description( + """This command provides insight into the current state of mediator pruning when called with + |the default value of `index` 1. 
+ |When pruning the mediator manually via `prune_at` and with the intent to prune in batches, specify + |a value such as 1000 to obtain a pruning timestamp that corresponds to the "end" of the batch.""" + ) + def locate_pruning_timestamp( + index: PositiveInt = PositiveInt.tryCreate(1) + ): Option[CantonTimestamp] = + consoleEnvironment.run { + runner.adminCommand(LocatePruningTimestampCommand(index)) + } + +} + +class MediatorAdministrationGroup( + runner: AdminCommandRunner, + consoleEnvironment: ConsoleEnvironment, + loggerFactory: NamedLoggerFactory, +) extends MediatorPruningAdministrationGroup(runner, consoleEnvironment, loggerFactory) { + + private lazy val testing_ = new MediatorTestingGroup(runner, consoleEnvironment, loggerFactory) + @Help.Summary("Testing functionality for the mediator") + @Help.Group("Testing") + def testing: MediatorTestingGroup = testing_ + +} + +@Help.Summary("Manage the mediator component") +@Help.Group("Mediator") +class MediatorAdministrationGroupWithInit( + runner: AdminCommandRunner, + consoleEnvironment: ConsoleEnvironment, + loggerFactory: NamedLoggerFactory, +) extends MediatorAdministrationGroup(runner, consoleEnvironment, loggerFactory) { + + @Help.Summary("Initialize a mediator") + def initialize( + domainId: DomainId, + mediatorId: MediatorId, + domainParameters: StaticDomainParameters, + sequencerConnections: SequencerConnections, + topologySnapshot: Option[StoredTopologyTransactions[TopologyChangeOp.Positive]], + signingKeyFingerprint: Option[Fingerprint] = None, + ): PublicKey = consoleEnvironment.run { + runner.adminCommand( + Initialize( + domainId, + mediatorId, + topologySnapshot, + domainParameters.toInternal, + sequencerConnections, + signingKeyFingerprint, + ) + ) + } + + @Help.Summary("Initialize a mediator") + def initialize( + domainId: DomainId, + mediatorId: MediatorId, + domainParameters: StaticDomainParameters, + sequencerConnection: SequencerConnection, + topologySnapshot: 
Option[StoredTopologyTransactions[TopologyChangeOp.Positive]], + signingKeyFingerprint: Option[Fingerprint], + ): PublicKey = consoleEnvironment.run { + runner.adminCommand( + Initialize( + domainId, + mediatorId, + topologySnapshot, + domainParameters.toInternal, + SequencerConnections.single(sequencerConnection), + signingKeyFingerprint, + ) + ) + } + +} + +trait MediatorXAdministrationGroupWithInit extends ConsoleCommandGroup { + + @Help.Summary("Methods used to initialize the node") + object setup extends ConsoleCommandGroup.Impl(this) with InitNodeId { + + @Help.Summary("Assign a mediator to a domain") + def assign( + domainId: DomainId, + domainParameters: StaticDomainParameters, + sequencerConnections: SequencerConnections, + ): Unit = consoleEnvironment.run { + runner.adminCommand( + InitializeX( + domainId, + domainParameters.toInternal, + sequencerConnections, + ) + ) + } + + } + + private lazy val testing_ = new MediatorTestingGroup(runner, consoleEnvironment, loggerFactory) + @Help.Summary("Testing functionality for the mediator") + @Help.Group("Testing") + def testing: MediatorTestingGroup = testing_ + + private lazy val pruning_ = + new MediatorPruningAdministrationGroup(runner, consoleEnvironment, loggerFactory) + @Help.Summary("Pruning functionality for the mediator") + @Help.Group("Testing") + def pruning: MediatorPruningAdministrationGroup = pruning_ + +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ParticipantAdministration.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ParticipantAdministration.scala new file mode 100644 index 0000000000..0d67dbcadc --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ParticipantAdministration.scala @@ -0,0 +1,1721 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.console.commands + +import cats.syntax.option.* +import cats.syntax.traverse.* +import com.daml.ledger.api.v1.ledger_offset.LedgerOffset +import com.daml.lf.data.Ref.PackageId +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.admin.api.client.commands.ParticipantAdminCommands.Pruning.{ + GetParticipantScheduleCommand, + SetParticipantScheduleCommand, +} +import com.digitalasset.canton.admin.api.client.commands.ParticipantAdminCommands.Resources.{ + GetResourceLimits, + SetResourceLimits, +} +import com.digitalasset.canton.admin.api.client.commands.{ + DomainTimeCommands, + LedgerApiCommands, + ParticipantAdminCommands, + PruningSchedulerCommands, +} +import com.digitalasset.canton.admin.api.client.data.{ + DarMetadata, + ListConnectedDomainsResult, + ParticipantPruningSchedule, +} +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.config.{DomainTimeTrackerConfig, NonNegativeDuration} +import com.digitalasset.canton.console.{ + AdminCommandRunner, + BaseInspection, + CommandFailure, + ConsoleEnvironment, + ConsoleMacros, + DomainReference, + FeatureFlag, + FeatureFlagFilter, + Help, + Helpful, + InstanceReferenceWithSequencerConnection, + LedgerApiCommandRunner, + ParticipantReference, + ParticipantReferenceCommon, +} +import com.digitalasset.canton.crypto.SyncCryptoApiProvider +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.health.admin.data.ParticipantStatus +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging, TracedLogger} +import com.digitalasset.canton.participant.ParticipantNode +import com.digitalasset.canton.participant.admin.grpc.TransferSearchResult +import com.digitalasset.canton.participant.admin.inspection.SyncStateInspection +import com.digitalasset.canton.participant.admin.v0.PruningServiceGrpc +import 
com.digitalasset.canton.participant.admin.v0.PruningServiceGrpc.PruningServiceStub +import com.digitalasset.canton.participant.admin.{ResourceLimits, v0} +import com.digitalasset.canton.participant.domain.DomainConnectionConfig +import com.digitalasset.canton.participant.sync.TimestampedEvent +import com.digitalasset.canton.protocol.messages.{ + AcsCommitment, + CommitmentPeriod, + SignedProtocolMessage, +} +import com.digitalasset.canton.protocol.{ + LfCommittedTransaction, + LfContractId, + SerializableContract, + TransferId, +} +import com.digitalasset.canton.sequencing.{ + PossiblyIgnoredProtocolEvent, + SequencerConnection, + SequencerConnections, +} +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.time.NonNegativeFiniteDuration +import com.digitalasset.canton.topology.{DomainId, ParticipantId, PartyId} +import com.digitalasset.canton.tracing.NoTracing +import com.digitalasset.canton.util.ShowUtil.* +import com.digitalasset.canton.util.* +import com.digitalasset.canton.{ + DiscardOps, + DomainAlias, + LedgerApplicationId, + SequencerAlias, + config, +} + +import java.time.Instant +import java.util.UUID +import scala.concurrent.TimeoutException +import scala.concurrent.duration.Duration + +sealed trait DomainChoice +object DomainChoice { + object All extends DomainChoice + final case class Only(aliases: Seq[DomainAlias]) extends DomainChoice +} + +private[console] object ParticipantCommands { + + object dars { + + def upload( + runner: AdminCommandRunner, + path: String, + vetAllPackages: Boolean, + synchronizeVetting: Boolean, + logger: TracedLogger, + ) = + runner.adminCommand( + ParticipantAdminCommands.Package + .UploadDar(Some(path), vetAllPackages, synchronizeVetting, logger) + ) + + } + + object domains { + + def referenceToConfig( + domain: NonEmpty[Map[SequencerAlias, InstanceReferenceWithSequencerConnection]], + manualConnect: Boolean = false, + alias: Option[DomainAlias] = None, + maxRetryDelay: 
Option[NonNegativeFiniteDuration] = None, + priority: Int = 0, + sequencerTrustThreshold: PositiveInt = PositiveInt.tryCreate(1), + ): DomainConnectionConfig = { + val domainAlias = alias.getOrElse( + DomainAlias.tryCreate(domain.head1._2.name) + ) // TODO(#14048): Come up with a good way of giving it a good alias + DomainConnectionConfig( + domainAlias, + SequencerConnections.tryMany( + domain.toSeq.map { case (alias, domain) => + domain.sequencerConnection.withAlias(alias) + }, + sequencerTrustThreshold, + ), + manualConnect = manualConnect, + None, + priority, + None, + maxRetryDelay, + DomainTimeTrackerConfig(), + ) + } + + def toConfig( + domainAlias: DomainAlias, + connection: String, + manualConnect: Boolean = false, + domainId: Option[DomainId] = None, + certificatesPath: String = "", + priority: Int = 0, + initialRetryDelay: Option[NonNegativeFiniteDuration] = None, + maxRetryDelay: Option[NonNegativeFiniteDuration] = None, + timeTrackerConfig: DomainTimeTrackerConfig = DomainTimeTrackerConfig(), + ): DomainConnectionConfig = { + // architecture-handbook-entry-begin: OnboardParticipantToConfig + val certificates = OptionUtil.emptyStringAsNone(certificatesPath).map { path => + BinaryFileUtil.readByteStringFromFile(path) match { + case Left(err) => throw new IllegalArgumentException(s"failed to load ${path}: ${err}") + case Right(bs) => bs + } + } + DomainConnectionConfig.grpc( + SequencerAlias.Default, + domainAlias, + connection, + manualConnect, + domainId, + certificates, + priority, + initialRetryDelay, + maxRetryDelay, + timeTrackerConfig, + ) + // architecture-handbook-entry-end: OnboardParticipantToConfig + } + + def register(runner: AdminCommandRunner, config: DomainConnectionConfig) = + runner.adminCommand( + ParticipantAdminCommands.DomainConnectivity.RegisterDomain(config) + ) + def reconnect(runner: AdminCommandRunner, domainAlias: DomainAlias, retry: Boolean) = { + runner.adminCommand( + 
ParticipantAdminCommands.DomainConnectivity.ConnectDomain(domainAlias, retry) + ) + } + + def list_connected(runner: AdminCommandRunner) = + runner.adminCommand( + ParticipantAdminCommands.DomainConnectivity.ListConnectedDomains() + ) + + def reconnect_all(runner: AdminCommandRunner, ignoreFailures: Boolean) = + runner.adminCommand( + ParticipantAdminCommands.DomainConnectivity.ReconnectDomains(ignoreFailures) + ) + + def disconnect(runner: AdminCommandRunner, domainAlias: DomainAlias) = + runner.adminCommand(ParticipantAdminCommands.DomainConnectivity.DisconnectDomain(domainAlias)) + + } +} + +class ParticipantTestingGroup( + participantRef: ParticipantReferenceCommon, + val consoleEnvironment: ConsoleEnvironment, + val loggerFactory: NamedLoggerFactory, +) extends FeatureFlagFilter + with Helpful { + import participantRef.* + + @Help.Summary( + "Send a bong to a set of target parties over the ledger. Levels > 0 leads to an exploding ping with exponential number of contracts. " + + "Throw a RuntimeException in case of failure.", + FeatureFlag.Testing, + ) + @Help.Description( + """Initiates a racy ping to multiple participants, + measuring the roundtrip time of the fastest responder, with an optional timeout. + Grace-period is the time the bong will wait for a duplicate spent (which would indicate an error in the system) before exiting. + If levels > 0, the ping command will lead to a binary explosion and subsequent dilation of + contracts, where ``level`` determines the number of levels we will explode. As a result, the system will create + (2^(L+2) - 3) contracts (where L stands for ``level``). + Normally, only the initiator is a validator. Additional validators can be added using the validators argument. 
+ The bong command comes handy to run a burst test against the system and quickly leads to an overloading state.""" + ) + def bong( + targets: Set[ParticipantId], + validators: Set[ParticipantId] = Set(), + timeout: NonNegativeDuration = consoleEnvironment.commandTimeouts.testingBong, + levels: Long = 0, + gracePeriodMillis: Long = 1000, + workflowId: String = "", + id: String = "", + ): Duration = { + consoleEnvironment.runE( + maybe_bong(targets, validators, timeout, levels, gracePeriodMillis, workflowId, id) + .toRight( + s"Unable to bong $targets with $levels levels within ${LoggerUtil.roundDurationForHumans(timeout.duration)}" + ) + ) + } + + @Help.Summary("Like bong, but returns None in case of failure.", FeatureFlag.Testing) + def maybe_bong( + targets: Set[ParticipantId], + validators: Set[ParticipantId] = Set(), + timeout: NonNegativeDuration = consoleEnvironment.commandTimeouts.testingBong, + levels: Long = 0, + gracePeriodMillis: Long = 1000, + workflowId: String = "", + id: String = "", + ): Option[Duration] = + check(FeatureFlag.Testing)(consoleEnvironment.run { + adminCommand( + ParticipantAdminCommands.Ping + .Ping( + targets.map(_.adminParty.toLf), + validators.map(_.adminParty.toLf), + timeout.duration.toMillis, + levels, + gracePeriodMillis, + workflowId, + id, + ) + ) + }) + + @Help.Summary("Fetch the current time from the given domain", FeatureFlag.Testing) + def fetch_domain_time( + domainAlias: DomainAlias, + timeout: NonNegativeDuration, + ): CantonTimestamp = + check(FeatureFlag.Testing) { + val id = participantRef.domains.id_of(domainAlias) + fetch_domain_time(id, timeout) + } + + @Help.Summary("Fetch the current time from the given domain", FeatureFlag.Testing) + def fetch_domain_time( + domainId: DomainId, + timeout: NonNegativeDuration = consoleEnvironment.commandTimeouts.ledgerCommand, + ): CantonTimestamp = + check(FeatureFlag.Testing) { + consoleEnvironment.run { + adminCommand( + DomainTimeCommands.FetchTime( + domainId.some, + 
NonNegativeFiniteDuration.Zero, + timeout, + ) + ) + }.timestamp + } + + @Help.Summary("Fetch the current time from all connected domains", FeatureFlag.Testing) + def fetch_domain_times( + timeout: NonNegativeDuration = consoleEnvironment.commandTimeouts.ledgerCommand + ): Unit = + check(FeatureFlag.Testing) { + participantRef.domains.list_connected().foreach { item => + fetch_domain_time(item.domainId, timeout).discard[CantonTimestamp] + } + } + + @Help.Summary("Await for the given time to be reached on the given domain", FeatureFlag.Testing) + def await_domain_time( + domainAlias: DomainAlias, + time: CantonTimestamp, + timeout: NonNegativeDuration, + ): Unit = + check(FeatureFlag.Testing) { + val id = participantRef.domains.id_of(domainAlias) + await_domain_time(id, time, timeout) + } + + @Help.Summary("Await for the given time to be reached on the given domain", FeatureFlag.Testing) + def await_domain_time( + domainId: DomainId, + time: CantonTimestamp, + timeout: NonNegativeDuration = consoleEnvironment.commandTimeouts.ledgerCommand, + ): Unit = + check(FeatureFlag.Testing) { + consoleEnvironment.run { + adminCommand( + DomainTimeCommands.AwaitTime( + domainId.some, + time, + timeout, + ) + ) + } + } +} + +class LocalParticipantTestingGroup( + participantRef: ParticipantReference with BaseInspection[ParticipantNode], + consoleEnvironment: ConsoleEnvironment, + loggerFactory: NamedLoggerFactory, +) extends ParticipantTestingGroup(participantRef, consoleEnvironment, loggerFactory) + with FeatureFlagFilter + with NoTracing { + + protected def defaultLimit: PositiveInt = + consoleEnvironment.environment.config.parameters.console.defaultLimit + + import participantRef.* + @Help.Summary("Lookup contracts in the Private Contract Store", FeatureFlag.Testing) + @Help.Description("""Get raw access to the PCS of the given domain sync controller. + The filter commands will check if the target value ``contains`` the given string. 
+ The arguments can be started with ``^`` such that ``startsWith`` is used for comparison or ``!`` to use ``equals``. + The ``activeSet`` argument allows to restrict the search to the active contract set. + """) + def pcs_search( + domainAlias: DomainAlias, + // filter by id (which is txId::discriminator, so can be used to look for both) + filterId: String = "", + filterPackage: String = "", + filterTemplate: String = "", + // only include active contracts + activeSet: Boolean = false, + limit: PositiveInt = defaultLimit, + ): List[(Boolean, SerializableContract)] = { + def toOpt(str: String) = OptionUtil.emptyStringAsNone(str) + + val pcs = state_inspection + .findContracts( + domainAlias, + toOpt(filterId), + toOpt(filterPackage), + toOpt(filterTemplate), + limit.value, + ) + if (activeSet) pcs.filter { case (isActive, _) => isActive } + else pcs + } + + @Help.Summary("Lookup of active contracts", FeatureFlag.Testing) + def acs_search( + domainAlias: DomainAlias, + // filter by id (which is txId::discriminator, so can be used to look for both) + filterId: String = "", + filterPackage: String = "", + filterTemplate: String = "", + filterStakeholder: Option[PartyId] = None, + limit: PositiveInt = defaultLimit, + ): List[SerializableContract] = { + val predicate = (c: SerializableContract) => + filterStakeholder.forall(s => c.metadata.stakeholders.contains(s.toLf)) + + check(FeatureFlag.Testing) { + pcs_search(domainAlias, filterId, filterPackage, filterTemplate, activeSet = true, limit) + .map(_._2) + .filter(predicate) + } + } + + @Help.Summary("Lookup of events", FeatureFlag.Testing) + @Help.Description( + """Show the event logs. To select only events from a particular domain, use the domain alias. + Leave the domain empty to search the combined event log containing the events of all domains. + Note that if the domain is left blank, the values of `from` and `to` cannot be set. 
+ This is because the combined event log isn't guaranteed to have increasing timestamps. + """ + ) + def event_search( + domain: Option[DomainAlias] = None, + from: Option[Instant] = None, + to: Option[Instant] = None, + limit: PositiveInt = defaultLimit, + ): Seq[(String, TimestampedEvent)] = { + check(FeatureFlag.Testing) { + if (domain.isEmpty && (from.isDefined || to.isDefined)) { + logger.error( + s"You are not allowed to set values for 'from' and 'to' if searching the combined event log " + + s"(you are searching the combined event log because you left the domain blank)." + ) + throw new CommandFailure() + } else { + stateInspection.findEvents( + domain, + from.map(timestampFromInstant), + to.map(timestampFromInstant), + Some(limit.value), + ) + } + } + } + + @Help.Summary("Lookup of accepted transactions", FeatureFlag.Testing) + @Help.Description("""Show the accepted transactions as they appear in the event logs. + To select only transactions from a particular domain, use the domain alias. + Leave the domain empty to search the combined event log containing the events of all domains. + Note that if the domain is left blank, the values of `from` and `to` cannot be set. + This is because the combined event log isn't guaranteed to have increasing timestamps. + """) + def transaction_search( + domain: Option[DomainAlias] = None, + from: Option[Instant] = None, + to: Option[Instant] = None, + limit: PositiveInt = defaultLimit, + ): Seq[(String, LfCommittedTransaction)] = + check(FeatureFlag.Testing) { + if (domain.isEmpty && (from.isDefined || to.isDefined)) { + logger.error( + s"You are not allowed to set values for 'from' and 'to' if searching the combined event log " + + s"(you are searching the combined event log because you left the domain blank)." 
+ ) + throw new CommandFailure() + } else { + stateInspection.findAcceptedTransactions( + domain, + from.map(timestampFromInstant), + to.map(timestampFromInstant), + Some(limit.value), + ) + } + } + + @Help.Summary("Retrieve all sequencer messages", FeatureFlag.Testing) + @Help.Description("""Optionally allows filtering for sequencer from a certain time span (inclusive on both ends) and + |limiting the number of displayed messages. The returned messages will be ordered on most domain ledger implementations + |if a time span is given. + | + |Fails if the participant has never connected to the domain.""") + def sequencer_messages( + domain: DomainAlias, + from: Option[Instant] = None, + to: Option[Instant] = None, + limit: PositiveInt = defaultLimit, + ): Seq[PossiblyIgnoredProtocolEvent] = + state_inspection.findMessages(domain, from, to, Some(limit.value)) + + @Help.Summary( + "Return the sync crypto api provider, which provides access to all cryptographic methods", + FeatureFlag.Testing, + ) + def crypto_api(): SyncCryptoApiProvider = check(FeatureFlag.Testing) { + access(node => node.sync.syncCrypto) + } + + @Help.Summary( + "The latest timestamp before or at the given one for which no commitment is outstanding", + FeatureFlag.Testing, + ) + @Help.Description( + """The latest timestamp before or at the given one for which no commitment is outstanding. + |Note that this doesn't imply that pruning is possible at this timestamp, as the system might require some + |additional data for crash recovery. Thus, this is useful for testing commitments; use the commands in the pruning + |group for pruning. 
+ |Additionally, the result needn't fall on a "commitment tick" as specified by the reconciliation interval.""" + ) + def find_clean_commitments_timestamp( + domain: DomainAlias, + beforeOrAt: CantonTimestamp = CantonTimestamp.now(), + ): Option[CantonTimestamp] = + state_inspection.noOutstandingCommitmentsTs(domain, beforeOrAt) + + @Help.Summary( + "Obtain access to the state inspection interface. Use at your own risk.", + FeatureFlag.Testing, + ) + @Help.Description( + """The state inspection methods can fatally and permanently corrupt the state of a participant. + |The API is subject to change in any way.""" + ) + def state_inspection: SyncStateInspection = check(FeatureFlag.Testing) { stateInspection } + + private def stateInspection: SyncStateInspection = access(node => node.sync.stateInspection) + +} + +class ParticipantPruningAdministrationGroup( + runner: LedgerApiCommandRunner & AdminCommandRunner, + consoleEnvironment: ConsoleEnvironment, + loggerFactory: NamedLoggerFactory, +) extends PruningSchedulerAdministration( + runner, + consoleEnvironment, + new PruningSchedulerCommands[PruningServiceStub]( + PruningServiceGrpc.stub, + _.setSchedule(_), + _.clearSchedule(_), + _.setCron(_), + _.setMaxDuration(_), + _.setRetention(_), + _.getSchedule(_), + ), + loggerFactory, + ) + with FeatureFlagFilter + with Helpful { + + import runner.* + + @Help.Summary("Prune the ledger up to the specified offset inclusively.") + @Help.Description( + """Prunes the participant ledger up to the specified offset inclusively returning ``Unit`` if the ledger has been + |successfully pruned. + |Note that upon successful pruning, subsequent attempts to read transactions via ``ledger_api.transactions.flat`` or + |``ledger_api.transactions.trees`` or command completions via ``ledger_api.completions.list`` by specifying a begin offset + |lower than the returned pruning offset will result in a ``NOT_FOUND`` error. 
+ |In the Enterprise Edition, ``prune`` performs a "full prune" freeing up significantly more space and also + |performs additional safety checks returning a ``NOT_FOUND`` error if ``pruneUpTo`` is higher than the + |offset returned by ``find_safe_offset`` on any domain with events preceding the pruning offset.""" + ) + def prune(pruneUpTo: LedgerOffset): Unit = + consoleEnvironment.run( + ledgerApiCommand(LedgerApiCommands.ParticipantPruningService.Prune(pruneUpTo)) + ) + + @Help.Summary( + "Return the highest participant ledger offset whose record time is before or at the given one (if any) at which pruning is safely possible", + FeatureFlag.Preview, + ) + def find_safe_offset(beforeOrAt: Instant = Instant.now()): Option[LedgerOffset] = { + check(FeatureFlag.Preview) { + val ledgerEnd = consoleEnvironment.run( + ledgerApiCommand(LedgerApiCommands.TransactionService.GetLedgerEnd()) + ) + consoleEnvironment.run( + adminCommand( + ParticipantAdminCommands.Pruning.GetSafePruningOffsetCommand(beforeOrAt, ledgerEnd) + ) + ) + } + } + + @Help.Summary( + "Prune only internal ledger state up to the specified offset inclusively.", + FeatureFlag.Preview, + ) + @Help.Description( + """Special-purpose variant of the ``prune`` command only available in the Enterprise Edition that prunes only partial, + |internal participant ledger state freeing up space not needed for serving ``ledger_api.transactions`` + |and ``ledger_api.completions`` requests. In conjunction with ``prune``, ``prune_internally`` enables pruning + |internal ledger state more aggressively than externally observable data via the ledger api. In most use cases + |``prune`` should be used instead. Unlike ``prune``, ``prune_internally`` has no visible effect on the Ledger API. 
+ |The command returns ``Unit`` if the ledger has been successfully pruned or an error if the timestamp + |performs additional safety checks returning a ``NOT_FOUND`` error if ``pruneUpTo`` is higher than the + |offset returned by ``find_safe_offset`` on any domain with events preceding the pruning offset.""" + ) + // Consider adding an "Enterprise" annotation if we end up having more enterprise-only commands than this lone enterprise command. + def prune_internally(pruneUpTo: LedgerOffset): Unit = + check(FeatureFlag.Preview) { + consoleEnvironment.run( + adminCommand(ParticipantAdminCommands.Pruning.PruneInternallyCommand(pruneUpTo)) + ) + } + + @Help.Summary( + "Activate automatic pruning according to the specified schedule with participant-specific options." + ) + @Help.Description( + """Refer to the ``set_schedule`` description for information about the "cron", "max_duration", and "retention" + |parameters. Setting the "prune_internally_only" flag causes pruning to only remove internal state as described in + |more detail in the ``prune_internally`` command description. + """ + ) + def set_participant_schedule( + cron: String, + maxDuration: config.PositiveDurationSeconds, + retention: config.PositiveDurationSeconds, + pruneInternallyOnly: Boolean = false, + ): Unit = + check(FeatureFlag.Preview) { + consoleEnvironment.run( + runner.adminCommand( + SetParticipantScheduleCommand( + cron, + maxDuration, + retention, + pruneInternallyOnly, + ) + ) + ) + } + + @Help.Summary("Inspect the automatic, participant-specific pruning schedule.") + @Help.Description( + """The schedule consists of a "cron" expression and "max_duration" and "retention" durations as described in the + |``get_schedule`` command description. Additionally "prune_internally" indicates if the schedule mandates + |pruning of internal state. 
+ """ + ) + def get_participant_schedule(): Option[ParticipantPruningSchedule] = + consoleEnvironment.run( + runner.adminCommand(GetParticipantScheduleCommand()) + ) + + @Help.Summary( + "Identify the participant ledger offset to prune up to based on the specified timestamp." + ) + @Help.Description( + """Return the largest participant ledger offset that has been processed before or at the specified timestamp. + |The time is measured on the participant's local clock at some point while the participant has processed the + |the event. Returns ``None`` if no such offset exists. + """ + ) + def get_offset_by_time(upToInclusive: Instant): Option[LedgerOffset] = + consoleEnvironment.run( + adminCommand( + ParticipantAdminCommands.Inspection.LookupOffsetByTime( + ProtoConverter.InstantConverter.toProtoPrimitive(upToInclusive) + ) + ) + ) match { + case "" => None + case offset => Some(LedgerOffset(LedgerOffset.Value.Absolute(offset))) + } + + @Help.Summary("Identify the participant ledger offset to prune up to.", FeatureFlag.Preview) + @Help.Description( + """Return the participant ledger offset that corresponds to pruning "n" number of transactions + |from the beginning of the ledger. Errors if the ledger holds less than "n" transactions. Specifying "n" of 1 + |returns the offset of the first transaction (if the ledger is non-empty). 
+ """ + ) + def locate_offset(n: Long): LedgerOffset = + check(FeatureFlag.Preview) { + val rawOffset = consoleEnvironment.run( + adminCommand(ParticipantAdminCommands.Inspection.LookupOffsetByIndex(n)) + ) + LedgerOffset(LedgerOffset.Value.Absolute(rawOffset)) + } + +} + +class LocalCommitmentsAdministrationGroup( + runner: AdminCommandRunner with BaseInspection[ParticipantNode], + val consoleEnvironment: ConsoleEnvironment, + val loggerFactory: NamedLoggerFactory, +) extends FeatureFlagFilter + with Helpful + with NoTracing { + + import runner.* + + @Help.Summary( + "Lookup ACS commitments received from other participants as part of the reconciliation protocol" + ) + @Help.Description("""The arguments are: + - domain: the alias of the domain + - start: lowest time exclusive + - end: highest time inclusive + - counterParticipant: optionally filter by counter participant + """) + def received( + domain: DomainAlias, + start: Instant, + end: Instant, + counterParticipant: Option[ParticipantId] = None, + ): Iterable[SignedProtocolMessage[AcsCommitment]] = { + access(node => + node.sync.stateInspection + .findReceivedCommitments( + domain, + timestampFromInstant(start), + timestampFromInstant(end), + counterParticipant, + ) + ) + } + + @Help.Summary("Lookup ACS commitments locally computed as part of the reconciliation protocol") + def computed( + domain: DomainAlias, + start: Instant, + end: Instant, + counterParticipant: Option[ParticipantId] = None, + ): Iterable[(CommitmentPeriod, ParticipantId, AcsCommitment.CommitmentType)] = + access { node => + node.sync.stateInspection.findComputedCommitments( + domain, + timestampFromInstant(start), + timestampFromInstant(end), + counterParticipant, + ) + } + + def outstanding( + domain: DomainAlias, + start: Instant, + end: Instant, + counterParticipant: Option[ParticipantId] = None, + ): Iterable[(CommitmentPeriod, ParticipantId)] = + access { node => + node.sync.stateInspection.outstandingCommitments( + domain, + 
timestampFromInstant(start), + timestampFromInstant(end), + counterParticipant, + ) + } + +} + +class ParticipantReplicationAdministrationGroup( + runner: AdminCommandRunner, + consoleEnvironment: ConsoleEnvironment, +) extends Helpful { + + @Help.Summary("Set the participant replica to passive") + @Help.Description( + "Trigger a graceful fail-over from this active replica to another passive replica." + ) + def set_passive(): Unit = { + consoleEnvironment.run { + runner.adminCommand( + ParticipantAdminCommands.Replication.SetPassiveCommand() + ) + } + } + +} + +/** Administration commands supported by a participant. + */ +trait ParticipantAdministration extends FeatureFlagFilter { + this: AdminCommandRunner + with LedgerApiCommandRunner + with LedgerApiAdministration + with NamedLogging => + + import ConsoleEnvironment.Implicits.* + implicit protected val consoleEnvironment: ConsoleEnvironment + + private val runner = this + + def id: ParticipantId + + protected def vettedPackagesOfParticipant(): Set[PackageId] + protected def participantIsActiveOnDomain( + domainId: DomainId, + participantId: ParticipantId, + ): Boolean + + @Help.Summary("Manage DAR packages") + @Help.Group("DAR Management") + object dars extends Helpful { + @Help.Summary( + "Remove a DAR from the participant", + FeatureFlag.Preview, + ) + @Help.Description( + """Can be used to remove a DAR from the participant, if the following conditions are satisfied: + |1. The main package of the DAR must be unused -- there should be no active contract from this package + | + |2. All package dependencies of the DAR should either be unused or contained in another of the participant node's uploaded DARs. Canton uses this restriction to ensure that the package dependencies of the DAR don't become "stranded" if they're in use. + | + |3. The main package of the dar should not be vetted. 
If it is vetted, Canton will try to automatically + | revoke the vetting for the main package of the DAR, but this automatic vetting revocation will only succeed if the + | main package vetting originates from a standard ``dars.upload``. Even if the automatic revocation fails, you can + | always manually revoke the package vetting. + | + |If synchronizeVetting is true (default), then the command will block until the participant has observed the vetting transactions to be registered with the domain. + |""" + ) + def remove(darHash: String, synchronizeVetting: Boolean = true): Unit = { + check(FeatureFlag.Preview)(consoleEnvironment.run { + adminCommand(ParticipantAdminCommands.Package.RemoveDar(darHash)) + }) + if (synchronizeVetting) { + packages.synchronize_vetting() + } + } + + @Help.Summary("List installed DAR files") + @Help.Description("""List DARs installed on this participant + |The arguments are: + | filterName: filter by name (source description) + | limit: Limit number of results (default none) + """) + def list(limit: PositiveInt = defaultLimit, filterName: String = ""): Seq[v0.DarDescription] = + consoleEnvironment + .run { + adminCommand(ParticipantAdminCommands.Package.ListDars(limit)) + } + .filter(_.name.startsWith(filterName)) + + @Help.Summary("List contents of DAR files") + def list_contents(hash: String): DarMetadata = consoleEnvironment.run { + adminCommand( + ParticipantAdminCommands.Package.ListDarContents(hash) + ) + } + + @Help.Summary("Upload a Dar to Canton") + @Help.Description("""Daml code is normally shipped as a Dar archive and must explicitly be uploaded to a participant. + |A Dar is a collection of LF-packages, the native binary representation of Daml smart contracts. + |In order to use Daml templates on a participant, the Dar must first be uploaded and then + |vetted by the participant. 
Vetting will ensure that other participants can check whether they + |can actually send a transaction referring to a particular Daml package and participant. + |Vetting is done by registering a VettedPackages topology transaction with the topology manager. + |By default, vetting happens automatically and this command waits for + |the vetting transaction to be successfully registered on all connected domains. + |This is the safe default setting minimizing race conditions. + | + |If vetAllPackages is true (default), the packages will all be vetted on all domains the participant is registered. + |If synchronizeVetting is true (default), then the command will block until the participant has observed the vetting transactions to be registered with the domain. + | + |Note that synchronize vetting might block on permissioned domains that do not just allow participants to update the topology state. + |In such cases, synchronizeVetting should be turned off. + |Synchronize vetting can be invoked manually using $participant.package.synchronize_vettings() + |""") + def upload( + path: String, + vetAllPackages: Boolean = true, + synchronizeVetting: Boolean = true, + ): String = { + val res = consoleEnvironment.runE { + for { + hash <- ParticipantCommands.dars + .upload(runner, path, vetAllPackages, synchronizeVetting, logger) + .toEither + } yield hash + } + if (synchronizeVetting && vetAllPackages) { + packages.synchronize_vetting() + } + res + } + + @Help.Summary("Downloads the DAR file with the given hash to the given directory") + def download(darHash: String, directory: String): Unit = { + val _ = consoleEnvironment.run { + adminCommand( + ParticipantAdminCommands.Package.GetDar(Some(darHash), Some(directory), logger) + ) + } + } + + @Help.Summary("Change DAR vetting status") + @Help.Group("Vetting") + object vetting extends Helpful { + @Help.Summary( + "Vet all packages contained in the DAR archive identified by the provided DAR hash." 
+ ) + def enable(darHash: String, synchronize: Boolean = true): Unit = + check(FeatureFlag.Preview)(consoleEnvironment.run { + adminCommand(ParticipantAdminCommands.Package.VetDar(darHash, synchronize)) + }) + + @Help.Summary("""Revoke vetting for all packages contained in the DAR archive + |identified by the provided DAR hash.""") + @Help.Description("""This command succeeds if the vetting command used to vet the DAR's packages + |was symmetric and resulted in a single vetting topology transaction for all the packages in the DAR. + |This command is potentially dangerous and misuse + |can lead the participant to fail in processing transactions""") + def disable(darHash: String): Unit = + check(FeatureFlag.Preview)(consoleEnvironment.run { + adminCommand(ParticipantAdminCommands.Package.UnvetDar(darHash)) + }) + } + + } + + @Help.Summary("Manage raw Daml-LF packages") + @Help.Group("Package Management") + object packages extends Helpful { + + @Help.Summary("List packages stored on the participant") + @Help.Description("""Supported arguments: + limit - Limit on the number of packages returned (defaults to canton.parameters.console.default-limit) + """) + def list(limit: PositiveInt = defaultLimit): Seq[v0.PackageDescription] = + consoleEnvironment.run { + adminCommand(ParticipantAdminCommands.Package.List(limit)) + } + + @Help.Summary("List package contents") + def list_contents(packageId: String): Seq[v0.ModuleDescription] = consoleEnvironment.run { + adminCommand(ParticipantAdminCommands.Package.ListContents(packageId)) + } + + @Help.Summary("Find packages that contain a module with the given name") + def find( + moduleName: String, + limitPackages: PositiveInt = defaultLimit, + ): Seq[v0.PackageDescription] = consoleEnvironment.runE { + val packageC = adminCommand(ParticipantAdminCommands.Package.List(limitPackages)).toEither + val matchingC = packageC + .flatMap { packages => + packages.traverse(x => + 
adminCommand(ParticipantAdminCommands.Package.ListContents(x.packageId)).toEither.map( + r => (x, r) + ) + ) + } + matchingC.map(_.filter { case (_, content) => + content.map(_.name).contains(moduleName) + }.map(_._1)) + } + + @Help.Summary( + "Remove the package from Canton's package store.", + FeatureFlag.Preview, + ) + @Help.Description( + """The standard operation of this command checks that a package is unused and unvetted, and if so + |removes the package. The force flag can be used to disable the checks, but do not use the force flag unless + |you're certain you know what you're doing. """ + ) + def remove(packageId: String, force: Boolean = false): Unit = { + check(FeatureFlag.Preview)(consoleEnvironment.run { + adminCommand(ParticipantAdminCommands.Package.RemovePackage(packageId, force)) + }) + } + + @Help.Summary( + "Ensure that all vetting transactions issued by this participant have been observed by all configured participants" + ) + @Help.Description("""Sometimes, when scripting tests and demos, a dar or package is uploaded and we need to ensure + |that commands are only submitted once the package vetting has been observed by some other connected participant + |known to the console. 
This command can be used in such cases.""") + def synchronize_vetting( + timeout: NonNegativeDuration = consoleEnvironment.commandTimeouts.bounded + ): Unit = { + val connected = domains.list_connected().map(_.domainId).toSet + + // ensure that the ledger api server has seen all packages + try { + AdminCommandRunner.retryUntilTrue(timeout) { + val canton = packages.list().map(_.packageId).toSet + val maxPackages = PositiveInt.tryCreate(1000) + val lApi = consoleEnvironment + .run { + ledgerApiCommand( + LedgerApiCommands.PackageService.ListKnownPackages(maxPackages) + ) + } + .map(_.packageId) + .toSet + // don't synchronise anymore in a big production system (as we only need this truly for testing) + (lApi.size >= maxPackages.value) || (canton -- lApi).isEmpty + } + } catch { + case _: TimeoutException => + logger.error( + show"Participant $id ledger Api server has still a different set of packages than the sync server" + ) + } + + def waitForPackages( + topology: TopologyAdministrationGroup, + observer: String, + domainId: DomainId, + ): Unit = { + try { + AdminCommandRunner + .retryUntilTrue(timeout) { + // ensure that vetted packages on the domain match the ones in the authorized store + val onDomain = topology.vetted_packages + .list(filterStore = domainId.filterString, filterParticipant = id.filterString) + .flatMap(_.item.packageIds) + .toSet + val vetted = vettedPackagesOfParticipant() + val ret = vetted == onDomain + if (!ret) { + logger.debug( + show"Still waiting for package vetting updates to be observed by $observer on $domainId: vetted - onDomain is ${vetted -- onDomain} while onDomain -- vetted is ${onDomain -- vetted}" + ) + } + ret + } + .discard + } catch { + case _: TimeoutException => + logger.error( + show"$observer has not observed all vetting txs of $id on domain $domainId within the given timeout." 
+ ) + } + } + + // for every domain this participant is connected to + consoleEnvironment.domains.all + .filter(d => d.health.running() && d.health.initialized() && connected.contains(d.id)) + .foreach { domain => + waitForPackages(domain.topology, s"Domain ${domain.name}", domain.id) + } + + // for every participant + consoleEnvironment.participants.all + .filter(p => p.health.running() && p.health.initialized()) + .foreach { participant => + // for every domain this participant is connected to as well + participant.domains.list_connected().foreach { + case item if connected.contains(item.domainId) => + waitForPackages( + participant.topology, + s"Participant ${participant.name}", + item.domainId, + ) + case _ => + } + } + } + + } + + @Help.Summary("Manage domain connections") + @Help.Group("Domains") + object domains extends Helpful { + + @Help.Summary("Returns the id of the given domain alias") + def id_of(domainAlias: DomainAlias): DomainId = { + consoleEnvironment.run { + adminCommand(ParticipantAdminCommands.DomainConnectivity.GetDomainId(domainAlias)) + } + } + + @Help.Summary( + "Test whether a participant is connected to and permissioned on a domain." + ) + @Help.Description( + """Yields false, if the domain is not connected or not healthy. 
+ |Yields false, if the domain is configured in the Canton configuration and + |the participant is not active from the perspective of the domain.""" + ) + def active(domainAlias: DomainAlias): Boolean = { + list_connected().exists(r => { + val domainReferenceO = consoleEnvironment.nodes.all + .collectFirst { + case d: DomainAdministration + if d.health.status.successOption.exists(_.uid == r.domainId.unwrap) => + d + } + + r.domainAlias == domainAlias && + r.healthy && + participantIsActiveOnDomain(r.domainId, id) && + domainReferenceO.forall(_.participants.active(id)) + }) + } + + @Help.Summary( + "Test whether a participant is connected to and permissioned on a domain reference, both from the perspective of the participant and the domain." + ) + @Help.Description( + "Yields false, if the domain has not been initialized, is not connected or is not healthy." + ) + def active(reference: DomainAdministration): Boolean = { + val domainUidO = reference.health.status.successOption.map(_.uid) + list_connected() + .exists(r => + domainUidO.contains(r.domainId.unwrap) && + r.healthy && + participantIsActiveOnDomain(r.domainId, id) && + reference.participants.active(id) + ) + } + + @Help.Summary( + "Test whether a participant is connected to a domain reference" + ) + def is_connected(reference: DomainAdministration): Boolean = + list_connected().exists(_.domainId == reference.id) + + private def confirm_agreement(domainAlias: DomainAlias): Unit = { + + val response = get_agreement(domainAlias) + + val autoApprove = + sys.env.getOrElse("CANTON_AUTO_APPROVE_AGREEMENTS", "no").toLowerCase == "yes" + response.foreach { + case (agreement, accepted) if !accepted => + if (autoApprove) { + accept_agreement(domainAlias.unwrap, agreement.id) + } else { + println(s"Service Agreement for `$domainAlias`:") + println(agreement.text) + println("Do you accept the license? 
yes/no") + print("> ") + val answer = Option(scala.io.StdIn.readLine()) + if (answer.exists(_.toLowerCase == "yes")) + accept_agreement(domainAlias.unwrap, agreement.id) + } + case _ => () // Don't do anything if the license has already been accepted + } + } + + @Help.Summary( + "Macro to connect a participant to a locally configured domain given by reference" + ) + @Help.Description(""" + The arguments are: + domain - A local domain or sequencer reference + manualConnect - Whether this connection should be handled manually and also excluded from automatic re-connect. + alias - The name you will be using to refer to this domain. Can not be changed anymore. + certificatesPath - Path to TLS certificate files to use as a trust anchor. + priority - The priority of the domain. The higher the more likely a domain will be used. + synchronize - A timeout duration indicating how long to wait for all topology changes to have been effected on all local nodes. + """) + def connect_local( + domain: InstanceReferenceWithSequencerConnection, + manualConnect: Boolean = false, + alias: Option[DomainAlias] = None, + maxRetryDelayMillis: Option[Long] = None, + priority: Int = 0, + synchronize: Option[NonNegativeDuration] = Some( + consoleEnvironment.commandTimeouts.bounded + ), + ): Unit = { + val config = ParticipantCommands.domains.referenceToConfig( + NonEmpty.mk(Seq, SequencerAlias.Default -> domain).toMap, + manualConnect, + alias, + maxRetryDelayMillis.map(NonNegativeFiniteDuration.tryOfMillis), + priority, + ) + connectFromConfig(config, synchronize) + } + + def connect_local_bft( + domain: NonEmpty[Map[SequencerAlias, InstanceReferenceWithSequencerConnection]], + manualConnect: Boolean = false, + alias: Option[DomainAlias] = None, + maxRetryDelayMillis: Option[Long] = None, + priority: Int = 0, + synchronize: Option[NonNegativeDuration] = Some( + consoleEnvironment.commandTimeouts.bounded + ), + sequencerTrustThreshold: PositiveInt = PositiveInt.tryCreate(1), + ): Unit = { + 
val config = ParticipantCommands.domains.referenceToConfig( + domain, + manualConnect, + alias, + maxRetryDelayMillis.map(NonNegativeFiniteDuration.tryOfMillis), + priority, + sequencerTrustThreshold, + ) + connectFromConfig(config, synchronize) + } + + @Help.Summary("Macro to connect a participant to a domain given by connection") + @Help.Description("""This variant of connect expects a domain connection config. + |Otherwise the behaviour is equivalent to the connect command with explicit + |arguments. If the domain is already configured, the domain connection + |will be attempted. If however the domain is offline, the command will fail. + |Generally, this macro should only be used to setup a new domain. However, for + |convenience, we support idempotent invocations where subsequent calls just ensure + |that the participant reconnects to the domain. + |""") + def connect( + config: DomainConnectionConfig + ): Unit = { + connectFromConfig(config, None) + } + + private def connectFromConfig( + config: DomainConnectionConfig, + synchronize: Option[NonNegativeDuration], + ): Unit = { + val current = this.config(config.domain) + // if the config did not change, we'll just treat this as idempotent, otherwise, we'll use register to fail + if (current.isEmpty) { + // architecture-handbook-entry-begin: OnboardParticipantConnect + // register the domain configuration + register(config.copy(manualConnect = true)) + if (!config.manualConnect) { + // fetch and confirm domain agreement + if (config.sequencerConnections.nonBftSetup) { // agreement is removed with the introduction of BFT domain. 
+ confirm_agreement(config.domain.unwrap) + } + reconnect(config.domain.unwrap, retry = false).discard + // now update the domain settings to auto-connect + modify(config.domain.unwrap, _.copy(manualConnect = false)) + } + // architecture-handbook-entry-end: OnboardParticipantConnect + } else if (!config.manualConnect) { + val _ = reconnect(config.domain, retry = false) + modify(config.domain.unwrap, _.copy(manualConnect = false)) + } + synchronize.foreach { timeout => + ConsoleMacros.utils.synchronize_topology(Some(timeout))(consoleEnvironment) + } + } + + @Help.Summary("Macro to connect a participant to a domain given by connection") + @Help.Description("""The connect macro performs a series of commands in order to connect this participant to a domain. + |First, `register` will be invoked with the given arguments, but first registered + |with manualConnect = true. If you already set manualConnect = true, then nothing else + |will happen and you will have to do the remaining steps yourselves. + |Otherwise, if the domain requires an agreement, it is fetched and presented to the user for evaluation. + |If the user is fine with it, the agreement is confirmed. If you want to auto-confirm, + |then set the environment variable CANTON_AUTO_APPROVE_AGREEMENTS=yes. + |Finally, the command will invoke `reconnect` to startup the connection. + |If the reconnect succeeded, the registered configuration will be updated + |with manualStart = true. If anything fails, the domain will remain registered with `manualConnect = true` and + |you will have to perform these steps manually. + The arguments are: + domainAlias - The name you will be using to refer to this domain. Can not be changed anymore. + connection - The connection string to connect to this domain. I.e. https://url:port + manualConnect - Whether this connection should be handled manually and also excluded from automatic re-connect. + domainId - Optionally the domainId you expect to see on this domain. 
+ certificatesPath - Path to TLS certificate files to use as a trust anchor. + priority - The priority of the domain. The higher the more likely a domain will be used. + timeTrackerConfig - The configuration for the domain time tracker. + synchronize - A timeout duration indicating how long to wait for all topology changes to have been effected on all local nodes. + """) + def connect( + domainAlias: DomainAlias, + connection: String, + manualConnect: Boolean = false, + domainId: Option[DomainId] = None, + certificatesPath: String = "", + priority: Int = 0, + timeTrackerConfig: DomainTimeTrackerConfig = DomainTimeTrackerConfig(), + synchronize: Option[NonNegativeDuration] = Some( + consoleEnvironment.commandTimeouts.bounded + ), + ): DomainConnectionConfig = { + val config = ParticipantCommands.domains.toConfig( + domainAlias, + connection, + manualConnect, + domainId, + certificatesPath, + priority, + timeTrackerConfig = timeTrackerConfig, + ) + connectFromConfig(config, synchronize) + config + } + + @Help.Summary( + "Macro to connect a participant to a domain that supports connecting via many endpoints" + ) + @Help.Description("""Domains can provide many endpoints to connect to for availability and performance benefits. 
+ This version of connect allows specifying multiple endpoints for a single domain connection: + connect_multi("mydomain", Seq(sequencer1, sequencer2)) + or: + connect_multi("mydomain", Seq("https://host1.mydomain.net", "https://host2.mydomain.net", "https://host3.mydomain.net")) + + To create a more advanced connection config use domains.toConfig with a single host, + |then use config.addConnection to add additional connections before connecting: + config = myparticipaint.domains.toConfig("mydomain", "https://host1.mydomain.net", ...otherArguments) + config = config.addConnection("https://host2.mydomain.net", "https://host3.mydomain.net") + myparticipant.domains.connect(config) + + The arguments are: + domainAlias - The name you will be using to refer to this domain. Can not be changed anymore. + connections - The sequencer connection definitions (can be an URL) to connect to this domain. I.e. https://url:port + synchronize - A timeout duration indicating how long to wait for all topology changes to have been effected on all local nodes. + """) + def connect_multi( + domainAlias: DomainAlias, + connections: Seq[SequencerConnection], + synchronize: Option[NonNegativeDuration] = Some( + consoleEnvironment.commandTimeouts.bounded + ), + ): DomainConnectionConfig = { + val sequencerConnection = + SequencerConnection.merge(connections).getOrElse(sys.error("Invalid sequencer connection")) + val sequencerConnections = + SequencerConnections.single(sequencerConnection) + val config = DomainConnectionConfig( + domainAlias, + sequencerConnections, + ) + connectFromConfig(config, synchronize) + config + } + + @Help.Summary("Reconnect this participant to the given domain") + @Help.Description("""Idempotent attempts to re-establish a connection to a certain domain. + |If retry is set to false, the command will throw an exception if unsuccessful. 
+ |If retry is set to true, the command will terminate after the first attempt with the result, + |but the server will keep on retrying to connect to the domain. + | + The arguments are: + domainAlias - The name you will be using to refer to this domain. Can not be changed anymore. + retry - Whether the reconnect should keep on retrying until it succeeded or abort noisly if the connection attempt fails. + synchronize - A timeout duration indicating how long to wait for all topology changes to have been effected on all local nodes. + """) + def reconnect( + domainAlias: DomainAlias, + retry: Boolean = true, + synchronize: Option[NonNegativeDuration] = Some( + consoleEnvironment.commandTimeouts.bounded + ), + ): Boolean = { + val ret = consoleEnvironment.run { + adminCommand(ParticipantAdminCommands.DomainConnectivity.ConnectDomain(domainAlias, retry)) + } + if (ret) { + synchronize.foreach { timeout => + ConsoleMacros.utils.synchronize_topology(Some(timeout))(consoleEnvironment) + } + } + ret + } + + @Help.Summary("Reconnect this participant to the given local domain") + @Help.Description("""Idempotent attempts to re-establish a connection to the given local domain. + |Same behaviour as generic reconnect. + + The arguments are: + ref - The domain reference to connect to + retry - Whether the reconnect should keep on retrying until it succeeded or abort noisly if the connection attempt fails. + synchronize - A timeout duration indicating how long to wait for all topology changes to have been effected on all local nodes. 
+ """) + def reconnect_local( + ref: DomainReference, + retry: Boolean = true, + synchronize: Option[NonNegativeDuration] = Some( + consoleEnvironment.commandTimeouts.bounded + ), + ): Boolean = reconnect(ref.name, retry, synchronize) + + @Help.Summary("Reconnect this participant to all domains which are not marked as manual start") + @Help.Description(""" + The arguments are: + ignoreFailures - If set to true (default), we'll attempt to connect to all, ignoring any failure + synchronize - A timeout duration indicating how long to wait for all topology changes to have been effected on all local nodes. + """) + def reconnect_all( + ignoreFailures: Boolean = true, + synchronize: Option[NonNegativeDuration] = Some( + consoleEnvironment.commandTimeouts.bounded + ), + ): Unit = { + consoleEnvironment.run { + adminCommand( + ParticipantAdminCommands.DomainConnectivity.ReconnectDomains(ignoreFailures) + ) + } + synchronize.foreach { timeout => + ConsoleMacros.utils.synchronize_topology(Some(timeout))(consoleEnvironment) + } + } + + @Help.Summary("Disconnect this participant from the given domain") + def disconnect(domainAlias: DomainAlias): Unit = consoleEnvironment.run { + adminCommand(ParticipantAdminCommands.DomainConnectivity.DisconnectDomain(domainAlias)) + } + + @Help.Summary("Disconnect this participant from all connected domains") + def disconnect_all(): Unit = { + list_connected().foreach { connected => + disconnect(connected.domainAlias) + } + } + + @Help.Summary("Disconnect this participant from the given local domain") + def disconnect_local(domain: DomainReference): Unit = consoleEnvironment.run { + adminCommand(ParticipantAdminCommands.DomainConnectivity.DisconnectDomain(domain.name)) + } + + @Help.Summary("List the connected domains of this participant") + def list_connected(): Seq[ListConnectedDomainsResult] = consoleEnvironment.run { + adminCommand(ParticipantAdminCommands.DomainConnectivity.ListConnectedDomains()) + } + + @Help.Summary("List the 
configured domains of this participant") + def list_registered(): Seq[(DomainConnectionConfig, Boolean)] = consoleEnvironment.run { + adminCommand(ParticipantAdminCommands.DomainConnectivity.ListConfiguredDomains) + } + + @Help.Summary("Returns true if a domain is registered using the given alias") + def is_registered(domain: DomainAlias): Boolean = + config(domain).nonEmpty + + @Help.Summary("Returns the current configuration of a given domain") + def config(domain: DomainAlias): Option[DomainConnectionConfig] = + list_registered().map(_._1).find(_.domain == domain) + + @Help.Summary("Register new domain connection") + @Help.Description("""When connecting to a domain, we need to register the domain connection and eventually + |accept the terms of service of the domain before we can connect. The registration process is therefore + |a subset of the operation. Therefore, register is equivalent to connect if the domain does not require + |a service agreement. However, you would usually call register only in advanced scripts.""") + def register(config: DomainConnectionConfig): Unit = { + consoleEnvironment.run { + ParticipantCommands.domains.register(runner, config) + } + } + + @Help.Summary("Modify existing domain connection") + def modify( + domain: DomainAlias, + modifier: DomainConnectionConfig => DomainConnectionConfig, + ): Unit = { + consoleEnvironment.runE { + for { + configured <- adminCommand( + ParticipantAdminCommands.DomainConnectivity.ListConfiguredDomains + ).toEither + cfg <- configured + .map(_._1) + .find(_.domain == domain) + .toRight(s"No such domain ${domain} configured") + newConfig = modifier(cfg) + _ <- + if (newConfig.domain == cfg.domain) Right(()) + else Left("We don't support modifying the domain alias of a DomainConnectionConfig.") + _ <- adminCommand( + ParticipantAdminCommands.DomainConnectivity.ModifyDomainConnection(modifier(cfg)) + ).toEither + } yield () + } + } + + @Help.Summary( + "Get the service agreement of the given domain alias 
and if it has been accepted already." + ) + def get_agreement(domainAlias: DomainAlias): Option[(v0.Agreement, Boolean)] = + consoleEnvironment.run { + adminCommand(ParticipantAdminCommands.DomainConnectivity.GetAgreement(domainAlias)) + } + @Help.Summary("Accept the service agreement of the given domain alias") + def accept_agreement(domainAlias: DomainAlias, agreementId: String): Unit = + consoleEnvironment.run { + adminCommand( + ParticipantAdminCommands.DomainConnectivity.AcceptAgreement(domainAlias, agreementId) + ) + } + + } + + @Help.Summary("Composability related functionality", FeatureFlag.Preview) + @Help.Group("Transfer") + object transfer extends Helpful { + @Help.Summary( + "Transfer-out a contract from the source domain with destination target domain", + FeatureFlag.Preview, + ) + @Help.Description( + """Transfers the given contract out of the source domain with destination target domain. + The command returns the ID of the transfer when the transfer-out has completed successfully. + The contract is in transit until the transfer-in has completed on the target domain. + The submitting party must be a stakeholder of the contract and the participant must have submission rights + for the submitting party on the source domain. It must also be connected to the target domain. + An application-id can be specified to uniquely identify the application that have issued the transfer, + otherwise the default value will be used. 
An optional submission id can be set by the committer to the value + of their choice that allows an application to correlate completions to its submissions.""" + ) + def out( + submittingParty: PartyId, + contractId: LfContractId, + sourceDomain: DomainAlias, + targetDomain: DomainAlias, + applicationId: LedgerApplicationId = LedgerApplicationId.assertFromString("AdminConsole"), + submissionId: String = "", + workflowId: String = "", + commandId: String = "", + ): TransferId = + check(FeatureFlag.Preview)(consoleEnvironment.run { + adminCommand( + ParticipantAdminCommands.Transfer + .TransferOut( + submittingParty, + contractId, + sourceDomain, + targetDomain, + applicationId = applicationId, + submissionId = submissionId, + workflowId = workflowId, + commandId = if (commandId.isEmpty) UUID.randomUUID().toString else commandId, + ) + ) + }) + + @Help.Summary("Transfer-in a contract in transit to the target domain", FeatureFlag.Preview) + @Help.Description("""Manually transfers a contract in transit into the target domain. + The command returns when the transfer-in has completed successfully. + If the transferExclusivityTimeout in the target domain's parameters is set to a positive value, + all participants of all stakeholders connected to both origin and target domain will attempt to transfer-in + the contract automatically after the exclusivity timeout has elapsed. + An application-id can be specified to uniquely identifies the application that have issued the transfer, + otherwise the default value will be used. 
An optional submission id can be set by the committer to the value + of their choice that allows an application to correlate completions to its submissions.""") + def in( + submittingParty: PartyId, + transferId: TransferId, + targetDomain: DomainAlias, + applicationId: LedgerApplicationId = LedgerApplicationId.assertFromString("AdminConsole"), + submissionId: String = "", + workflowId: String = "", + commandId: String = "", + ): Unit = + check(FeatureFlag.Preview)(consoleEnvironment.run { + adminCommand( + ParticipantAdminCommands.Transfer + .TransferIn( + submittingParty, + transferId.toProtoV0, + targetDomain, + applicationId = applicationId, + submissionId = submissionId, + workflowId = workflowId, + commandId = if (commandId.isEmpty) UUID.randomUUID().toString else commandId, + ) + ) + }) + + @Help.Summary("Search the currently in-flight transfers", FeatureFlag.Preview) + @Help.Description( + "Returns all in-flight transfers with the given target domain that match the filters, but no more than the limit specifies." + ) + def search( + targetDomain: DomainAlias, + filterSourceDomain: Option[DomainAlias], + filterTimestamp: Option[Instant], + filterSubmittingParty: Option[PartyId], + limit: PositiveInt = defaultLimit, + ): Seq[TransferSearchResult] = + check(FeatureFlag.Preview)(consoleEnvironment.run { + adminCommand( + ParticipantAdminCommands.Transfer + .TransferSearch( + targetDomain, + filterSourceDomain, + filterTimestamp, + filterSubmittingParty, + limit.value, + ) + ) + }) + + @Help.Summary( + "Transfer the contract from the origin domain to the target domain", + FeatureFlag.Preview, + ) + @Help.Description( + "Macro that first calls transfer_out and then transfer_in. No error handling is done." 
+ ) + def execute( + submittingParty: PartyId, + contractId: LfContractId, + sourceDomain: DomainAlias, + targetDomain: DomainAlias, + ): Unit = { + val transferId = out(submittingParty, contractId, sourceDomain, targetDomain) + in(submittingParty, transferId, targetDomain) + } + + @Help.Summary("Lookup the active domain for the provided contracts", FeatureFlag.Preview) + def lookup_contract_domain(contractIds: LfContractId*): Map[LfContractId, String] = + check(FeatureFlag.Preview) { + consoleEnvironment.run { + adminCommand(ParticipantAdminCommands.Inspection.LookupContractDomain(contractIds.toSet)) + } + } + } + + @Help.Summary("Functionality for managing resources") + @Help.Group("Resource Management") + object resources extends Helpful { + + @Help.Summary("Set resource limits for the participant.") + @Help.Description( + """While a resource limit is attained or exceeded, the participant will reject any additional submission with GRPC status ABORTED. + |Most importantly, a submission will be rejected **before** it consumes a significant amount of resources. + | + |There are three kinds of limits: `maxDirtyRequests`, `maxRate` and `maxBurstFactor`. + |The number of dirty requests of a participant P covers (1) requests initiated by P as well as + |(2) requests initiated by participants other than P that need to be validated by P. + |Compared to the maximum rate, the maximum number of dirty requests reflects the load on the participant more accurately. + |However, the maximum number of dirty requests alone does not protect the system from "bursts": + |If an application submits a huge number of commands at once, the maximum number of dirty requests will likely + |be exceeded, as the system is registering dirty requests only during validation and not already during + |submission. + | + |The maximum rate is a hard limit on the rate of commands submitted to this participant through the ledger API. 
+ |As the rate of commands is checked and updated immediately after receiving a new command submission, + |an application cannot exceed the maximum rate. + | + |The `maxBurstFactor` parameter (positive, default 0.5) allows to configure how permissive the rate limitation should be + |with respect to bursts. The rate limiting will be enforced strictly after having observed `max_burst` * `max_rate` commands. + | + |For the sake of illustration, let's assume the configured rate limit is ``100 commands/s`` with a burst ratio of 0.5. + |If an application submits 100 commands within a single second, waiting exactly 10 milliseconds between consecutive commands, + |then the participant will accept all commands. + |With a `maxBurstFactor` of 0.5, the participant will accept the first 50 commands and reject the remaining 50. + |If the application then waits another 500 ms, it may submit another burst of 50 commands. If it waits 250 ms, + |it may submit only a burst of 25 commands. + | + |Resource limits can only be changed, if the server runs Canton enterprise. 
+ |In the community edition, the server uses fixed limits that cannot be changed.""" + ) + def set_resource_limits(limits: ResourceLimits): Unit = + consoleEnvironment.run { adminCommand(SetResourceLimits(limits)) } + + @Help.Summary("Get the resource limits of the participant.") + def resource_limits(): ResourceLimits = consoleEnvironment.run { + adminCommand(GetResourceLimits()) + } + + } +} + +trait ParticipantHealthAdministrationCommon extends FeatureFlagFilter { + this: HealthAdministrationCommon[ParticipantStatus] => + + protected def runner: AdminCommandRunner + + // Single internal implementation so that `maybe_ping` + // can be hidden behind the `testing` feature flag + private def ping_internal( + participantId: ParticipantId, + timeout: NonNegativeDuration, + workflowId: String, + id: String, + ): Option[Duration] = + consoleEnvironment.run { + runner.adminCommand( + ParticipantAdminCommands.Ping + .Ping( + Set[String](participantId.adminParty.toLf), + Set(), + timeout.asFiniteApproximation.toMillis, + 0, + 0, + workflowId, + id, + ) + ) + } + + @Help.Summary( + "Sends a ping to the target participant over the ledger. " + + "Yields the duration in case of success and throws a RuntimeException in case of failure." + ) + def ping( + participantId: ParticipantId, + timeout: NonNegativeDuration = consoleEnvironment.commandTimeouts.ping, + workflowId: String = "", + id: String = "", + ): Duration = { + val adminApiRes: Option[Duration] = ping_internal(participantId, timeout, workflowId, id) + consoleEnvironment.runE( + adminApiRes.toRight( + s"Unable to ping $participantId within ${LoggerUtil.roundDurationForHumans(timeout.duration)}" + ) + ) + + } + + @Help.Summary( + "Sends a ping to the target participant over the ledger. 
Yields Some(duration) in case of success and None in case of failure.", + FeatureFlag.Testing, + ) + def maybe_ping( + participantId: ParticipantId, + timeout: NonNegativeDuration = consoleEnvironment.commandTimeouts.ping, + workflowId: String = "", + id: String = "", + ): Option[Duration] = check(FeatureFlag.Testing) { + ping_internal(participantId, timeout, workflowId, id) + } +} + +class ParticipantHealthAdministration( + val runner: AdminCommandRunner, + val consoleEnvironment: ConsoleEnvironment, + override val loggerFactory: NamedLoggerFactory, +) extends HealthAdministration( + runner, + consoleEnvironment, + ParticipantStatus.fromProtoV0, + ) + with FeatureFlagFilter + with ParticipantHealthAdministrationCommon + +class ParticipantHealthAdministrationX( + val runner: AdminCommandRunner, + val consoleEnvironment: ConsoleEnvironment, + override val loggerFactory: NamedLoggerFactory, +) extends HealthAdministrationX( + runner, + consoleEnvironment, + ParticipantStatus.fromProtoV0, + ) + with FeatureFlagFilter + with ParticipantHealthAdministrationCommon diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ParticipantRepairAdministration.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ParticipantRepairAdministration.scala new file mode 100644 index 0000000000..c87177b625 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ParticipantRepairAdministration.scala @@ -0,0 +1,368 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.console.commands

import better.files.File
import com.digitalasset.canton.admin.api.client.commands.{
  GrpcAdminCommand,
  ParticipantAdminCommands,
}
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.console.CommandErrors.GenericCommandError
import com.digitalasset.canton.console.{
  AdminCommandRunner,
  CommandErrors,
  CommandSuccessful,
  ConsoleCommandResult,
  ConsoleEnvironment,
  FeatureFlag,
  FeatureFlagFilter,
  Help,
  Helpful,
}
import com.digitalasset.canton.logging.NamedLoggerFactory
import com.digitalasset.canton.networking.grpc.GrpcError
import com.digitalasset.canton.participant.ParticipantNodeCommon
import com.digitalasset.canton.participant.admin.v0.{ExportAcsRequest, ExportAcsResponse}
import com.digitalasset.canton.participant.domain.DomainConnectionConfig
import com.digitalasset.canton.protocol.{LfContractId, SerializableContractWithWitnesses}
import com.digitalasset.canton.topology.{DomainId, PartyId}
import com.digitalasset.canton.tracing.{NoTracing, TraceContext}
import com.digitalasset.canton.util.ResourceUtil
import com.digitalasset.canton.version.ProtocolVersion
import com.digitalasset.canton.{DiscardOps, DomainAlias, SequencerCounter}
import com.google.protobuf.ByteString
import io.grpc.Context.CancellableContext
import io.grpc.StatusRuntimeException

import java.time.Instant
import java.util.UUID
import scala.concurrent.{Await, Promise, TimeoutException}

/** Console command group for participant repair operations: purging contracts, migrating
  * contracts between domains, and exporting/importing ACS snapshots.
  *
  * These are last-resort recovery tools; most of them are gated behind the
  * `features.enable-repair-commands` configuration (via [[FeatureFlagFilter.check]]).
  */
class ParticipantRepairAdministration(
    val consoleEnvironment: ConsoleEnvironment,
    runner: AdminCommandRunner,
    val loggerFactory: NamedLoggerFactory,
) extends FeatureFlagFilter
    with NoTracing
    with Helpful {

  @Help.Summary("Purge contracts with specified Contract IDs from local participant.")
  @Help.Description(
    """This is a last resort command to recover from data corruption, e.g. in scenarios in which participant
      |contracts have somehow gotten out of sync and need to be manually purged, or in situations in which
      |stakeholders are no longer available to agree to their archival. The participant needs to be disconnected from
      |the domain on which the contracts with "contractIds" reside at the time of the call, and as of now the domain
      |cannot have had any inflight requests.
      |The "ignoreAlreadyPurged" flag makes it possible to invoke the command multiple times with the same
      |parameters in case an earlier command invocation has failed.
      |As repair commands are powerful tools to recover from unforeseen data corruption, but dangerous under normal
      |operation, use of this command requires (temporarily) enabling the "features.enable-repair-commands"
      |configuration. In addition repair commands can run for an unbounded time depending on the number of
      |contract ids passed in. Be sure to not connect the participant to the domain until the call returns."""
  )
  def purge(
      domain: DomainAlias,
      contractIds: Seq[LfContractId],
      ignoreAlreadyPurged: Boolean = true,
  ): Unit =
    consoleEnvironment.run {
      runner.adminCommand(
        ParticipantAdminCommands.ParticipantRepairManagement.PurgeContracts(
          domain = domain,
          contracts = contractIds,
          ignoreAlreadyPurged = ignoreAlreadyPurged,
        )
      )
    }

  @Help.Summary("Migrate contracts from one domain to another one.")
  @Help.Description(
    """This method can be used to migrate all the contracts associated with a domain to a new domain connection.
       This method will register the new domain, connect to it and then re-associate all contracts on the source
       domain to the target domain. Please note that this migration needs to be done by all participants
       at the same time. The domain should only be used once all participants have finished their migration.

       The arguments are:
       source: the domain alias of the source domain
       target: the configuration for the target domain
       """
  )
  def migrate_domain(
      source: DomainAlias,
      target: DomainConnectionConfig,
  ): Unit = {
    consoleEnvironment.run {
      runner.adminCommand(
        ParticipantAdminCommands.ParticipantRepairManagement.MigrateDomain(source, target)
      )
    }
  }

  @Help.Summary("Export active contracts for the given set of parties to a file.")
  @Help.Description(
    """This command exports the current Active Contract Set (ACS) of a given set of parties to ACS snapshot file.
      |Afterwards, the 'import_acs' command allows importing it into a participant's ACS again.
      |Such ACS export (and import) is interesting for recovery and operational purposes only.
      |Note that the 'export_acs' command execution may take a long time to complete and may require significant
      |resources.
      """
  )
  def export_acs(
      parties: Set[PartyId],
      outputFile: String = ParticipantRepairAdministration.ExportAcsDefaultFile,
      filterDomainId: Option[DomainId] = None,
      timestamp: Option[Instant] = None,
      contractDomainRenames: Map[DomainId, (DomainId, ProtocolVersion)] = Map.empty,
  ): Unit = {
    check(FeatureFlag.Repair) {
      // The collector streams the server's byte chunks into `outputFile` and completes a
      // promise once the stream finishes (or fails).
      val collector = AcsSnapshotFileCollector[ExportAcsRequest, ExportAcsResponse](outputFile)
      val command = ParticipantAdminCommands.ParticipantRepairManagement
        .ExportAcs(
          parties,
          filterDomainId,
          timestamp,
          collector.observer,
          contractDomainRenames,
        )
      collector.materializeFile(command)
    }
  }

  /** Helper that materializes a server-streamed ACS snapshot into a local file.
    *
    * The gRPC call returns a [[CancellableContext]]; the streamed chunks are written to
    * `target` by [[GrpcByteChunksToFileObserver]], which completes `requestComplete` when
    * the stream ends.
    */
  private case class AcsSnapshotFileCollector[
      Req,
      Resp <: GrpcByteChunksToFileObserver.ByteStringChunk,
  ](outputFile: String) {
    private val target = File(outputFile)
    private val requestComplete = Promise[String]()
    val observer = new GrpcByteChunksToFileObserver[Resp](
      target,
      requestComplete,
    )
    private val timeout = consoleEnvironment.commandTimeouts.ledgerCommand

    def materializeFile(
        command: GrpcAdminCommand[
          Req,
          CancellableContext,
          CancellableContext,
        ]
    ): Unit = {
      consoleEnvironment.run {

        // Inner `run` extracts the CancellableContext from the command result (failing fast
        // if the admin call itself failed).
        def call = consoleEnvironment.run {
          runner.adminCommand(
            command
          )
        }

        try {
          // `withResource` guarantees the gRPC context is cancelled/closed afterwards.
          ResourceUtil.withResource(call) { _ =>
            CommandSuccessful(
              Await
                .result(
                  requestComplete.future,
                  timeout.duration,
                )
                .discard
            )
          }
        } catch {
          case sre: StatusRuntimeException =>
            // Remove the partially written snapshot so a failed export does not leave a
            // corrupt file behind (previously only the timeout path cleaned up).
            target.delete(swallowIOExceptions = true)
            GenericCommandError(
              GrpcError("Generating acs snapshot file", "download_acs_snapshot", sre).toString
            )
          case _: TimeoutException =>
            target.delete(swallowIOExceptions = true)
            CommandErrors.ConsoleTimeout.Error(timeout.asJavaApproximation)
        }
      }
    }
  }

  @Help.Summary("Import active contracts from an Active Contract Set (ACS) snapshot file.")
  @Help.Description(
    """This command imports contracts from an ACS snapshot file into the participant's ACS.
      |The given ACS snapshot file needs to be the resulting file from a previous 'export_acs' command invocation.
      """
  )
  def import_acs(
      inputFile: String = ParticipantRepairAdministration.ExportAcsDefaultFile,
      workflowIdPrefix: String = "",
  ): Unit = {
    check(FeatureFlag.Repair) {
      consoleEnvironment.run {
        runner.adminCommand(
          ParticipantAdminCommands.ParticipantRepairManagement.ImportAcs(
            ByteString.copyFrom(File(inputFile).loadBytes),
            // Default to a fresh, unique workflow-id prefix when none was supplied.
            if (workflowIdPrefix.nonEmpty) workflowIdPrefix
            else s"import-${UUID.randomUUID}",
          )
        )
      }
    }
  }

}

/** Repair commands that additionally require in-process access to the participant node
  * (via [[access]]), i.e. are only available on local participant references.
  */
abstract class LocalParticipantRepairAdministration(
    override val consoleEnvironment: ConsoleEnvironment,
    runner: AdminCommandRunner,
    override val loggerFactory: NamedLoggerFactory,
) extends ParticipantRepairAdministration(
      consoleEnvironment = consoleEnvironment,
      runner = runner,
      loggerFactory = loggerFactory,
    ) {

  /** Run `handler` against the in-process participant node. Implemented by the concrete
    * local reference.
    */
  protected def access[T](handler: ParticipantNodeCommon => T): T

  @Help.Summary("Add specified contracts to specific domain on local participant.")
  @Help.Description(
    """This is a last resort command to recover from data corruption, e.g. in scenarios in which participant
      |contracts have somehow gotten out of sync and need to be manually created. The participant needs to be
      |disconnected from the specified "domain" at the time of the call, and as of now the domain cannot have had
      |any inflight requests.
      |For each "contractsToAdd", specify "witnesses", local parties, in case no local party is a stakeholder.
      |The "ignoreAlreadyAdded" flag makes it possible to invoke the command multiple times with the same
      |parameters in case an earlier command invocation has failed.
      |
      |As repair commands are powerful tools to recover from unforeseen data corruption, but dangerous under normal
      |operation, use of this command requires (temporarily) enabling the "features.enable-repair-commands"
      |configuration. In addition repair commands can run for an unbounded time depending on the number of
      |contracts passed in. Be sure to not connect the participant to the domain until the call returns.
      |
      The arguments are:
      - domain: the alias of the domain to which to add the contract
      - contractsToAdd: list of contracts to add with witness information
      - ignoreAlreadyAdded: (default true) if set to true, it will ignore contracts that already exist on the target domain.
      - ignoreStakeholderCheck: (default false) if set to true, add will work for contracts that don't have a local party (useful for party migration).
      """
  )
  def add(
      domain: DomainAlias,
      contractsToAdd: Seq[SerializableContractWithWitnesses],
      ignoreAlreadyAdded: Boolean = true,
      ignoreStakeholderCheck: Boolean = false,
  ): Unit =
    runRepairCommand(tc =>
      access(
        _.sync.repairService
          .addContracts(
            domain,
            contractsToAdd,
            ignoreAlreadyAdded,
            ignoreStakeholderCheck,
          )(tc)
      )
    )

  /** Common wrapper for repair calls: gates on the Repair feature flag and supplies a
    * fresh trace context to the underlying repair service.
    */
  private def runRepairCommand[T](command: TraceContext => Either[String, T]): T =
    check(FeatureFlag.Repair) {
      consoleEnvironment.run {
        ConsoleCommandResult.fromEither {
          // Ensure that admin repair commands have a non-empty trace context.
          TraceContext.withNewTraceContext(command(_))
        }
      }
    }

  @Help.Summary("Move contracts with specified Contract IDs from one domain to another.")
  @Help.Description(
    """This is a last resort command to recover from data corruption in scenarios in which a domain is
      |irreparably broken and formerly connected participants need to move contracts to another, healthy domain.
      |The participant needs to be disconnected from both the "sourceDomain" and the "targetDomain". Also as of now
      |the target domain cannot have had any inflight requests.
      |Contracts already present in the target domain will be skipped, and this makes it possible to invoke this
      |command in an "idempotent" fashion in case an earlier attempt had resulted in an error.
      |The "skipInactive" flag makes it possible to only move active contracts in the "sourceDomain".
      |As repair commands are powerful tools to recover from unforeseen data corruption, but dangerous under normal
      |operation, use of this command requires (temporarily) enabling the "features.enable-repair-commands"
      |configuration. In addition repair commands can run for an unbounded time depending on the number of
      |contract ids passed in. Be sure to not connect the participant to either domain until the call returns.

      Arguments:
      - contractIds - set of contract ids that should be moved to the new domain
      - sourceDomain - alias of the source domain
      - targetDomain - alias of the target domain
      - skipInactive - (default true) whether to skip inactive contracts mentioned in the contractIds list
      - batchSize - (default 100) how many contracts to write at once to the database"""
  )
  def change_domain(
      contractIds: Seq[LfContractId],
      sourceDomain: DomainAlias,
      targetDomain: DomainAlias,
      skipInactive: Boolean = true,
      batchSize: Int = 100,
  ): Unit =
    runRepairCommand(tc =>
      access(
        _.sync.repairService.changeDomainAwait(
          contractIds,
          sourceDomain,
          targetDomain,
          skipInactive,
          PositiveInt.tryCreate(batchSize),
        )(tc)
      )
    )

  @Help.Summary("Mark sequenced events as ignored.")
  @Help.Description(
    """This is the last resort to ignore events that the participant is unable to process.
      |Ignoring events may lead to subsequent failures, e.g., if the event creating a contract is ignored and
      |that contract is subsequently used. It may also lead to ledger forks if other participants still process
      |the ignored events.
      |It is possible to mark events as ignored that the participant has not yet received.
      |
      |The command will fail, if marking events between `from` and `to` as ignored would result in a gap in sequencer counters,
      |namely if `from <= to` and `from` is greater than `maxSequencerCounter + 1`,
      |where `maxSequencerCounter` is the greatest sequencer counter of a sequenced event stored by the underlying participant.
      |
      |The command will also fail, if `force == false` and `from` is smaller than the sequencer counter of the last event
      |that has been marked as clean.
      |(Ignoring such events would normally have no effect, as they have already been processed.)"""
  )
  def ignore_events(
      domainId: DomainId,
      from: SequencerCounter,
      to: SequencerCounter,
      force: Boolean = false,
  ): Unit =
    runRepairCommand(tc =>
      access {
        _.sync.repairService.ignoreEvents(domainId, from, to, force)(tc)
      }
    )

  @Help.Summary("Remove the ignored status from sequenced events.")
  @Help.Description(
    """This command has no effect on ordinary (i.e., not ignored) events and on events that do not exist.
      |
      |The command will fail, if marking events between `from` and `to` as unignored would result in a gap in sequencer counters,
      |namely if there is one empty ignored event with sequencer counter between `from` and `to` and
      |another empty ignored event with sequencer counter greater than `to`.
      |An empty ignored event is an event that has been marked as ignored and not yet received by the participant.
      |
      |The command will also fail, if `force == false` and `from` is smaller than the sequencer counter of the last event
      |that has been marked as clean.
      |(Unignoring such events would normally have no effect, as they have already been processed.)"""
  )
  def unignore_events(
      domainId: DomainId,
      from: SequencerCounter,
      to: SequencerCounter,
      force: Boolean = false,
  ): Unit =
    runRepairCommand(tc =>
      access {
        _.sync.repairService.unignoreEvents(domainId, from, to, force)(tc)
      }
    )
}

object ParticipantRepairAdministration {
  // Default file name shared by `export_acs` and `import_acs`.
  // (A previously unused `DefaultFile = "canton-acs-snapshot.gz"` constant was removed.)
  private val ExportAcsDefaultFile = "canton-acs-export.gz"
}

// ---------------------------------------------------------------------------
// canton-3x/.../console/commands/PartiesAdministration.scala
// ---------------------------------------------------------------------------
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.console.commands

import cats.syntax.either.*
import cats.syntax.foldable.*
import cats.syntax.traverse.*
import com.digitalasset.canton.LedgerParticipantId
import com.digitalasset.canton.admin.api.client.commands.{
  ParticipantAdminCommands,
  TopologyAdminCommands,
}
import com.digitalasset.canton.admin.api.client.data.{
  ListConnectedDomainsResult,
  ListPartiesResult,
  PartyDetails,
}
import com.digitalasset.canton.config.CantonRequireTypes.String255
import com.digitalasset.canton.config.NonNegativeDuration
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.console.{
  AdminCommandRunner,
  BaseInspection,
  CantonInternalError,
  CommandFailure,
  ConsoleCommandResult,
  ConsoleEnvironment,
  ConsoleMacros,
  FeatureFlag,
  FeatureFlagFilter,
  Help,
  Helpful,
  LocalParticipantReference,
  ParticipantReference,
}
import com.digitalasset.canton.logging.NamedLoggerFactory
import com.digitalasset.canton.participant.ParticipantNode
import com.digitalasset.canton.topology.*
import com.digitalasset.canton.topology.transaction.{
  ParticipantPermission,
  RequestSide,
  TopologyChangeOp,
}
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.ShowUtil.*
import com.google.protobuf.ByteString

import java.time.Instant
import scala.util.Try

/** Console command group for querying parties known to a node (via the topology
  * aggregation admin API).
  */
class PartiesAdministrationGroup(runner: AdminCommandRunner, consoleEnvironment: ConsoleEnvironment)
    extends Helpful {

  // Result-set cap taken from the console configuration.
  protected def defaultLimit: PositiveInt =
    consoleEnvironment.environment.config.parameters.console.defaultLimit

  import runner.*

  @Help.Summary(
    "List active parties, their active participants, and the participants' permissions on domains."
  )
  @Help.Description(
    """Inspect the parties known by this participant as used for synchronisation.
      |The response is built from the timestamped topology transactions of each domain, excluding the
      |authorized store of the given node. For each known party, the list of active
      |participants and their permission on the domain for that party is given.
      |
      filterParty: Filter by parties starting with the given string.
      filterParticipant: Filter for parties that are hosted by a participant with an id starting with the given string
      filterDomain: Filter by domains whose id starts with the given string.
      asOf: Optional timestamp to inspect the topology state at a given point in time.
      limit: Limit on the number of parties fetched (defaults to canton.parameters.console.default-limit).

      Example: participant1.parties.list(filterParty="alice")
      """
  )
  def list(
      filterParty: String = "",
      filterParticipant: String = "",
      filterDomain: String = "",
      asOf: Option[Instant] = None,
      limit: PositiveInt = defaultLimit,
  ): Seq[ListPartiesResult] =
    consoleEnvironment.run {
      adminCommand(
        TopologyAdminCommands.Aggregation.ListParties(
          filterDomain = filterDomain,
          filterParty = filterParty,
          filterParticipant = filterParticipant,
          asOf = asOf,
          limit = limit,
        )
      )
    }
}

/** Party administration scoped to a single participant: hosted-party queries plus
  * enabling/disabling parties on this participant.
  *
  * `participantId` is by-name because the id may only become available after node
  * initialization.
  */
class ParticipantPartiesAdministrationGroup(
    participantId: => ParticipantId,
    runner: AdminCommandRunner & ParticipantAdministration & BaseLedgerApiAdministration,
    consoleEnvironment: ConsoleEnvironment,
) extends PartiesAdministrationGroup(runner, consoleEnvironment) {

  @Help.Summary("List parties hosted by this participant")
  @Help.Description("""Inspect the parties hosted by this participant as used for synchronisation.
      |The response is built from the timestamped topology transactions of each domain, excluding the
      |authorized store of the given node. The search will include all hosted parties and is equivalent
      |to running the `list` method using the participant id of the invoking participant.
      |
      filterParty: Filter by parties starting with the given string.
      filterDomain: Filter by domains whose id starts with the given string.
      asOf: Optional timestamp to inspect the topology state at a given point in time.
      limit: How many items to return (defaults to canton.parameters.console.default-limit)

      Example: participant1.parties.hosted(filterParty="alice")""")
  def hosted(
      filterParty: String = "",
      filterDomain: String = "",
      asOf: Option[Instant] = None,
      limit: PositiveInt = defaultLimit,
  ): Seq[ListPartiesResult] = {
    // Delegates to `list`, fixing the participant filter to this participant.
    list(
      filterParty,
      filterParticipant = participantId.filterString,
      filterDomain = filterDomain,
      asOf = asOf,
      limit = limit,
    )
  }

  @Help.Summary("Find a party from a filter string")
  @Help.Description(
    """Will search for all parties that match this filter string. If it finds exactly one party, it
      |will return that one. Otherwise, the function will throw."""
  )
  def find(filterParty: String): PartyId = {
    list(filterParty).map(_.party).distinct.toList match {
      case one :: Nil => one
      case Nil => throw new IllegalArgumentException(s"No party matching $filterParty")
      case more =>
        throw new IllegalArgumentException(s"Multiple parties match $filterParty: $more")
    }
  }

  @Help.Summary("Enable/add party to participant")
  @Help.Description("""This function registers a new party with the current participant within the participants
      |namespace. The function fails if the participant does not have appropriate signing keys
      |to issue the corresponding PartyToParticipant topology transaction.
      |Optionally, a local display name can be added. This display name will be exposed on the
      |ledger API party management endpoint.
      |Specifying a set of domains via the `WaitForDomain` parameter ensures that the domains have
      |enabled/added a party by the time the call returns, but other participants connected to the same domains may not
      |yet be aware of the party.
      |Additionally, a sequence of additional participants can be added to be synchronized to
      |ensure that the party is known to these participants as well before the function terminates.
      |""")
  def enable(
      name: String,
      displayName: Option[String] = None,
      // TODO(i10809) replace wait for domain for a clean topology synchronisation using the dispatcher info
      waitForDomain: DomainChoice = DomainChoice.Only(Seq()),
      synchronizeParticipants: Seq[ParticipantReference] = Seq(),
  ): PartyId = {

    // Domains on which the party (per the given listing) is registered on some participant.
    def registered(lst: => Seq[ListPartiesResult]): Set[DomainId] = {
      lst
        .flatMap(_.participants.flatMap(_.domains))
        .map(_.domain)
        .toSet
    }
    // Domains on which the new party is registered on *this* participant.
    def primaryRegistered(partyId: PartyId) =
      registered(
        list(filterParty = partyId.filterString, filterParticipant = participantId.filterString)
      )

    def primaryConnected: Either[String, Seq[ListConnectedDomainsResult]] =
      runner
        .adminCommand(ParticipantAdminCommands.DomainConnectivity.ListConnectedDomains())
        .toEither

    // Resolves `waitForDomain` against the given connected-domains listing:
    // All -> every connected domain; Only(Seq()) -> none; Only(aliases) -> each alias
    // must be among the connected domains, otherwise the whole call fails.
    def findDomainIds(
        name: String,
        connected: Either[String, Seq[ListConnectedDomainsResult]],
    ): Either[String, Set[DomainId]] = {
      for {
        domainIds <- waitForDomain match {
          case DomainChoice.All =>
            connected.map(_.map(_.domainId))
          case DomainChoice.Only(Seq()) =>
            Right(Seq())
          case DomainChoice.Only(aliases) =>
            connected.flatMap { res =>
              val connectedM = res.map(x => (x.domainAlias, x.domainId)).toMap
              aliases.traverse(alias => connectedM.get(alias).toRight(s"Unknown: $alias for $name"))
            }
        }
      } yield domainIds.toSet
    }
    // Polls `condition` until true or the ledger-command timeout elapses; maps the
    // timeout failure to `message`.
    def retryE(condition: => Boolean, message: => String): Either[String, Unit] = {
      AdminCommandRunner
        .retryUntilTrue(consoleEnvironment.commandTimeouts.ledgerCommand)(condition)
        .toEither
        .leftMap(_ => message)
    }
    // Waits until the party is registered on all `domainIds` (no-op for an empty set).
    def waitForParty(
        partyId: PartyId,
        domainIds: Set[DomainId],
        registered: => Set[DomainId],
        queriedParticipant: ParticipantId = participantId,
    ): Either[String, Unit] = {
      if (domainIds.nonEmpty) {
        retryE(
          domainIds subsetOf registered,
          show"Party $partyId did not appear for $queriedParticipant on domain ${domainIds.diff(registered)}",
        )
      } else Right(())
    }
    // Only sync with the ledger API server when the caller asked to wait for domains.
    val syncLedgerApi = waitForDomain match {
      case DomainChoice.All => true
      case DomainChoice.Only(aliases) => aliases.nonEmpty
    }
    consoleEnvironment.run {
      ConsoleCommandResult.fromEither {
        for {
          // validating party and display name here to prevent, e.g., a party being registered despite it having an invalid display name
          // assert that name is valid ParticipantId
          id <- Identifier.create(name)
          partyId = PartyId(participantId.uid.copy(id = id))
          _ <- Either
            .catchOnly[IllegalArgumentException](LedgerParticipantId.assertFromString(name))
            .leftMap(_.getMessage)
          validDisplayName <- displayName.map(String255.create(_, Some("display name"))).sequence
          // find the domain ids
          domainIds <- findDomainIds(this.participantId.uid.id.unwrap, primaryConnected)
          // find the domain ids the additional participants are connected to
          additionalSync <- synchronizeParticipants.traverse { p =>
            findDomainIds(
              p.name,
              Try(p.domains.list_connected()).toEither.leftMap {
                case exception @ (_: CommandFailure | _: CantonInternalError) =>
                  exception.getMessage
                case exception => throw exception
              },
            )
              .map(domains => (p, domains intersect domainIds))
          }
          _ <- runPartyCommand(partyId, TopologyChangeOp.Add).toEither
          _ <- validDisplayName match {
            case None => Right(())
            case Some(name) =>
              runner
                .adminCommand(
                  ParticipantAdminCommands.PartyNameManagement
                    .SetPartyDisplayName(partyId, name.unwrap)
                )
                .toEither
          }
          _ <- waitForParty(partyId, domainIds, primaryRegistered(partyId))
          _ <-
            // sync with ledger-api server if this node is connected to at least one domain
            if (syncLedgerApi && primaryConnected.exists(_.nonEmpty))
              retryE(
                runner.ledger_api.parties.list().map(_.party).contains(partyId),
                show"The party $partyId never appeared on the ledger API server",
              )
            else Right(())
          _ <- additionalSync.traverse_ { case (p, domains) =>
            waitForParty(
              partyId,
              domains,
              registered(
                p.parties.list(
                  filterParty = partyId.filterString,
                  filterParticipant = participantId.filterString,
                )
              ),
              p.id,
            )
          }
        } yield partyId
      }
    }

  }

  // Issues the PartyToParticipant topology transaction (Add or Remove) for `partyId`
  // on this participant with Submission permission.
  private def runPartyCommand(
      partyId: PartyId,
      op: TopologyChangeOp,
      force: Boolean = false,
  ): ConsoleCommandResult[ByteString] = {
    runner
      .adminCommand(
        TopologyAdminCommands.Write.AuthorizePartyToParticipant(
          op,
          None,
          RequestSide.Both,
          partyId,
          participantId,
          ParticipantPermission.Submission,
          replaceExisting = false,
          force = force,
        )
      )
  }

  @Help.Summary("Disable party on participant")
  def disable(name: Identifier, force: Boolean = false): Unit = {
    // Removes the PartyToParticipant mapping; the returned transaction bytes are discarded.
    val partyId = PartyId(participantId.uid.copy(id = name))
    val _ = consoleEnvironment.run {
      runPartyCommand(partyId, TopologyChangeOp.Remove, force)
    }
  }

  @Help.Summary("Update participant-local party details")
  @Help.Description(
    """Currently you can update only the annotations.
      |You cannot update other user attributes.
      party: party to be updated,
      modifier: a function to modify the party details, e.g.: `partyDetails => { partyDetails.copy(annotations = partyDetails.annotations.updated("a", "b").removed("c")) }`"""
  )
  def update(
      party: PartyId,
      modifier: PartyDetails => PartyDetails,
  ): PartyDetails = {
    // Delegates to the ledger API party-management endpoint.
    runner.ledger_api.parties.update(
      party = party,
      modifier = modifier,
    )
  }

  @Help.Summary("Set party display name")
  @Help.Description(
    "Locally set the party display name (shown on the ledger-api) to the given value"
  )
  def set_display_name(party: PartyId, displayName: String): Unit = consoleEnvironment.run {
    // takes displayName as String argument which is validated at GrpcPartyNameManagementService
    runner.adminCommand(
      ParticipantAdminCommands.PartyNameManagement.SetPartyDisplayName(party, displayName)
    )
  }
}

/** Party administration for a local (in-process) participant reference; adds
  * preview-gated topology-synchronisation helpers on top of the remote group.
  */
class LocalParticipantPartiesAdministrationGroup(
    reference: LocalParticipantReference,
    runner: AdminCommandRunner
      & BaseInspection[ParticipantNode]
      & ParticipantAdministration
      & BaseLedgerApiAdministration,
    val consoleEnvironment: ConsoleEnvironment,
    val loggerFactory: NamedLoggerFactory,
) extends ParticipantPartiesAdministrationGroup(reference.id, runner, consoleEnvironment)
    with FeatureFlagFilter {

  import runner.*

  @Help.Summary("Waits for any topology changes to be observed", FeatureFlag.Preview)
  @Help.Description(
    "Will throw an exception if the given topology has not been observed within the given timeout."
  )
  def await_topology_observed[T <: ParticipantReference](
      partyAssignment: Set[(PartyId, T)],
      timeout: NonNegativeDuration = consoleEnvironment.commandTimeouts.bounded,
  )(implicit env: ConsoleEnvironment): Unit =
    check(FeatureFlag.Preview) {
      // NOTE(review): the `node` handle from `access` is unused here — `access` is
      // presumably invoked only to assert the node is running; confirm before simplifying.
      access(node =>
        TopologySynchronisation.awaitTopologyObserved(reference, partyAssignment, timeout)
      )
    }

}

object TopologySynchronisation {

  /** Blocks (retrying up to `timeout`) until, on every domain the reference participant is
    * active on, all (party, participant) pairs in `partyAssignment` are visible in the
    * topology state as of that domain's current time.
    */
  def awaitTopologyObserved[T <: ParticipantReference](
      reference: ParticipantReference,
      partyAssignment: Set[(PartyId, T)],
      timeout: NonNegativeDuration,
  )(implicit env: ConsoleEnvironment): Unit =
    TraceContext.withNewTraceContext { _ =>
      ConsoleMacros.utils.retry_until_true(timeout) {
        val partiesWithId = partyAssignment.map { case (party, participantRef) =>
          (party, participantRef.id)
        }
        env.domains.all.forall { domain =>
          val domainId = domain.id
          // Domains the participant is not active on are skipped.
          !reference.domains.active(domain) || {
            // Query the topology state as of the domain's current time.
            val timestamp = reference.testing.fetch_domain_time(domainId)
            partiesWithId.subsetOf(
              reference.parties
                .list(asOf = Some(timestamp.toInstant))
                .flatMap(res => res.participants.map(par => (res.party, par.participant)))
                .toSet
            )
          }
        }
      }
    }
}

// ---------------------------------------------------------------------------
// canton-3x/.../console/commands/PartiesAdministrationX.scala
// ---------------------------------------------------------------------------
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.console.commands + +import cats.syntax.either.* +import cats.syntax.foldable.* +import cats.syntax.traverse.* +import com.digitalasset.canton.LedgerParticipantId +import com.digitalasset.canton.admin.api.client.commands.{ + ParticipantAdminCommands, + TopologyAdminCommands, + TopologyAdminCommandsX, +} +import com.digitalasset.canton.admin.api.client.data.{ + ListConnectedDomainsResult, + ListPartiesResult, + PartyDetails, +} +import com.digitalasset.canton.config.CantonRequireTypes.String255 +import com.digitalasset.canton.config.NonNegativeDuration +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.console.{ + AdminCommandRunner, + BaseInspection, + CantonInternalError, + CommandFailure, + ConsoleCommandResult, + ConsoleEnvironment, + ConsoleMacros, + FeatureFlag, + FeatureFlagFilter, + Help, + Helpful, + InstanceReferenceX, + LocalParticipantReferenceX, + ParticipantReferenceX, +} +import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.participant.ParticipantNodeX +import com.digitalasset.canton.topology.* +import com.digitalasset.canton.topology.store.TopologyStoreId.AuthorizedStore +import com.digitalasset.canton.topology.transaction.* +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.ShowUtil.* + +import java.time.Instant +import scala.util.Try + +class PartiesAdministrationGroupX( + runner: AdminCommandRunner, + consoleEnvironment: ConsoleEnvironment, +) extends Helpful { + + protected def defaultLimit: PositiveInt = + consoleEnvironment.environment.config.parameters.console.defaultLimit + + import runner.* + + @Help.Summary( + "List active parties, their active participants, and the participants' permissions on domains." + ) + @Help.Description( + """Inspect the parties known by this participant as used for synchronisation. 
+ |The response is built from the timestamped topology transactions of each domain, excluding the + |authorized store of the given node. For each known party, the list of active + |participants and their permission on the domain for that party is given. + | + filterParty: Filter by parties starting with the given string. + filterParticipant: Filter for parties that are hosted by a participant with an id starting with the given string + filterDomain: Filter by domains whose id starts with the given string. + asOf: Optional timestamp to inspect the topology state at a given point in time. + limit: Limit on the number of parties fetched (defaults to canton.parameters.console.default-limit). + + Example: participant1.parties.list(filterParty="alice") + """ + ) + def list( + filterParty: String = "", + filterParticipant: String = "", + filterDomain: String = "", + asOf: Option[Instant] = None, + limit: PositiveInt = defaultLimit, + ): Seq[ListPartiesResult] = + consoleEnvironment.run { + adminCommand( + TopologyAdminCommands.Aggregation.ListParties( + filterDomain = filterDomain, + filterParty = filterParty, + filterParticipant = filterParticipant, + asOf = asOf, + limit = limit, + ) + ) + } +} + +class ParticipantPartiesAdministrationGroupX( + participantId: => ParticipantId, + runner: AdminCommandRunner + & ParticipantAdministration + & BaseLedgerApiAdministration + & InstanceReferenceX, + consoleEnvironment: ConsoleEnvironment, +) extends PartiesAdministrationGroupX(runner, consoleEnvironment) { + + @Help.Summary("List parties hosted by this participant") + @Help.Description("""Inspect the parties hosted by this participant as used for synchronisation. + |The response is built from the timestamped topology transactions of each domain, excluding the + |authorized store of the given node. The search will include all hosted parties and is equivalent + |to running the `list` method using the participant id of the invoking participant. 
+ | + filterParty: Filter by parties starting with the given string. + filterDomain: Filter by domains whose id starts with the given string. + asOf: Optional timestamp to inspect the topology state at a given point in time. + limit: How many items to return (defaults to canton.parameters.console.default-limit) + + Example: participant1.parties.hosted(filterParty="alice")""") + def hosted( + filterParty: String = "", + filterDomain: String = "", + asOf: Option[Instant] = None, + limit: PositiveInt = defaultLimit, + ): Seq[ListPartiesResult] = { + list( + filterParty, + filterParticipant = participantId.filterString, + filterDomain = filterDomain, + asOf = asOf, + limit = limit, + ) + } + + @Help.Summary("Find a party from a filter string") + @Help.Description( + """Will search for all parties that match this filter string. If it finds exactly one party, it + |will return that one. Otherwise, the function will throw.""" + ) + def find(filterParty: String): PartyId = { + list(filterParty).map(_.party).distinct.toList match { + case one :: Nil => one + case Nil => throw new IllegalArgumentException(s"No party matching $filterParty") + case more => + throw new IllegalArgumentException(s"Multiple parties match $filterParty: $more") + } + } + + @Help.Summary("Enable/add party to participant") + @Help.Description("""This function registers a new party with the current participant within the participants + |namespace. The function fails if the participant does not have appropriate signing keys + |to issue the corresponding PartyToParticipant topology transaction. + |Optionally, a local display name can be added. This display name will be exposed on the + |ledger API party management endpoint. + |Specifying a set of domains via the `WaitForDomain` parameter ensures that the domains have + |enabled/added a party by the time the call returns, but other participants connected to the same domains may not + |yet be aware of the party. 
+ |Additionally, a sequence of additional participants can be added to be synchronized to + |ensure that the party is known to these participants as well before the function terminates. + |""") + def enable( + name: String, + namespace: Namespace = participantId.uid.namespace, + participants: Seq[ParticipantId] = Seq(participantId), + threshold: PositiveInt = PositiveInt.one, + displayName: Option[String] = None, + // TODO(i10809) replace wait for domain for a clean topology synchronisation using the dispatcher info + waitForDomain: DomainChoice = DomainChoice.Only(Seq()), + synchronizeParticipants: Seq[ParticipantReferenceX] = Seq(), + groupAddressing: Boolean = false, + mustFullyAuthorize: Boolean = true, + ): PartyId = { + + def registered(lst: => Seq[ListPartiesResult]): Set[DomainId] = { + lst + .flatMap(_.participants.flatMap(_.domains)) + .map(_.domain) + .toSet + } + def primaryRegistered(partyId: PartyId) = + registered( + list(filterParty = partyId.filterString, filterParticipant = participantId.filterString) + ) + + def primaryConnected: Either[String, Seq[ListConnectedDomainsResult]] = + runner + .adminCommand(ParticipantAdminCommands.DomainConnectivity.ListConnectedDomains()) + .toEither + + def findDomainIds( + name: String, + connected: Either[String, Seq[ListConnectedDomainsResult]], + ): Either[String, Set[DomainId]] = { + for { + domainIds <- waitForDomain match { + case DomainChoice.All => + connected.map(_.map(_.domainId)) + case DomainChoice.Only(Seq()) => + Right(Seq()) + case DomainChoice.Only(aliases) => + connected.flatMap { res => + val connectedM = res.map(x => (x.domainAlias, x.domainId)).toMap + aliases.traverse(alias => connectedM.get(alias).toRight(s"Unknown: $alias for $name")) + } + } + } yield domainIds.toSet + } + def retryE(condition: => Boolean, message: => String): Either[String, Unit] = { + AdminCommandRunner + .retryUntilTrue(consoleEnvironment.commandTimeouts.ledgerCommand)(condition) + .toEither + .leftMap(_ => message) + } 
+ def waitForParty( + partyId: PartyId, + domainIds: Set[DomainId], + registered: => Set[DomainId], + queriedParticipant: ParticipantId = participantId, + ): Either[String, Unit] = { + if (domainIds.nonEmpty) { + retryE( + domainIds subsetOf registered, + show"Party $partyId did not appear for $queriedParticipant on domain ${domainIds.diff(registered)}", + ) + } else Right(()) + } + val syncLedgerApi = waitForDomain match { + case DomainChoice.All => true + case DomainChoice.Only(aliases) => aliases.nonEmpty + } + consoleEnvironment.run { + ConsoleCommandResult.fromEither { + for { + // validating party and display name here to prevent, e.g., a party being registered despite it having an invalid display name + // assert that name is valid ParticipantId + id <- Identifier.create(name) + partyId = PartyId(id, namespace) + _ <- Either + .catchOnly[IllegalArgumentException](LedgerParticipantId.assertFromString(name)) + .leftMap(_.getMessage) + validDisplayName <- displayName.map(String255.create(_, Some("display name"))).sequence + // find the domain ids + domainIds <- findDomainIds(this.participantId.uid.id.unwrap, primaryConnected) + // find the domain ids the additional participants are connected to + additionalSync <- synchronizeParticipants.traverse { p => + findDomainIds( + p.name, + Try(p.domains.list_connected()).toEither.leftMap { + case exception @ (_: CommandFailure | _: CantonInternalError) => + exception.getMessage + case exception => throw exception + }, + ) + .map(domains => (p, domains intersect domainIds)) + } + _ <- runPartyCommand( + partyId, + participants, + threshold, + groupAddressing, + mustFullyAuthorize, + ).toEither + _ <- validDisplayName match { + case None => Right(()) + case Some(name) => + runner + .adminCommand( + ParticipantAdminCommands.PartyNameManagement + .SetPartyDisplayName(partyId, name.unwrap) + ) + .toEither + } + _ <- waitForParty(partyId, domainIds, primaryRegistered(partyId)) + _ <- + // sync with ledger-api server if this 
node is connected to at least one domain + if (syncLedgerApi && primaryConnected.exists(_.nonEmpty)) + retryE( + runner.ledger_api.parties.list().map(_.party).contains(partyId), + show"The party $partyId never appeared on the ledger API server", + ) + else Right(()) + _ <- additionalSync.traverse_ { case (p, domains) => + waitForParty( + partyId, + domains, + registered( + p.parties.list( + filterParty = partyId.filterString, + filterParticipant = participantId.filterString, + ) + ), + p.id, + ) + } + } yield partyId + } + } + + } + + private def runPartyCommand( + partyId: PartyId, + participants: Seq[ParticipantId], + threshold: PositiveInt, + groupAddressing: Boolean, + mustFullyAuthorize: Boolean, + ): ConsoleCommandResult[SignedTopologyTransactionX[TopologyChangeOpX, PartyToParticipantX]] = { + + runner + .adminCommand( + TopologyAdminCommandsX.Write.Propose( + // TODO(#14048) properly set the serial or introduce auto-detection so we don't + // have to set it on the client side + mapping = PartyToParticipantX( + partyId, + None, + threshold, + participants.map(pid => + HostingParticipant( + pid, + if (threshold.value > 1) ParticipantPermissionX.Confirmation + else ParticipantPermissionX.Submission, + ) + ), + groupAddressing, + ), + signedBy = Seq(this.participantId.uid.namespace.fingerprint), + serial = None, + store = AuthorizedStore.filterName, + mustFullyAuthorize = mustFullyAuthorize, + ) + ) + } + + @Help.Summary("Disable party on participant") + // TODO(#14067): reintroduce `force` once it is implemented on the server side and threaded through properly. + def disable(name: Identifier /*, force: Boolean = false*/ ): Unit = { + runner.topology.party_to_participant_mappings + .propose_delta( + PartyId(name, runner.id.member.uid.namespace), + removes = List(this.participantId), + ) + .discard + } + + @Help.Summary("Update participant-local party details") + @Help.Description( + """Currently you can update only the annotations. 
+ |You cannot update other user attributes. + party: party to be updated, + modifier: a function to modify the party details, e.g.: `partyDetails => { partyDetails.copy(annotations = partyDetails.annotations.updated("a", "b").removed("c")) }`""" + ) + def update( + party: PartyId, + modifier: PartyDetails => PartyDetails, + ): PartyDetails = { + runner.ledger_api.parties.update( + party = party, + modifier = modifier, + ) + } + + @Help.Summary("Set party display name") + @Help.Description( + "Locally set the party display name (shown on the ledger-api) to the given value" + ) + def set_display_name(party: PartyId, displayName: String): Unit = consoleEnvironment.run { + // takes displayName as String argument which is validated at GrpcPartyNameManagementService + runner.adminCommand( + ParticipantAdminCommands.PartyNameManagement.SetPartyDisplayName(party, displayName) + ) + } +} + +class LocalParticipantPartiesAdministrationGroupX( + reference: LocalParticipantReferenceX, + runner: AdminCommandRunner + & BaseInspection[ParticipantNodeX] + & ParticipantAdministration + & BaseLedgerApiAdministration + & InstanceReferenceX, + val consoleEnvironment: ConsoleEnvironment, + val loggerFactory: NamedLoggerFactory, +) extends ParticipantPartiesAdministrationGroupX(reference.id, runner, consoleEnvironment) + with FeatureFlagFilter { + + import runner.* + + @Help.Summary("Waits for any topology changes to be observed", FeatureFlag.Preview) + @Help.Description( + "Will throw an exception if the given topology has not been observed within the given timeout." 
+ ) + def await_topology_observed[T <: ParticipantReferenceX]( + partyAssignment: Set[(PartyId, T)], + timeout: NonNegativeDuration = consoleEnvironment.commandTimeouts.bounded, + )(implicit env: ConsoleEnvironment): Unit = + check(FeatureFlag.Preview) { + access(node => + TopologySynchronisationX.awaitTopologyObserved(reference, partyAssignment, timeout) + ) + } + +} + +object TopologySynchronisationX { + + def awaitTopologyObserved[T <: ParticipantReferenceX]( + reference: ParticipantReferenceX, + partyAssignment: Set[(PartyId, T)], + timeout: NonNegativeDuration, + )(implicit env: ConsoleEnvironment): Unit = + TraceContext.withNewTraceContext { _ => + ConsoleMacros.utils.retry_until_true(timeout) { + val partiesWithId = partyAssignment.map { case (party, participantRef) => + (party, participantRef.id) + } + env.domains.all.forall { domain => + val domainId = domain.id + !reference.domains.active(domain) || { + val timestamp = reference.testing.fetch_domain_time(domainId) + partiesWithId.subsetOf( + reference.parties + .list(asOf = Some(timestamp.toInstant)) + .flatMap(res => res.participants.map(par => (res.party, par.participant))) + .toSet + ) + } + } + } + } +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/PruningSchedulerAdministration.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/PruningSchedulerAdministration.scala new file mode 100644 index 0000000000..90b8056509 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/PruningSchedulerAdministration.scala @@ -0,0 +1,109 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.console.commands

import com.digitalasset.canton.admin.api.client.commands.PruningSchedulerCommands
import com.digitalasset.canton.admin.api.client.data.PruningSchedule
import com.digitalasset.canton.config.PositiveDurationSeconds
import com.digitalasset.canton.console.{AdminCommandRunner, ConsoleEnvironment, Help, Helpful}
import com.digitalasset.canton.logging.NamedLoggerFactory
import io.grpc.stub.AbstractStub

/** Console administration of the automatic pruning scheduler, shared by the
  * participant, mediator and sequencer nodes. Every call is forwarded to the
  * node through `runner`, using the gRPC service stub addressed by `commands`.
  */
class PruningSchedulerAdministration[T <: AbstractStub[T]](
    runner: AdminCommandRunner,
    protected val consoleEnvironment: ConsoleEnvironment,
    commands: PruningSchedulerCommands[T],
    protected val loggerFactory: NamedLoggerFactory,
) extends Helpful {

  @Help.Summary(
    "Activate automatic pruning according to the specified schedule."
  )
  @Help.Description(
    """The schedule is specified in cron format and "max_duration" and "retention" durations. The cron string indicates
      |the points in time at which pruning should begin in the GMT time zone, and the maximum duration indicates how
      |long from the start time pruning is allowed to run as long as pruning has not finished pruning up to the
      |specified retention period.
      """
  )
  def set_schedule(
      cron: String,
      maxDuration: PositiveDurationSeconds,
      retention: PositiveDurationSeconds,
  ): Unit = {
    // Named arguments kept explicit: all three parts of the schedule travel in one command.
    val cmd =
      commands.SetScheduleCommand(cron = cron, maxDuration = maxDuration, retention = retention)
    consoleEnvironment.run(runner.adminCommand(cmd))
  }

  @Help.Summary("Deactivate automatic pruning.")
  def clear_schedule(): Unit =
    consoleEnvironment.run(runner.adminCommand(commands.ClearScheduleCommand()))

  @Help.Summary("Modify the cron used by automatic pruning.")
  @Help.Description(
    """The schedule is specified in cron format and refers to pruning start times in the GMT time zone.
      |This call returns an error if no schedule has been configured via `set_schedule` or if automatic
      |pruning has been disabled via `clear_schedule`. Additionally if at the time of this modification, pruning is
      |actively running, a best effort is made to pause pruning and restart according to the new schedule. This
      |allows for the case that the new schedule no longer allows pruning at the current time.
      """
  )
  def set_cron(cron: String): Unit =
    consoleEnvironment.run(runner.adminCommand(commands.SetCronCommand(cron)))

  @Help.Summary("Modify the maximum duration used by automatic pruning.")
  @Help.Description(
    """The `maxDuration` is specified as a positive duration and has at most per-second granularity.
      |This call returns an error if no schedule has been configured via `set_schedule` or if automatic
      |pruning has been disabled via `clear_schedule`. Additionally if at the time of this modification, pruning is
      |actively running, a best effort is made to pause pruning and restart according to the new schedule. This
      |allows for the case that the new schedule no longer allows pruning at the current time.
      """
  )
  def set_max_duration(maxDuration: PositiveDurationSeconds): Unit =
    consoleEnvironment.run(runner.adminCommand(commands.SetMaxDurationCommand(maxDuration)))

  @Help.Summary("Update the pruning retention used by automatic pruning.")
  @Help.Description(
    """The `retention` is specified as a positive duration and has at most per-second granularity.
      |This call returns an error if no schedule has been configured via `set_schedule` or if automatic
      |pruning has been disabled via `clear_schedule`. Additionally if at the time of this update, pruning is
      |actively running, a best effort is made to pause pruning and restart with the newly specified retention.
      |This allows for the case that the new retention mandates retaining more data than previously.
      """
  )
  def set_retention(retention: PositiveDurationSeconds): Unit =
    consoleEnvironment.run(runner.adminCommand(commands.SetRetentionCommand(retention)))

  @Help.Summary("Inspect the automatic pruning schedule.")
  @Help.Description(
    """The schedule consists of a "cron" expression and "max_duration" and "retention" durations. The cron string
      |indicates the points in time at which pruning should begin in the GMT time zone, and the maximum duration
      |indicates how long from the start time pruning is allowed to run as long as pruning has not finished pruning
      |up to the specified retention period.
      |Returns `None` if no schedule has been configured via `set_schedule` or if `clear_schedule` has been invoked.
      """
  )
  def get_schedule(): Option[PruningSchedule] =
    consoleEnvironment.run(runner.adminCommand(commands.GetScheduleCommand()))

}
diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/SequencerAdministrationGroup.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/SequencerAdministrationGroup.scala
new file mode 100644
index 0000000000..5e4d5ce6da
--- /dev/null
+++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/SequencerAdministrationGroup.scala
@@ -0,0 +1,274 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.console.commands

import com.digitalasset.canton.admin.api.client.commands.EnterpriseSequencerAdminCommands.LocatePruningTimestampCommand
import com.digitalasset.canton.admin.api.client.commands.{
  EnterpriseSequencerAdminCommands,
  PruningSchedulerCommands,
  SequencerAdminCommands,
}
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.console.{
  AdminCommandRunner,
  ConsoleEnvironment,
  FeatureFlag,
  Help,
  Helpful,
}
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.domain.admin.v0.EnterpriseSequencerAdministrationServiceGrpc
import com.digitalasset.canton.domain.admin.v0.EnterpriseSequencerAdministrationServiceGrpc.EnterpriseSequencerAdministrationServiceStub
import com.digitalasset.canton.domain.sequencing.sequencer.{
  SequencerClients,
  SequencerPruningStatus,
  SequencerSnapshot,
}
import com.digitalasset.canton.logging.NamedLoggerFactory
import com.digitalasset.canton.time.EnrichedDurations.*
import com.digitalasset.canton.topology.Member
import com.digitalasset.canton.util.ShowUtil.*

import scala.concurrent.duration.FiniteDuration
import scala.jdk.DurationConverters.*

/** Sequencer pruning console commands shared between the 2.x and 3.x ("X") node flavours.
  * Implementations must supply `disable_member`, which pruning uses to disable clients
  * that would otherwise block the removal of unread data.
  */
trait SequencerAdministrationGroupCommon extends ConsoleCommandGroup {

  @Help.Summary("Pruning of the sequencer")
  object pruning
      extends PruningSchedulerAdministration(
        runner,
        consoleEnvironment,
        new PruningSchedulerCommands[EnterpriseSequencerAdministrationServiceStub](
          EnterpriseSequencerAdministrationServiceGrpc.stub,
          _.setSchedule(_),
          _.clearSchedule(_),
          _.setCron(_),
          _.setMaxDuration(_),
          _.setRetention(_),
          _.getSchedule(_),
        ),
        loggerFactory,
      )
      with Helpful {

    @Help.Summary("Status of the sequencer and its connected clients")
    @Help.Description(
      """Provides a detailed breakdown of information required for pruning:
        | - the current time according to this sequencer instance
        | - domain members that the sequencer supports
        | - for each member when they were registered and whether they are enabled
        | - a list of clients for each member, their last acknowledgement, and whether they are enabled
        |"""
    )
    def status(): SequencerPruningStatus =
      consoleEnvironment.run {
        runner.adminCommand(SequencerAdminCommands.GetPruningStatus)
      }

    @Help.Summary("Remove unnecessary data from the Sequencer up until the default retention point")
    @Help.Description(
      """Removes unnecessary data from the Sequencer that is earlier than the default retention period.
        |The default retention period is set in the configuration of the canton processing running this
        |command under `parameters.retention-period-defaults.sequencer`.
        |This pruning command requires that data is read and acknowledged by clients before
        |considering it safe to remove.
        |
        |If no data is being removed it could indicate that clients are not reading or acknowledging data
        |in a timely fashion (typically due to nodes going offline for long periods).
        |You have the option of disabling the members running on these nodes to allow removal of this data,
        |however this will mean that they will be unable to reconnect to the domain in the future.
        |To do this run `force_prune(dryRun = true)` to return a description of which members would be
        |disabled in order to prune the Sequencer.
        |If you are happy to disable the described clients then run `force_prune(dryRun = false)` to
        |permanently remove their unread data.
        |
        |Once offline clients have been disabled you can continue to run `prune` normally.
        |"""
    )
    def prune(): String = {
      val defaultRetention =
        consoleEnvironment.environment.config.parameters.retentionPeriodDefaults.sequencer
      prune_with_retention_period(defaultRetention.underlying)
    }

    @Help.Summary(
      "Force remove data from the Sequencer including data that may have not been read by offline clients"
    )
    @Help.Description(
      """Will force pruning up until the default retention period by potentially disabling clients
        |that have not yet read data we would like to remove.
        |Disabling these clients will prevent them from ever reconnecting to the Domain so should only be
        |used if the Domain operator is confident they can be permanently ignored.
        |Run with `dryRun = true` to review a description of which clients will be disabled first.
        |Run with `dryRun = false` to disable these clients and perform a forced pruning.
        |"""
    )
    def force_prune(dryRun: Boolean): String = {
      val defaultRetention =
        consoleEnvironment.environment.config.parameters.retentionPeriodDefaults.sequencer
      force_prune_with_retention_period(defaultRetention.underlying, dryRun)
    }

    @Help.Summary("Remove data that has been read up until a custom retention period")
    @Help.Description(
      "Similar to the above `prune` command but allows specifying a custom retention period"
    )
    def prune_with_retention_period(retentionPeriod: FiniteDuration): String = {
      val status = this.status()
      // `status.now` is the sequencer's clock, not the console's, so the retention window
      // is anchored to the node's own notion of time.
      val pruningTimestamp = status.now.minus(retentionPeriod.toJava)

      prune_at(pruningTimestamp)
    }

    @Help.Summary(
      "Force removing data from the Sequencer including data that may have not been read by offline clients up until a custom retention period"
    )
    @Help.Description(
      "Similar to the above `force_prune` command but allows specifying a custom retention period"
    )
    def force_prune_with_retention_period(
        retentionPeriod: FiniteDuration,
        dryRun: Boolean,
    ): String = {
      val status = this.status()
      val pruningTimestamp = status.now.minus(retentionPeriod.toJava)

      force_prune_at(pruningTimestamp, dryRun)
    }

    @Help.Summary("Remove data that has been read up until the specified time")
    @Help.Description(
      """Similar to the above `prune` command but allows specifying the exact time at which to prune.
        |The command will fail if a client has not yet read and acknowledged some data up to the specified time."""
    )
    def prune_at(timestamp: CantonTimestamp): String = {
      val status = this.status()
      // Unauthenticated member clients are disabled automatically before pruning so that
      // their unread data does not hold back the pruning point.
      val unauthenticatedMembers =
        status.unauthenticatedMembersToDisable(
          consoleEnvironment.environment.config.parameters.retentionPeriodDefaults.unauthenticatedMembers.toInternal
        )
      unauthenticatedMembers.foreach(disable_member)
      val msg = consoleEnvironment.run {
        runner.adminCommand(EnterpriseSequencerAdminCommands.Prune(timestamp))
      }
      s"$msg. Automatically disabled ${unauthenticatedMembers.size} unauthenticated member clients."
    }

    @Help.Summary(
      "Force removing data from the Sequencer including data that may have not been read by offline clients up until the specified time"
    )
    @Help.Description(
      "Similar to the above `force_prune` command but allows specifying the exact time at which to prune"
    )
    def force_prune_at(timestamp: CantonTimestamp, dryRun: Boolean): String = {
      val initialStatus = status()
      val clientsToDisable = initialStatus.clientsPreventingPruning(timestamp)

      if (dryRun) {
        formatDisableDryRun(timestamp, clientsToDisable)
      } else {
        disableClients(clientsToDisable)

        // Re-check after disabling: if the safe timestamp still lags the requested one,
        // something other than the disabled clients is blocking pruning — abort loudly
        // rather than silently pruning less than asked for.
        val statusAfterDisabling = status()
        val safeTimestamp = statusAfterDisabling.safePruningTimestamp

        if (safeTimestamp < timestamp)
          sys.error(
            s"We disabled all clients preventing pruning at $timestamp however the safe timestamp is set to $safeTimestamp"
          )

        prune_at(timestamp)
      }
    }

    private def disableClients(toDisable: SequencerClients): Unit =
      toDisable.members.foreach(disable_member)

    /** Render a human-readable report of the clients that `force_prune_at` would disable. */
    private def formatDisableDryRun(
        timestamp: CantonTimestamp,
        toDisable: SequencerClients,
    ): String = {
      val toDisableText =
        toDisable.members.toSeq.map(member => s"  ${show"- $member"} (member)").sorted

      if (toDisableText.isEmpty) {
        show"The Sequencer can be safely pruned for $timestamp without disabling clients"
      } else {
        // Header, one line per member, then the call-to-action footer.
        (s"To prune the Sequencer at $timestamp we will disable:" +:
          toDisableText :+
          "To disable these clients to allow for pruning at this point run force_prune with dryRun set to false")
          .mkString(System.lineSeparator())
      }
    }

    @Help.Summary("Obtain a timestamp at or near the beginning of sequencer state")
    @Help.Description(
      """This command provides insight into the current state of sequencer pruning when called with
        |the default value of `index` 1.
        |When pruning the sequencer manually via `prune_at` and with the intent to prune in batches, specify
        |a value such as 1000 to obtain a pruning timestamp that corresponds to the "end" of the batch."""
    )
    def locate_pruning_timestamp(
        index: PositiveInt = PositiveInt.one
    ): Option[CantonTimestamp] =
      check(FeatureFlag.Preview) {
        consoleEnvironment.run {
          runner.adminCommand(LocatePruningTimestampCommand(index))
        }
      }

  }

  /** Disable `member` at the sequencer; implemented by the concrete node flavour. */
  protected def disable_member(member: Member): Unit

}

trait SequencerAdministrationDisableMember extends ConsoleCommandGroup {

  /** Disable the provided member at the sequencer preventing them from reading and writing, and allowing their
    * data to be pruned.
    */
  @Help.Summary(
    "Disable the provided member at the Sequencer that will allow any unread data for them to be removed"
  )
  @Help.Description("""This will prevent any client for the given member from reconnecting to the Sequencer
                      |and allow any unread/unacknowledged data they have to be removed.
                      |This should only be used if the domain operator is confident the member will never need
                      |to reconnect as there is no way to re-enable the member.
                      |To view members using the sequencer run `sequencer.status()`.""")
  def disable_member(member: Member): Unit = consoleEnvironment.run {
    runner.adminCommand(EnterpriseSequencerAdminCommands.DisableMember(member))
  }
}

class SequencerAdministrationGroup(
    val runner: AdminCommandRunner,
    val consoleEnvironment: ConsoleEnvironment,
    val loggerFactory: NamedLoggerFactory,
) extends SequencerAdministrationGroupCommon
    with SequencerAdministrationDisableMember {

  /** Take a snapshot of the sequencer state at the given timestamp, to be used as the
    * initial state by other sequencer nodes in the process of onboarding.
    */
  def snapshot(timestamp: CantonTimestamp): SequencerSnapshot =
    consoleEnvironment.run {
      runner.adminCommand(EnterpriseSequencerAdminCommands.Snapshot(timestamp))
    }

}

trait SequencerAdministrationGroupX extends SequencerAdministrationGroupCommon {

  @Help.Summary("Methods used for repairing the node")
  object repair extends ConsoleCommandGroup.Impl(this) with SequencerAdministrationDisableMember {}

}
diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/TopologyAdministration.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/TopologyAdministration.scala
new file mode 100644
index 0000000000..b6b1043ca6
--- /dev/null
+++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/TopologyAdministration.scala
@@ -0,0 +1,1282 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.console.commands + +import cats.syntax.option.* +import com.daml.lf.data.Ref.PackageId +import com.digitalasset.canton.DiscardOps +import com.digitalasset.canton.admin.api.client.commands.{GrpcAdminCommand, TopologyAdminCommands} +import com.digitalasset.canton.admin.api.client.data.* +import com.digitalasset.canton.config.{ConsoleCommandTimeout, NonNegativeDuration} +import com.digitalasset.canton.console.{ + AdminCommandRunner, + ConsoleCommandResult, + ConsoleEnvironment, + ConsoleMacros, + FeatureFlag, + Help, + Helpful, + InstanceReferenceCommon, +} +import com.digitalasset.canton.crypto.* +import com.digitalasset.canton.health.admin.data.TopologyQueueStatus +import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.protocol.DynamicDomainParameters as DynamicDomainParametersInternal +import com.digitalasset.canton.topology.* +import com.digitalasset.canton.topology.admin.grpc.BaseQuery +import com.digitalasset.canton.topology.store.TopologyStoreId.AuthorizedStore +import com.digitalasset.canton.topology.store.{StoredTopologyTransactions, TimeQuery} +import com.digitalasset.canton.topology.transaction.* +import com.digitalasset.canton.version.ProtocolVersion +import com.google.protobuf.ByteString + +import java.util.concurrent.atomic.AtomicReference + +abstract class TopologyAdministrationGroupCommon( + instance: InstanceReferenceCommon, + topologyQueueStatus: => Option[TopologyQueueStatus], + val consoleEnvironment: ConsoleEnvironment, + val loggerFactory: NamedLoggerFactory, +) extends Helpful { + + protected val runner: AdminCommandRunner = instance + + def owner_to_key_mappings: OwnerToKeyMappingsGroup + + // small cache to avoid repetitive calls to fetchId (as the id is immutable once set) + protected val idCache = + new AtomicReference[Option[UniqueIdentifier]](None) + + private[console] def clearCache(): Unit = { + idCache.set(None) + } + + 
protected def getIdCommand(): ConsoleCommandResult[UniqueIdentifier] + + private[console] def idHelper[T]( + apply: UniqueIdentifier => T + ): T = { + apply(idCache.get() match { + case Some(v) => v + case None => + val r = consoleEnvironment.run { + getIdCommand() + } + idCache.set(Some(r)) + r + }) + } + + @Help.Summary("Topology synchronisation helpers", FeatureFlag.Preview) + @Help.Group("Synchronisation Helpers") + object synchronisation { + + @Help.Summary("Check if the topology processing of a node is idle") + @Help.Description("""Topology transactions pass through a set of queues before becoming effective on a domain. + |This function allows to check if all the queues are empty. + |While both domain and participant nodes support similar queues, there is some ambiguity around + |the participant queues. While the domain does really know about all in-flight transactions at any + |point in time, a participant won't know about the state of any transaction that is currently being processed + |by the domain topology dispatcher.""") + def is_idle(): Boolean = + topologyQueueStatus + .forall(_.isIdle) // report un-initialised as idle to not break manual init process + + @Help.Summary("Wait until the topology processing of a node is idle") + @Help.Description("""This function waits until the `is_idle()` function returns true.""") + def await_idle( + timeout: NonNegativeDuration = consoleEnvironment.commandTimeouts.bounded + ): Unit = + ConsoleMacros.utils.retry_until_true(timeout)( + is_idle(), + s"topology queue status never became idle ${topologyQueueStatus} after ${timeout}", + ) + + /** run a topology change command synchronized and wait until the node becomes idle again */ + private[console] def run[T](timeout: Option[NonNegativeDuration])(func: => T): T = { + val ret = func + ConsoleMacros.utils.synchronize_topology(timeout)(consoleEnvironment) + ret + } + + /** run a topology change command synchronized and wait until the node becomes idle again */ + 
private[console] def runAdminCommand[T]( + timeout: Option[NonNegativeDuration] + )(grpcCommand: => GrpcAdminCommand[_, _, T]): T = { + val ret = consoleEnvironment.run(runner.adminCommand(grpcCommand)) + ConsoleMacros.utils.synchronize_topology(timeout)(consoleEnvironment) + ret + } + } + +} + +/** OwnerToKeyMappingsGroup to parameterize by different TopologyChangeOp/X + */ +abstract class OwnerToKeyMappingsGroup( + commandTimeouts: ConsoleCommandTimeout +) { + def rotate_key( + nodeInstance: InstanceReferenceCommon, + owner: Member, + currentKey: PublicKey, + newKey: PublicKey, + ): Unit +} + +trait InitNodeId extends ConsoleCommandGroup { + + @Help.Summary("Initialize the node with a unique identifier") + @Help.Description("""Every node in Canton is identified using a unique identifier, which is composed + |of a user-chosen string and the fingerprint of a signing key. The signing key is the root key + |defining a so-called namespace, where the signing key has the ultimate control over + |issuing new identifiers. + |During initialisation, we have to pick such a unique identifier. + |By default, initialisation happens automatically, but it can be turned off by setting the auto-init + |option to false. 
+ |Automatic node initialisation is usually turned off to preserve the identity of a participant or domain + |node (during major version upgrades) or if the topology transactions are managed through + |a different topology manager than the one integrated into this node.""") + def init_id(identifier: Identifier, fingerprint: Fingerprint): UniqueIdentifier = + consoleEnvironment.run { + runner.adminCommand(TopologyAdminCommands.Init.InitId(identifier.unwrap, fingerprint.unwrap)) + } + +} + +class TopologyAdministrationGroup( + instance: InstanceReferenceCommon, + topologyQueueStatus: => Option[TopologyQueueStatus], + consoleEnvironment: ConsoleEnvironment, + loggerFactory: NamedLoggerFactory, +) extends TopologyAdministrationGroupCommon( + instance, + topologyQueueStatus, + consoleEnvironment, + loggerFactory, + ) + with InitNodeId { + + import runner.* + + override protected def getIdCommand(): ConsoleCommandResult[UniqueIdentifier] = + runner.adminCommand(TopologyAdminCommands.Init.GetId()) + + @Help.Summary("Upload signed topology transaction") + @Help.Description( + """Topology transactions can be issued with any topology manager. In some cases, such + |transactions need to be copied manually between nodes. This function allows for + |uploading previously exported topology transaction into the authorized store (which is + |the name of the topology managers transaction store.""" + ) + def load_transaction(bytes: ByteString): Unit = + consoleEnvironment.run { + adminCommand( + TopologyAdminCommands.Write.AddSignedTopologyTransaction(bytes) + ) + } + + @Help.Summary("Inspect topology stores") + @Help.Group("Topology stores") + object stores extends Helpful { + @Help.Summary("List available topology stores") + @Help.Description("""Topology transactions are stored in these stores. There are the following stores: + | + |"Authorized" - The authorized store is the store of a topology manager. 
Updates to the topology state are made + | by adding new transactions to the "Authorized" store. Both the participant and the domain nodes topology manager + | have such a store. + | A participant node will distribute all the content in the Authorized store to the domains it is connected to. + | The domain node will distribute the content of the Authorized store through the sequencer to the domain members + | in order to create the authoritative topology state on a domain (which is stored in the store named using the domain-id), + | such that every domain member will have the same view on the topology state on a particular domain. + | + |"" - The domain store is the authorized topology state on a domain. A participant has one store for each + | domain it is connected to. The domain has exactly one store with its domain-id. + | + |"Requested" - A domain can be configured such that when participant tries to register a topology transaction with + | the domain, the transaction is placed into the "Requested" store such that it can be analysed and processed with + | user defined process. + |""") + def list(): Seq[String] = + consoleEnvironment.run { + adminCommand( + TopologyAdminCommands.Read.ListStores() + ) + } + } + + @Help.Summary("Manage namespace delegations") + @Help.Group("Namespace delegations") + object namespace_delegations extends Helpful { + + @Help.Summary("Change namespace delegation") + @Help.Description( + """Delegates the authority to authorize topology transactions in a certain namespace to a certain + |key. The keys are referred to using their fingerprints. They need to be either locally generated or have been + |previously imported. + ops: Either Add or Remove the delegation. + namespace: The namespace whose authorization authority is delegated. + signedBy: Optional fingerprint of the authorizing key. The authorizing key needs to be either the authorizedKey + for root certificates. 
Otherwise, the signedBy key needs to refer to a previously authorized key, which + means that we use the signedBy key to refer to a locally available CA. + authorizedKey: Fingerprint of the key to be authorized. If signedBy equals authorizedKey, then this transaction + corresponds to a self-signed root certificate. If the keys differ, then we get an intermediate CA. + isRootDelegation: If set to true (default = false), the authorized key will be allowed to issue NamespaceDelegations. + synchronize: Synchronize timeout can be used to ensure that the state has been propagated into the node""" + ) + def authorize( + ops: TopologyChangeOp, + namespace: Fingerprint, + authorizedKey: Fingerprint, + isRootDelegation: Boolean = false, + signedBy: Option[Fingerprint] = None, + synchronize: Option[NonNegativeDuration] = Some( + consoleEnvironment.commandTimeouts.bounded + ), + // intentionally not documented force flag, as it is dangerous + force: Boolean = false, + ): ByteString = + synchronisation.run(synchronize)(consoleEnvironment.run { + adminCommand( + TopologyAdminCommands.Write + .AuthorizeNamespaceDelegation( + ops, + signedBy, + namespace, + authorizedKey, + isRootDelegation, + force, + ) + ) + }) + + @Help.Summary("List namespace delegation transactions") + @Help.Description("""List the namespace delegation transaction present in the stores. Namespace delegations + |are topology transactions that permission a key to issue topology transactions within + |a certain namespace. + + filterStore: Filter for topology stores starting with the given filter string (Authorized, , Requested) + useStateStore: If true (default), only properly authorized transactions that are part of the state will be selected. + timeQuery: The time query allows to customize the query by time. The following options are supported: + TimeQuery.HeadState (default): The most recent known state. + TimeQuery.Snapshot(ts): The state at a certain point in time. 
+ TimeQuery.Range(fromO, toO): Time-range of when the transaction was added to the store + operation: Optionally, what type of operation the transaction should have. State store only has "Add". + filterSigningKey: Filter for transactions that are authorized with a key that starts with the given filter string. + filterNamespace: Filter for namespaces starting with the given filter string. + filterTargetKey: Filter for namespaces delegations for the given target key. + protocolVersion: Export the topology transactions in the optional protocol version. + """) + def list( + filterStore: String = "", + useStateStore: Boolean = true, + timeQuery: TimeQuery = TimeQuery.HeadState, + operation: Option[TopologyChangeOp] = None, + filterNamespace: String = "", + filterSigningKey: String = "", + filterTargetKey: Option[Fingerprint] = None, + protocolVersion: Option[String] = None, + ): Seq[ListNamespaceDelegationResult] = { + val delegations = consoleEnvironment.run { + adminCommand( + TopologyAdminCommands.Read.ListNamespaceDelegation( + BaseQuery( + filterStore, + useStateStore, + timeQuery, + operation, + filterSigningKey, + protocolVersion.map(ProtocolVersion.tryCreate), + ), + filterNamespace, + ) + ) + } + + // TODO(i9419): Move authorization key filtering to the server side + filterTargetKey + .map { targetKey => + delegations.filter(_.item.target.fingerprint == targetKey) + } + .getOrElse(delegations) + } + } + + @Help.Summary("Manage identifier delegations") + @Help.Group("Identifier delegations") + object identifier_delegations extends Helpful { + + @Help.Summary("Change identifier delegation") + @Help.Description("""Delegates the authority of a certain identifier to a certain key. This corresponds to a normal + |certificate which binds identifier to a key. The keys are referred to using their fingerprints. + |They need to be either locally generated or have been previously imported. + ops: Either Add or Remove the delegation. 
+ signedBy: Refers to the optional fingerprint of the authorizing key which in turn refers to a specific, locally existing certificate. + authorizedKey: Fingerprint of the key to be authorized. + synchronize: Synchronize timeout can be used to ensure that the state has been propagated into the node + """) + def authorize( + ops: TopologyChangeOp, + identifier: UniqueIdentifier, + authorizedKey: Fingerprint, + signedBy: Option[Fingerprint] = None, + synchronize: Option[NonNegativeDuration] = Some( + consoleEnvironment.commandTimeouts.bounded + ), + ): ByteString = + synchronisation.run(synchronize)(consoleEnvironment.run { + adminCommand( + TopologyAdminCommands.Write + .AuthorizeIdentifierDelegation(ops, signedBy, identifier, authorizedKey) + ) + }) + + @Help.Summary("List identifier delegation transactions") + @Help.Description("""List the identifier delegation transaction present in the stores. Identifier delegations + |are topology transactions that permission a key to issue topology transactions for a certain + |unique identifier. + + filterStore: Filter for topology stores starting with the given filter string (Authorized, , Requested) + useStateStore: If true (default), only properly authorized transactions that are part of the state will be selected. + timeQuery: The time query allows to customize the query by time. The following options are supported: + TimeQuery.HeadState (default): The most recent known state. + TimeQuery.Snapshot(ts): The state at a certain point in time. + TimeQuery.Range(fromO, toO): Time-range of when the transaction was added to the store + operation: Optionally, what type of operation the transaction should have. State store only has "Add". + filterSigningKey: Filter for transactions that are authorized with a key that starts with the given filter string. + filterUid: Filter for unique identifiers starting with the given filter string. + protocolVersion: Export the topology transactions in the optional protocol version. 
+ |""") + def list( + filterStore: String = "", + useStateStore: Boolean = true, + timeQuery: TimeQuery = TimeQuery.HeadState, + operation: Option[TopologyChangeOp] = None, + filterUid: String = "", + filterSigningKey: String = "", + filterTargetKey: Option[Fingerprint] = None, + protocolVersion: Option[String] = None, + ): Seq[ListIdentifierDelegationResult] = { + val delegations = consoleEnvironment.run { + adminCommand( + TopologyAdminCommands.Read.ListIdentifierDelegation( + BaseQuery( + filterStore, + useStateStore, + timeQuery, + operation, + filterSigningKey, + protocolVersion.map(ProtocolVersion.tryCreate), + ), + filterUid, + ) + ) + } + + // TODO(i9419): Move authorization key filtering to the server side + filterTargetKey + .map { targetKey => + delegations.filter(_.item.target.fingerprint == targetKey) + } + .getOrElse(delegations) + } + + } + + @Help.Summary("Manage owner to key mappings") + @Help.Group("Owner to key mappings") + object owner_to_key_mappings + extends OwnerToKeyMappingsGroup(consoleEnvironment.commandTimeouts) + with Helpful { + + @Help.Summary("Change an owner to key mapping") + @Help.Description("""Change a owner to key mapping. A key owner is anyone in the system that needs a key-pair known + |to all members (participants, mediator, sequencer, topology manager) of a domain. + ops: Either Add or Remove the key mapping update. + signedBy: Optional fingerprint of the authorizing key which in turn refers to a specific, locally existing certificate. + ownerType: Role of the following owner (Participant, Sequencer, Mediator, DomainTopologyManager) + owner: Unique identifier of the owner. + key: Fingerprint of key + purposes: The purposes of the owner to key mapping. 
+ force: removing the last key is dangerous and must therefore be manually forced + synchronize: Synchronize timeout can be used to ensure that the state has been propagated into the node + """) + def authorize( + ops: TopologyChangeOp, + keyOwner: Member, + key: Fingerprint, + purpose: KeyPurpose, + signedBy: Option[Fingerprint] = None, + synchronize: Option[NonNegativeDuration] = Some( + consoleEnvironment.commandTimeouts.bounded + ), + force: Boolean = false, + ): ByteString = + synchronisation.run(synchronize)(consoleEnvironment.run { + adminCommand( + TopologyAdminCommands.Write + .AuthorizeOwnerToKeyMapping(ops, signedBy, keyOwner, key, purpose, force) + ) + }) + + @Help.Summary("List owner to key mapping transactions") + @Help.Description("""List the owner to key mapping transactions present in the stores. Owner to key mappings + |are topology transactions defining that a certain key is used by a certain key owner. + |Key owners are participants, sequencers, mediators and domains. + + filterStore: Filter for topology stores starting with the given filter string (Authorized, , Requested) + useStateStore: If true (default), only properly authorized transactions that are part of the state will be selected. + timeQuery: The time query allows to customize the query by time. The following options are supported: + TimeQuery.HeadState (default): The most recent known state. + TimeQuery.Snapshot(ts): The state at a certain point in time. + TimeQuery.Range(fromO, toO): Time-range of when the transaction was added to the store + operation: Optionally, what type of operation the transaction should have. State store only has "Add". + filterSigningKey: Filter for transactions that are authorized with a key that starts with the given filter string. + + filterKeyOwnerType: Filter for a particular type of key owner. + filterKeyOwnerUid: Filter for key owners unique identifier starting with the given filter string. 
+ filterKeyPurpose: Filter for keys with a particular purpose (Encryption or Signing) + protocolVersion: Export the topology transactions in the optional protocol version. + |""") + def list( + filterStore: String = "", + useStateStore: Boolean = true, + timeQuery: TimeQuery = TimeQuery.HeadState, + operation: Option[TopologyChangeOp] = None, + filterKeyOwnerType: Option[MemberCode] = None, + filterKeyOwnerUid: String = "", + filterKeyPurpose: Option[KeyPurpose] = None, + filterSigningKey: String = "", + protocolVersion: Option[String] = None, + ): Seq[ListOwnerToKeyMappingResult] = + consoleEnvironment.run { + adminCommand( + TopologyAdminCommands.Read.ListOwnerToKeyMapping( + BaseQuery( + filterStore, + useStateStore, + timeQuery, + operation, + filterSigningKey, + protocolVersion.map(ProtocolVersion.tryCreate), + ), + filterKeyOwnerType, + filterKeyOwnerUid, + filterKeyPurpose, + ) + ) + } + + @Help.Summary("Rotate the key for an owner to key mapping") + @Help.Description( + """Rotates the key for an existing owner to key mapping by issuing a new owner to key mapping with the new key + |and removing the previous owner to key mapping with the previous key. + + nodeInstance: The node instance that is used to verify that both current and new key pertain to this node. + |This avoids conflicts when there are different nodes with the same uuid (i.e., multiple sequencers). 
+ owner: The owner of the owner to key mapping + currentKey: The current public key that will be rotated + newKey: The new public key that has been generated + |""" + ) + def rotate_key( + nodeInstance: InstanceReferenceCommon, + owner: Member, + currentKey: PublicKey, + newKey: PublicKey, + ): Unit = { + + val keysInStore = nodeInstance.keys.secret.list().map(_.publicKey) + require( + keysInStore.contains(currentKey), + "The current key must exist and pertain to this node", + ) + require(keysInStore.contains(newKey), "The new key must exist and pertain to this node") + require(currentKey.purpose == newKey.purpose, "The rotated keys must have the same purpose") + + // Authorize the new key + // The owner will now have two keys, but by convention the first one added is always + // used by everybody. + authorize( + TopologyChangeOp.Add, + owner, + newKey.fingerprint, + newKey.purpose, + ).discard + + // Remove the old key by sending the matching `Remove` transaction + authorize( + TopologyChangeOp.Remove, + owner, + currentKey.fingerprint, + currentKey.purpose, + ).discard + } + } + + @Help.Summary("Manage party to participant mappings") + @Help.Group("Party to participant mappings") + object party_to_participant_mappings extends Helpful { + + @Help.Summary("Change party to participant mapping", FeatureFlag.Preview) + @Help.Description("""Change the association of a party to a participant. If both identifiers are in the same namespace, then the + |request-side is Both. If they differ, then we need to say whether the request comes from the + |party (RequestSide.From) or from the participant (RequestSide.To). And, we need the matching request + |of the other side. + |Please note that this is a preview feature due to the fact that inhomogeneous topologies can not yet be properly + |represented on the Ledger API. 
+ ops: Either Add or Remove the mapping + signedBy: Refers to the optional fingerprint of the authorizing key which in turn refers to a specific, locally existing certificate. + party: The unique identifier of the party we want to map to a participant. + participant: The unique identifier of the participant to which the party is supposed to be mapped. + side: The request side (RequestSide.From if we the transaction is from the perspective of the party, RequestSide.To from the participant.) + privilege: The privilege of the given participant which allows us to restrict an association (e.g. Confirmation or Observation). + replaceExisting: If true (default), replace any existing mapping with the new setting + synchronize: Synchronize timeout can be used to ensure that the state has been propagated into the node + """) + def authorize( + ops: TopologyChangeOp, + party: PartyId, + participant: ParticipantId, + side: RequestSide = RequestSide.Both, + permission: ParticipantPermission = ParticipantPermission.Submission, + signedBy: Option[Fingerprint] = None, + synchronize: Option[NonNegativeDuration] = Some( + consoleEnvironment.commandTimeouts.bounded + ), + replaceExisting: Boolean = true, + force: Boolean = false, + ): ByteString = + check(FeatureFlag.Preview)(synchronisation.run(synchronize)(consoleEnvironment.run { + adminCommand( + TopologyAdminCommands.Write.AuthorizePartyToParticipant( + ops, + signedBy, + side, + party, + participant, + permission, + replaceExisting = replaceExisting, + force = force, + ) + ) + })) + + @Help.Summary("List party to participant mapping transactions") + @Help.Description( + """List the party to participant mapping transactions present in the stores. Party to participant mappings + |are topology transactions used to allocate a party to a certain participant. The same party can be allocated + |on several participants with different privileges. 
+ |A party to participant mapping has a request-side that identifies whether the mapping is authorized by the + |party, by the participant or by both. In order to have a party be allocated to a given participant, we therefore + |need either two transactions (one with RequestSide.From, one with RequestSide.To) or one with RequestSide.Both. + + filterStore: Filter for topology stores starting with the given filter string (Authorized, , Requested) + useStateStore: If true (default), only properly authorized transactions that are part of the state will be selected. + timeQuery: The time query allows to customize the query by time. The following options are supported: + TimeQuery.HeadState (default): The most recent known state. + TimeQuery.Snapshot(ts): The state at a certain point in time. + TimeQuery.Range(fromO, toO): Time-range of when the transaction was added to the store + operation: Optionally, what type of operation the transaction should have. State store only has "Add". + filterSigningKey: Filter for transactions that are authorized with a key that starts with the given filter string. + + filterParty: Filter for parties starting with the given filter string. + filterParticipant: Filter for participants starting with the given filter string. + filterRequestSide: Optional filter for a particular request side (Both, From, To). + protocolVersion: Export the topology transactions in the optional protocol version. 
+ |""" + ) + def list( + filterStore: String = "", + useStateStore: Boolean = true, + timeQuery: TimeQuery = TimeQuery.HeadState, + operation: Option[TopologyChangeOp] = None, + filterParty: String = "", + filterParticipant: String = "", + filterRequestSide: Option[RequestSide] = None, + filterPermission: Option[ParticipantPermission] = None, + filterSigningKey: String = "", + protocolVersion: Option[String] = None, + ): Seq[ListPartyToParticipantResult] = + consoleEnvironment.run { + adminCommand( + TopologyAdminCommands.Read.ListPartyToParticipant( + BaseQuery( + filterStore, + useStateStore, + timeQuery, + operation, + filterSigningKey, + protocolVersion.map(ProtocolVersion.tryCreate), + ), + filterParty, + filterParticipant, + filterRequestSide, + filterPermission, + ) + ) + } + } + @Help.Summary("Inspect all topology transactions at once") + @Help.Group("All Transactions") + object all extends Helpful { + @Help.Summary("List all transaction") + @Help.Description( + """List all topology transactions in a store, independent of the particular type. This method is useful for + |exporting entire states. + + filterStore: Filter for topology stores starting with the given filter string (Authorized, , Requested) + useStateStore: If true (default), only properly authorized transactions that are part of the state will be selected. + timeQuery: The time query allows to customize the query by time. The following options are supported: + TimeQuery.HeadState (default): The most recent known state. + TimeQuery.Snapshot(ts): The state at a certain point in time. + TimeQuery.Range(fromO, toO): Time-range of when the transaction was added to the store + operation: Optionally, what type of operation the transaction should have. State store only has "Add". + filterAuthorizedKey: Filter the topology transactions by the key that has authorized the transactions. + protocolVersion: Export the topology transactions in the optional protocol version. 
+ |""" + ) + def list( + filterStore: String = AuthorizedStore.filterName, + useStateStore: Boolean = true, + timeQuery: TimeQuery = TimeQuery.HeadState, + operation: Option[TopologyChangeOp] = None, + filterAuthorizedKey: Option[Fingerprint] = None, + protocolVersion: Option[String] = None, + ): StoredTopologyTransactions[TopologyChangeOp] = { + val storedTransactions = consoleEnvironment + .run { + adminCommand( + TopologyAdminCommands.Read.ListAll( + BaseQuery( + filterStore, + useStateStore, + timeQuery, + operation, + filterSigningKey = "", + protocolVersion.map(ProtocolVersion.tryCreate), + ) + ) + ) + } + + // TODO(i9419): Move authorization key filtering to the server side + filterAuthorizedKey + .map { authKey => + val filteredResult = + storedTransactions.result.filter(_.transaction.key.fingerprint == authKey) + storedTransactions.copy(result = filteredResult) + } + .getOrElse(storedTransactions) + } + + @Help.Summary( + "Renew all topology transactions that have been authorized with a previous key using a new key" + ) + @Help.Description( + """Finds all topology transactions that have been authorized by `filterAuthorizedKey` and renews those topology transactions + |by authorizing them with the new key `authorizeWith`. + + filterAuthorizedKey: Filter the topology transactions by the key that has authorized the transactions. + authorizeWith: The key to authorize the renewed topology transactions. + |""" + ) + def renew( + filterAuthorizedKey: Fingerprint, + authorizeWith: Fingerprint, + ): Unit = { + + // First we check that the new key has at least the same permissions as the previous key in terms of namespace + // delegations and identifier delegations. 
+ + // The namespaces and identifiers that the old key can operate on + val oldKeyNamespaces = namespace_delegations + .list(filterStore = AuthorizedStore.filterName, filterTargetKey = filterAuthorizedKey.some) + .map(_.item.namespace) + val oldKeyIdentifiers = identifier_delegations + .list(filterStore = AuthorizedStore.filterName, filterTargetKey = filterAuthorizedKey.some) + .map(_.item.identifier) + + // The namespaces and identifiers that the new key can operate on + val newKeyNamespaces = namespace_delegations + .list(filterStore = AuthorizedStore.filterName, filterTargetKey = authorizeWith.some) + .map(_.item.namespace) + .toSet + val newKeyIdentifiers = identifier_delegations + .list(filterStore = AuthorizedStore.filterName, filterTargetKey = authorizeWith.some) + .map(_.item.identifier) + .toSet + + oldKeyNamespaces.foreach { ns => + if (!newKeyNamespaces.contains(ns)) + throw new IllegalArgumentException( + s"The new key is not authorized for namespace $ns" + ) + } + + oldKeyIdentifiers.foreach { uid => + if (!newKeyIdentifiers.contains(uid) && !newKeyNamespaces.contains(uid.namespace)) + throw new IllegalArgumentException( + s"The new key is not authorized for the identifier $uid nor the namespace of the identifier ${uid.namespace}" + ) + } + + val existingTxs = list(filterAuthorizedKey = Some(filterAuthorizedKey)) + + // TODO(i9419): Move renewal to the server side + existingTxs.result + .foreach { storedTx => + storedTx.transaction.transaction match { + case TopologyStateUpdate( + TopologyChangeOp.Add, + TopologyStateUpdateElement(_id, update), + ) => + update match { + case NamespaceDelegation(namespace, target, isRootDelegation) => + def renewNamespaceDelegations(op: TopologyChangeOp, key: Fingerprint): Unit = + namespace_delegations + .authorize( + op, + namespace.fingerprint, + target.fingerprint, + isRootDelegation, + key.some, + ) + .discard + + renewNamespaceDelegations(TopologyChangeOp.Add, authorizeWith) + 
renewNamespaceDelegations(TopologyChangeOp.Remove, filterAuthorizedKey) + + case IdentifierDelegation(identifier, target) => + def renewIdentifierDelegation(op: TopologyChangeOp, key: Fingerprint): Unit = + identifier_delegations + .authorize( + op, + identifier, + target.fingerprint, + key.some, + ) + .discard + + renewIdentifierDelegation(TopologyChangeOp.Add, authorizeWith) + renewIdentifierDelegation(TopologyChangeOp.Remove, filterAuthorizedKey) + + case OwnerToKeyMapping(owner, key) => + def renewOwnerToKeyMapping(op: TopologyChangeOp, nsKey: Fingerprint): Unit = + owner_to_key_mappings + .authorize( + op, + owner, + key.fingerprint, + key.purpose, + nsKey.some, + ) + .discard + + renewOwnerToKeyMapping(TopologyChangeOp.Add, authorizeWith) + renewOwnerToKeyMapping(TopologyChangeOp.Remove, filterAuthorizedKey) + + case signedClaim: SignedLegalIdentityClaim => + () + + case ParticipantState(side, domain, participant, permission, trustLevel) => + def renewParticipantState(op: TopologyChangeOp, key: Fingerprint): Unit = + participant_domain_states + .authorize( + op, + domain, + participant, + side, + permission, + trustLevel, + key.some, + ) + .discard + + renewParticipantState(TopologyChangeOp.Add, authorizeWith) + renewParticipantState(TopologyChangeOp.Remove, filterAuthorizedKey) + + case MediatorDomainState(side, domain, mediator) => + def renewMediatorState(op: TopologyChangeOp, key: Fingerprint): Unit = + mediator_domain_states + .authorize( + op, + domain, + mediator, + side, + key.some, + ) + .discard + + renewMediatorState(TopologyChangeOp.Add, authorizeWith) + renewMediatorState(TopologyChangeOp.Remove, filterAuthorizedKey) + + case PartyToParticipant(side, party, participant, permission) => + def renewPartyToParticipant(op: TopologyChangeOp, key: Fingerprint): Unit = + party_to_participant_mappings + .authorize( + op, + party, + participant, + side, + permission, + key.some, + ) + .discard + + renewPartyToParticipant(TopologyChangeOp.Add, 
authorizeWith) + renewPartyToParticipant(TopologyChangeOp.Remove, filterAuthorizedKey) + + case VettedPackages(participant, packageIds) => + def renewVettedPackages(op: TopologyChangeOp, key: Fingerprint): Unit = + vetted_packages + .authorize( + op, + participant, + packageIds, + key.some, + ) + .discard + + renewVettedPackages(TopologyChangeOp.Add, authorizeWith) + renewVettedPackages(TopologyChangeOp.Remove, filterAuthorizedKey) + } + + case TopologyStateUpdate(TopologyChangeOp.Remove, _element) => + // We don't have to renew removed topology transactions + () + + case DomainGovernanceTransaction(element) => + element.mapping match { + case DomainParametersChange(domainId, domainParameters) => + domain_parameters_changes + .authorizeInternal( + domainId, + domainParameters, + authorizeWith.some, + ) + .discard[ByteString] + } + } + + } + + } + } + + @Help.Summary("Inspect participant domain states") + @Help.Group("Participant Domain States") + object participant_domain_states extends Helpful { + + @Help.Summary("List participant domain states") + @Help.Description("""List the participant domain transactions present in the stores. Participant domain states + |are topology transactions used to permission a participant on a given domain. + |A participant domain state has a request-side that identifies whether the mapping is authorized by the + |participant (From), by the domain (To) or by both (Both). + |In order to use a participant on a domain, both have to authorize such a mapping. This means that by + |authorizing such a topology transaction, a participant acknowledges its presence on a domain, whereas + |a domain permissions the participant on that domain. + | + filterStore: Filter for topology stores starting with the given filter string (Authorized, , Requested) + useStateStore: If true (default), only properly authorized transactions that are part of the state will be selected. + timeQuery: The time query allows to customize the query by time. 
The following options are supported: + TimeQuery.HeadState (default): The most recent known state. + TimeQuery.Snapshot(ts): The state at a certain point in time. + TimeQuery.Range(fromO, toO): Time-range of when the transaction was added to the store + operation: Optionally, what type of operation the transaction should have. State store only has "Add". + filterSigningKey: Filter for transactions that are authorized with a key that starts with the given filter string. + + filterDomain: Filter for domains starting with the given filter string. + filterParticipant: Filter for participants starting with the given filter string. + protocolVersion: Export the topology transactions in the optional protocol version. + |""") + def list( + filterStore: String = "", + useStateStore: Boolean = true, + timeQuery: TimeQuery = TimeQuery.HeadState, + operation: Option[TopologyChangeOp] = None, + filterDomain: String = "", + filterParticipant: String = "", + filterSigningKey: String = "", + protocolVersion: Option[String] = None, + ): Seq[ListParticipantDomainStateResult] = + consoleEnvironment.run { + adminCommand( + TopologyAdminCommands.Read.ListParticipantDomainState( + BaseQuery( + filterStore, + useStateStore, + timeQuery, + operation, + filterSigningKey, + protocolVersion.map(ProtocolVersion.tryCreate), + ), + filterDomain, + filterParticipant, + ) + ) + } + + @Help.Summary("Change participant domain states") + @Help.Description("""Change the association of a participant to a domain. + |In order to activate a participant on a domain, we need both authorisation: the participant authorising + |its uid to be present on a particular domain and the domain to authorise the presence of a participant + |on said domain. + |If both identifiers are in the same namespace, then the request-side can be Both. If they differ, then + |we need to say whether the request comes from the domain (RequestSide.From) or from the participant + |(RequestSide.To). 
And, we need the matching request of the other side. + ops: Either Add or Remove the mapping + signedBy: Refers to the optional fingerprint of the authorizing key which in turn refers to a specific, locally existing certificate. + domain: The unique identifier of the domain we want the participant to join. + participant: The unique identifier of the participant. + side: The request side (RequestSide.From if we the transaction is from the perspective of the domain, RequestSide.To from the participant.) + permission: The privilege of the given participant which allows us to restrict an association (e.g. Confirmation or Observation). Will use the lower of if different between To/From. + trustLevel: The trust level of the participant on the given domain. Will use the lower of if different between To/From. + replaceExisting: If true (default), replace any existing mapping with the new setting + synchronize: Synchronize timeout can be used to ensure that the state has been propagated into the node + """) + def authorize( + ops: TopologyChangeOp, + domain: DomainId, + participant: ParticipantId, + side: RequestSide, + permission: ParticipantPermission = ParticipantPermission.Submission, + trustLevel: TrustLevel = TrustLevel.Ordinary, + signedBy: Option[Fingerprint] = None, + synchronize: Option[NonNegativeDuration] = Some( + consoleEnvironment.commandTimeouts.bounded + ), + replaceExisting: Boolean = true, + ): ByteString = + synchronisation.run(synchronize)(consoleEnvironment.run { + adminCommand( + TopologyAdminCommands.Write.AuthorizeParticipantDomainState( + ops, + signedBy, + side, + domain, + participant, + permission, + trustLevel, + replaceExisting = replaceExisting, + ) + ) + }) + + @Help.Summary("Returns true if the given participant is currently active on the given domain") + @Help.Description( + """Active means that the participant has been granted at least observation rights on the domain + |and that the participant has registered a domain trust 
certificate""" + ) + def active(domainId: DomainId, participantId: ParticipantId): Boolean = { + val (notBlocked, from, to) = + list( + filterStore = domainId.filterString, + filterParticipant = participantId.filterString, + ).iterator + .map(x => (x.item.side, x.item.permission)) + .foldLeft((true, false, false)) { + case ((false, _, _), _) | ((_, _, _), (_, ParticipantPermission.Disabled)) => + (false, false, false) + case (_, (RequestSide.Both, _)) => (true, true, true) + case ((_, _, to), (RequestSide.From, _)) => (true, true, to) + case ((_, from, _), (RequestSide.To, _)) => (true, from, true) + } + notBlocked && from && to + } + } + + @Help.Summary("Inspect mediator domain states") + @Help.Group("Mediator Domain States") + object mediator_domain_states extends Helpful { + + @Help.Summary("List mediator domain states") + @Help.Description("""List the mediator domain transactions present in the stores. Mediator domain states + |are topology transactions used to permission a mediator on a given domain. + |A mediator domain state has a request-side that identifies whether the mapping is authorized by the + |mediator (From), by the domain (To) or by both (Both). + |In order to use a mediator on a domain, both have to authorize such a mapping. This means that by + |authorizing such a topology transaction, a mediator acknowledges its presence on a domain, whereas + |a domain permissions the mediator on that domain. + | + filterStore: Filter for topology stores starting with the given filter string (Authorized, , Requested) + useStateStore: If true (default), only properly authorized transactions that are part of the state will be selected. + timeQuery: The time query allows to customize the query by time. The following options are supported: + TimeQuery.HeadState (default): The most recent known state. + TimeQuery.Snapshot(ts): The state at a certain point in time. 
+ TimeQuery.Range(fromO, toO): Time-range of when the transaction was added to the store + operation: Optionally, what type of operation the transaction should have. State store only has "Add". + filterSigningKey: Filter for transactions that are authorized with a key that starts with the given filter string. + + filterDomain: Filter for domains starting with the given filter string. + filterMediator Filter for mediators starting with the given filter string. + protocolVersion: Export the topology transactions in the optional protocol version. + |""") + def list( + filterStore: String = "", + useStateStore: Boolean = true, + timeQuery: TimeQuery = TimeQuery.HeadState, + operation: Option[TopologyChangeOp] = None, + filterDomain: String = "", + filterMediator: String = "", + filterSigningKey: String = "", + protocolVersion: Option[String] = None, + ): Seq[ListMediatorDomainStateResult] = + consoleEnvironment.run { + adminCommand( + TopologyAdminCommands.Read.ListMediatorDomainState( + BaseQuery( + filterStore, + useStateStore, + timeQuery, + operation, + filterSigningKey, + protocolVersion.map(ProtocolVersion.tryCreate), + ), + filterDomain, + filterMediator, + ) + ) + } + + @Help.Summary("Change mediator domain states") + @Help.Description("""Change the association of a mediator to a domain. + |In order to activate a mediator on a domain, we need both authorisation: the mediator authorising + |its uid to be present on a particular domain and the domain to authorise the presence of a mediator + |on said domain. + |If both identifiers are in the same namespace, then the request-side can be Both. If they differ, then + |we need to say whether the request comes from the domain (RequestSide.From) or from the mediator + |(RequestSide.To). And, we need the matching request of the other side. + ops: Either Add or Remove the mapping + signedBy: Refers to the optional fingerprint of the authorizing key which in turn refers to a specific, locally existing certificate. 
+ domain: The unique identifier of the domain we want the mediator to join. + mediator: The unique identifier of the mediator. + side: The request side (RequestSide.From if we the transaction is from the perspective of the domain, RequestSide.To from the mediator.) + replaceExisting: If true (default), replace any existing mapping with the new setting + synchronize: Synchronize timeout can be used to ensure that the state has been propagated into the node + """) + def authorize( + ops: TopologyChangeOp, + domain: DomainId, + mediator: MediatorId, + side: RequestSide, + signedBy: Option[Fingerprint] = None, + synchronize: Option[NonNegativeDuration] = Some( + consoleEnvironment.commandTimeouts.bounded + ), + replaceExisting: Boolean = true, + ): ByteString = + synchronisation.run(synchronize)(consoleEnvironment.run { + adminCommand( + TopologyAdminCommands.Write.AuthorizeMediatorDomainState( + ops, + signedBy, + side, + domain, + mediator, + replaceExisting = replaceExisting, + ) + ) + }) + } + + @Help.Summary("Manage package vettings") + @Help.Group("Vetted Packages") + object vetted_packages extends Helpful { + + @Help.Summary("Change package vettings") + @Help.Description("""A participant will only process transactions that reference packages that all involved participants have + |vetted previously. Vetting is done by registering a respective topology transaction with the domain, + |which can then be used by other participants to verify that a transaction is only using + |vetted packages. + |Note that all referenced and dependent packages must exist in the package store. + |By default, only vetting transactions adding new packages can be issued. Removing package vettings + |and issuing package vettings for other participants (if their identity is controlled through this + |participants topology manager) or for packages that do not exist locally can only be run using + |the force = true flag. 
However, these operations are dangerous and can lead to the situation of a + |participant being unable to process transactions. + ops: Either Add or Remove the vetting. + participant: The unique identifier of the participant that is vetting the package. + packageIds: The lf-package ids to be vetted. + signedBy: Refers to the fingerprint of the authorizing key which in turn must be authorized by a valid, locally existing certificate. + | If none is given, a key is automatically determined. + synchronize: Synchronize timeout can be used to ensure that the state has been propagated into the node + force: Flag to enable dangerous operations (default false). Great power requires great care. + """) + def authorize( + ops: TopologyChangeOp, + participant: ParticipantId, + packageIds: Seq[PackageId], + signedBy: Option[Fingerprint] = None, + synchronize: Option[NonNegativeDuration] = Some( + consoleEnvironment.commandTimeouts.bounded + ), + force: Boolean = false, + ): ByteString = + synchronisation.run(synchronize)(consoleEnvironment.run { + adminCommand( + TopologyAdminCommands.Write + .AuthorizeVettedPackages(ops, signedBy, participant, packageIds, force) + ) + }) + + @Help.Summary("List package vetting transactions") + @Help.Description( + """List the package vetting transactions present in the stores. Participants must vet Daml packages and submitters + |must ensure that the receiving participants have vetted the package prior to submitting a transaction (done + |automatically during submission and validation). Vetting is done by authorizing such topology transactions + |and registering with a domain. + | + filterStore: Filter for topology stores starting with the given filter string (Authorized, , Requested) + useStateStore: If true (default), only properly authorized transactions that are part of the state will be selected. + timeQuery: The time query allows to customize the query by time. 
The following options are supported: + TimeQuery.HeadState (default): The most recent known state. + TimeQuery.Snapshot(ts): The state at a certain point in time. + TimeQuery.Range(fromO, toO): Time-range of when the transaction was added to the store + operation: Optionally, what type of operation the transaction should have. State store only has "Add". + filterSigningKey: Filter for transactions that are authorized with a key that starts with the given filter string. + filterParticipant: Filter for participants starting with the given filter string. + protocolVersion: Export the topology transactions in the optional protocol version. + |""" + ) + def list( + filterStore: String = "", + useStateStore: Boolean = true, + timeQuery: TimeQuery = TimeQuery.HeadState, + operation: Option[TopologyChangeOp] = None, + filterParticipant: String = "", + filterSigningKey: String = "", + protocolVersion: Option[String] = None, + ): Seq[ListVettedPackagesResult] = + consoleEnvironment.run { + adminCommand( + TopologyAdminCommands.Read.ListVettedPackages( + BaseQuery( + filterStore, + useStateStore, + timeQuery, + operation, + filterSigningKey, + protocolVersion.map(ProtocolVersion.tryCreate), + ), + filterParticipant, + ) + ) + } + + } + + @Help.Summary("Manage domain parameters changes", FeatureFlag.Preview) + @Help.Group("Domain Parameters Changes") + object domain_parameters_changes extends Helpful { + @Help.Summary("Change domain parameters") + @Help.Description("""Authorize a transaction to change parameters of the domain. + |domainId: Id of the domain affected by the change. + |newParameters: New value of the domain parameters. + |protocolVersion: The protocol version of the domain + |signedBy: Refers to the fingerprint of the authorizing key which in turn must be authorized by a valid, locally existing certificate. + | If none is given, a key is automatically determined. 
+ |synchronize: Synchronize timeout can be used to ensure that the state has been propagated into the node + |force: Enable potentially dangerous changes. Required to increase ``ledgerTimeRecordTimeTolerance``. + | + |Use ``myDomain.service.set_ledger_time_record_time_tolerance`` to securely increase ``ledgerTimeRecordTimeTolerance``. + """) + def authorize( + domainId: DomainId, + newParameters: DynamicDomainParameters, + protocolVersion: ProtocolVersion, + signedBy: Option[Fingerprint] = None, + synchronize: Option[NonNegativeDuration] = Some( + consoleEnvironment.commandTimeouts.bounded + ), + force: Boolean = false, + ): ByteString = { + synchronisation.run(synchronize)( + consoleEnvironment.run { + adminCommand( + TopologyAdminCommands.Write + .AuthorizeDomainParametersChange( + signedBy, + domainId, + newParameters, + force, + ) + ) + } + ) + } + + // This method accepts parameters in the internal format; used by [[all.renew]] above + private[TopologyAdministrationGroup] def authorizeInternal( + domainId: DomainId, + newParameters: DynamicDomainParametersInternal, + signedBy: Option[Fingerprint] = None, + synchronize: Option[NonNegativeDuration] = Some( + consoleEnvironment.commandTimeouts.bounded + ), + ): ByteString = synchronisation.run(synchronize)( + consoleEnvironment.run { + adminCommand( + TopologyAdminCommands.Write + .AuthorizeDomainParametersChangeInternal( + signedBy, + domainId, + newParameters, + force = false, + ) + ) + } + ) + + @Help.Summary("Get the latest domain parameters change") + @Help.Description("""Get the latest domain parameters change for the domain. + domainId: Id of the domain. 
+ """) + def get_latest(domainId: DomainId): DynamicDomainParameters = + list(filterStore = domainId.filterString) + .sortBy(_.context.validFrom)(implicitly[Ordering[java.time.Instant]].reverse) + .headOption + .map(_.item) + .getOrElse( + throw new IllegalStateException("No dynamic domain parameters found in the domain") + ) + + @Help.Summary("List domain parameters changes transactions") + @Help.Description( + """List the domain parameters change transactions present in the stores for the specific domain. + filterStore: Filter for topology stores starting with the given filter string (Authorized, , Requested) + useStateStore: If true (default), only properly authorized transactions that are part of the state will be selected. + timeQuery: The time query allows to customize the query by time. The following options are supported: + TimeQuery.HeadState (default): The most recent known state. + TimeQuery.Snapshot(ts): The state at a certain point in time. + TimeQuery.Range(fromO, toO): Time-range of when the transaction was added to the store + filterSigningKey: Filter for transactions that are authorized with a key that starts with the given filter string. + protocolVersion: Export the topology transactions in the optional protocol version. 
+ |""" + ) + def list( + filterStore: String = "", + useStateStore: Boolean = true, + timeQuery: TimeQuery = TimeQuery.HeadState, + filterSigningKey: String = "", + protocolVersion: Option[String] = None, + ): Seq[ListDomainParametersChangeResult] = { + val baseQuery = BaseQuery( + filterStore = filterStore, + useStateStore = useStateStore, + timeQuery = timeQuery, + ops = None, + filterSigningKey = filterSigningKey, + protocolVersion = protocolVersion.map(ProtocolVersion.tryCreate), + ) + + consoleEnvironment.run { + adminCommand( + TopologyAdminCommands.Read.ListDomainParametersChanges(baseQuery) + ) + } + } + } + +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/TopologyAdministrationX.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/TopologyAdministrationX.scala new file mode 100644 index 0000000000..5242811e6c --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/TopologyAdministrationX.scala @@ -0,0 +1,1875 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.console.commands + +import cats.syntax.either.* +import com.daml.lf.data.Ref.PackageId +import com.daml.nameof.NameOf.functionFullName +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.admin.api.client.commands.{ + TopologyAdminCommands, + TopologyAdminCommandsX, +} +import com.digitalasset.canton.admin.api.client.data.topologyx.* +import com.digitalasset.canton.config +import com.digitalasset.canton.config.NonNegativeDuration +import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt, PositiveLong} +import com.digitalasset.canton.console.{ + CommandErrors, + ConsoleCommandResult, + ConsoleEnvironment, + FeatureFlag, + FeatureFlagFilter, + Help, + Helpful, + InstanceReferenceCommon, + InstanceReferenceX, +} +import com.digitalasset.canton.crypto.* +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.health.admin.data.TopologyQueueStatus +import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.protocol.DynamicDomainParameters +import com.digitalasset.canton.topology.* +import com.digitalasset.canton.topology.admin.grpc.BaseQueryX +import com.digitalasset.canton.topology.store.TopologyStoreId.AuthorizedStore +import com.digitalasset.canton.topology.store.{StoredTopologyTransactionsX, TimeQueryX} +import com.digitalasset.canton.topology.transaction.SignedTopologyTransactionX.GenericSignedTopologyTransactionX +import com.digitalasset.canton.topology.transaction.TopologyTransactionX.TxHash +import com.digitalasset.canton.topology.transaction.* +import com.digitalasset.canton.version.ProtocolVersion +import com.google.protobuf.ByteString + +import scala.reflect.ClassTag + +trait InitNodeIdX extends ConsoleCommandGroup { + + @Help.Summary("Initialize the node with a unique identifier") + @Help.Description("""Every node in Canton is identified using a unique identifier, which is composed + |of a 
user-chosen string and the fingerprint of a signing key. The signing key is the root key + |defining a so-called namespace, where the signing key has the ultimate control over + |issuing new identifiers. + |During initialisation, we have to pick such a unique identifier. + |By default, initialisation happens automatically, but it can be turned off by setting the auto-init + |option to false. + |Automatic node initialisation is usually turned off to preserve the identity of a participant or domain + |node (during major version upgrades) or if the topology transactions are managed through + |a different topology manager than the one integrated into this node.""") + def init_id(identifier: UniqueIdentifier): Unit = + consoleEnvironment.run { + runner.adminCommand(TopologyAdminCommandsX.Init.InitId(identifier.toProtoPrimitive)) + } + +} + +class TopologyAdministrationGroupX( + instance: InstanceReferenceX, + topologyQueueStatus: => Option[TopologyQueueStatus], + consoleEnvironment: ConsoleEnvironment, + loggerFactory: NamedLoggerFactory, +) extends TopologyAdministrationGroupCommon( + instance, + topologyQueueStatus, + consoleEnvironment, + loggerFactory, + ) + with InitNodeIdX + with Helpful + with FeatureFlagFilter { + + import runner.* + + override protected def getIdCommand(): ConsoleCommandResult[UniqueIdentifier] = + runner.adminCommand(TopologyAdminCommandsX.Init.GetId()) + + @Help.Summary("Inspect all topology transactions at once") + @Help.Group("All Transactions") + object transactions { + + @Help.Summary("Downloads the node's topology identity transactions") + @Help.Description( + "The node's identity is defined by topology transactions of type NamespaceDelegationX and OwnerToKeyMappingX." 
+ ) + def identity_transactions() + : Seq[SignedTopologyTransactionX[TopologyChangeOpX, TopologyMappingX]] = { + val txs = instance.topology.transactions.list() + txs.result + .flatMap(tx => + tx.transaction + .selectMapping[NamespaceDelegationX] + .orElse(tx.transaction.selectMapping[OwnerToKeyMappingX]) + ) + .filter(_.transaction.mapping.namespace == instance.id.uid.namespace) + } + + @Help.Summary("Upload signed topology transaction") + @Help.Description( + """Topology transactions can be issued with any topology manager. In some cases, such + |transactions need to be copied manually between nodes. This function allows for + |uploading previously exported topology transaction into the authorized store (which is + |the name of the topology managers transaction store.""" + ) + def load_serialized(bytes: ByteString): Unit = + consoleEnvironment.run { + adminCommand( + TopologyAdminCommands.Write.AddSignedTopologyTransaction(bytes) + ) + } + + def load(transactions: Seq[GenericSignedTopologyTransactionX], store: String): Unit = + consoleEnvironment.run { + adminCommand( + TopologyAdminCommandsX.Write.AddTransactions(transactions, store) + ) + } + + def sign( + transactions: Seq[GenericSignedTopologyTransactionX], + signedBy: Seq[Fingerprint] = Seq(instance.id.uid.namespace.fingerprint), + ): Seq[GenericSignedTopologyTransactionX] = + consoleEnvironment.run { + adminCommand(TopologyAdminCommandsX.Write.SignTransactions(transactions, signedBy)) + } + + def authorize[M <: TopologyMappingX: ClassTag]( + txHash: TxHash, + mustBeFullyAuthorized: Boolean, + store: String, + signedBy: Seq[Fingerprint] = Seq(instance.id.uid.namespace.fingerprint), + ): SignedTopologyTransactionX[TopologyChangeOpX, M] = + consoleEnvironment.run { + adminCommand( + TopologyAdminCommandsX.Write.Authorize( + txHash.hash.toHexString, + mustFullyAuthorize = mustBeFullyAuthorized, + signedBy = signedBy, + store = store, + ) + ) + } + + @Help.Summary("List all transaction") + def list( + 
filterStore: String = AuthorizedStore.filterName, + proposals: Boolean = false, + timeQuery: TimeQueryX = TimeQueryX.HeadState, + operation: Option[TopologyChangeOpX] = None, + filterAuthorizedKey: Option[Fingerprint] = None, + protocolVersion: Option[String] = None, + ): StoredTopologyTransactionsX[TopologyChangeOpX, TopologyMappingX] = { + consoleEnvironment + .run { + adminCommand( + TopologyAdminCommandsX.Read.ListAll( + BaseQueryX( + filterStore, + proposals, + timeQuery, + operation, + filterSigningKey = filterAuthorizedKey.map(_.toProtoPrimitive).getOrElse(""), + protocolVersion.map(ProtocolVersion.tryCreate), + ) + ) + ) + } + } + + @Help.Summary("Manage topology transaction purging", FeatureFlag.Preview) + @Help.Group("Purge Topology Transactions") + object purge extends Helpful { + def list( + filterStore: String = "", + proposals: Boolean = false, + timeQuery: TimeQueryX = TimeQueryX.HeadState, + operation: Option[TopologyChangeOpX] = None, + filterDomain: String = "", + filterSigningKey: String = "", + protocolVersion: Option[String] = None, + ): Seq[ListPurgeTopologyTransactionXResult] = consoleEnvironment.run { + adminCommand( + TopologyAdminCommandsX.Read.PurgeTopologyTransactionX( + BaseQueryX( + filterStore, + proposals, + timeQuery, + operation, + filterSigningKey, + protocolVersion.map(ProtocolVersion.tryCreate), + ), + filterDomain, + ) + ) + } + + // TODO(#15236): implement write service for purging + } + } + + object domain_bootstrap { + + @Help.Summary( + """Creates and returns proposals of topology transactions to bootstrap a domain, specifically + |DomainParametersStateX, SequencerDomainStateX, and MediatorDomainStateX.""".stripMargin + ) + def generate_genesis_topology( + domainId: DomainId, + domainOwners: Seq[Member], + sequencers: Seq[SequencerId], + mediators: Seq[MediatorId], + ): Seq[SignedTopologyTransactionX[TopologyChangeOpX, TopologyMappingX]] = { + val isDomainOwner = domainOwners.contains(instance.id) + require(isDomainOwner, 
s"Only domain owners should call $functionFullName.") + + val thisNodeRootKey = Some(instance.id.uid.namespace.fingerprint) + + // create and sign the initial domain parameters + val domainParameterState = + instance.topology.domain_parameters.propose( + domainId, + DynamicDomainParameters + .initialValues( + consoleEnvironment.environment.clock, + ProtocolVersion.latest, + ), + signedBy = thisNodeRootKey, + store = Some(AuthorizedStore.filterName), + ) + + val mediatorState = + instance.topology.mediators.propose( + domainId, + threshold = PositiveInt.one, + group = NonNegativeInt.zero, + active = mediators, + signedBy = thisNodeRootKey, + store = Some(AuthorizedStore.filterName), + ) + + val sequencerState = + instance.topology.sequencers.propose( + domainId, + threshold = PositiveInt.one, + active = sequencers, + signedBy = thisNodeRootKey, + store = Some(AuthorizedStore.filterName), + ) + + Seq(domainParameterState, sequencerState, mediatorState) + } + } + + @Help.Summary("Manage unionspaces") + @Help.Group("Unionspaces") + object unionspaces extends Helpful { + def list( + filterStore: String = "", + proposals: Boolean = false, + timeQuery: TimeQueryX = TimeQueryX.HeadState, + operation: Option[TopologyChangeOpX] = None, + filterNamespace: String = "", + filterSigningKey: String = "", + protocolVersion: Option[String] = None, + ): Seq[ListUnionspaceDefinitionResult] = consoleEnvironment.run { + adminCommand( + TopologyAdminCommandsX.Read.ListUnionspaceDefinition( + BaseQueryX( + filterStore, + proposals, + timeQuery, + operation, + filterSigningKey, + protocolVersion.map(ProtocolVersion.tryCreate), + ), + filterNamespace, + ) + ) + } + + @Help.Summary("Propose the creation of a new unionspace") + @Help.Description(""" + owners: the namespaces of the founding members of the unionspace, which are used to compute the name of the unionspace. 
+ threshold: this threshold specifies the minimum number of signatures of unionspace members that are required to + satisfy authorization requirements on topology transactions for the namespace of the unionspace. + + store: - "Authorized": the topology transaction will be stored in the node's authorized store and automatically + propagated to connected domains, if applicable. + - "": the topology transaction will be directly submitted to the specified domain without + storing it locally first. This also means it will _not_ be synchronized to other domains + automatically. + mustFullyAuthorize: when set to true, the proposal's previously received signatures and the signature of this node must be + sufficient to fully authorize the topology transaction. if this is not the case, the request fails. + when set to false, the proposal retains the proposal status until enough signatures are accumulated to + satisfy the mapping's authorization requirements. + signedBy: the fingerprint of the key to be used to sign this proposal + serial: the expected serial this topology transaction should have. Serials must be contiguous and start at 1. + This transaction will be rejected if another fully authorized transaction with the same serial already + exists, or if there is a gap between this serial and the most recently used serial. + If None, the serial will be automatically selected by the node.""") + def propose( + owners: Set[Namespace], + threshold: PositiveInt, + store: String, + mustFullyAuthorize: Boolean = false, + // TODO(#14056) don't use the instance's root namespace key by default. 
+ // let the grpc service figure out the right key to use, once that's implemented + signedBy: Option[Fingerprint] = Some(instance.id.uid.namespace.fingerprint), + serial: Option[PositiveInt] = None, + ): SignedTopologyTransactionX[TopologyChangeOpX, UnionspaceDefinitionX] = + consoleEnvironment.run { + NonEmpty + .from(owners) match { + case Some(ownersNE) => + adminCommand( + { + TopologyAdminCommandsX.Write.Propose( + UnionspaceDefinitionX + .create( + UnionspaceDefinitionX.computeNamespace(owners), + threshold, + ownersNE, + ), + signedBy = signedBy.toList, + serial = serial, + change = TopologyChangeOpX.Replace, + mustFullyAuthorize = mustFullyAuthorize, + forceChange = false, + store = store, + ) + } + ) + case None => + CommandErrors.GenericCommandError("Proposed unionspace needs at least one owner") + } + } + + def join( + unionspace: Fingerprint, + owner: Option[Fingerprint] = Some(instance.id.uid.namespace.fingerprint), + ): GenericSignedTopologyTransactionX = { + ??? + } + + def leave( + unionspace: Fingerprint, + owner: Option[Fingerprint] = Some(instance.id.uid.namespace.fingerprint), + ): ByteString = { + ByteString.EMPTY + } + } + + @Help.Summary("Manage namespace delegations") + @Help.Group("Namespace delegations") + object namespace_delegations extends Helpful { + @Help.Summary("Propose a new namespace delegation") + @Help.Description( + """A namespace delegation allows the owner of a namespace to delegate signing privileges for + |topology transactions on behalf of said namespace to additional signing keys. + + namespace: the namespace for which the target key can be used to sign topology transactions + targetKey: the target key to be used for signing topology transactions on behalf of the namespace + isRootDelegation: when set to true, the target key may authorize topology transactions with any kind of mapping, + including other namespace delegations. 
+ when set to false, the target key may not authorize other namespace delegations for this namespace. + + store: - "Authorized": the topology transaction will be stored in the node's authorized store and automatically + propagated to connected domains, if applicable. + - "": the topology transaction will be directly submitted to the specified domain without + storing it locally first. This also means it will _not_ be synchronized to other domains + automatically. + mustFullyAuthorize: when set to true, the proposal's previously received signatures and the signature of this node must be + sufficient to fully authorize the topology transaction. If this is not the case, the request fails. + When set to false, the proposal retains the proposal status until enough signatures are accumulated to + satisfy the mapping's authorization requirements. + serial: the expected serial this topology transaction should have. Serials must be contiguous and start at 1. + This transaction will be rejected if another fully authorized transaction with the same serial already + exists, or if there is a gap between this serial and the most recently used serial. + If None, the serial will be automatically selected by the node. 
+ """ + ) + def propose_delegation( + namespace: Namespace, + targetKey: SigningPublicKey, + isRootDelegation: Boolean, + store: String = AuthorizedStore.filterName, + mustFullyAuthorize: Boolean = true, + serial: Option[PositiveInt] = None, + synchronize: Option[NonNegativeDuration] = Some( + consoleEnvironment.commandTimeouts.bounded + ), + ): SignedTopologyTransactionX[TopologyChangeOpX, NamespaceDelegationX] = + synchronisation.runAdminCommand(synchronize)( + TopologyAdminCommandsX.Write.Propose( + NamespaceDelegationX.create(namespace, targetKey, isRootDelegation), + signedBy = Seq(instance.id.uid.namespace.fingerprint), + store = store, + serial = serial, + change = TopologyChangeOpX.Replace, + mustFullyAuthorize = mustFullyAuthorize, + forceChange = false, + ) + ) + + @Help.Summary("Revoke an existing namespace delegation") + @Help.Description( + """A namespace delegation allows the owner of a namespace to delegate signing privileges for + |topology transactions on behalf of said namespace to additional signing keys. + + namespace: the namespace for which the target key should be revoked + targetKey: the target key to be revoked + + store: - "Authorized": the topology transaction will be stored in the node's authorized store and automatically + propagated to connected domains, if applicable. + - "": the topology transaction will be directly submitted to the specified domain without + storing it locally first. This also means it will _not_ be synchronized to other domains + automatically. + mustFullyAuthorize: when set to true, the proposal's previously received signatures and the signature of this node must be + sufficient to fully authorize the topology transaction. If this is not the case, the request fails. + When set to false, the proposal retains the proposal status until enough signatures are accumulated to + satisfy the mapping's authorization requirements. + serial: the expected serial this topology transaction should have. 
Serials must be contiguous and start at 1. + This transaction will be rejected if another fully authorized transaction with the same serial already + exists, or if there is a gap between this serial and the most recently used serial. + If None, the serial will be automatically selected by the node. + force: must be set to true when performing a dangerous operation, such as revoking a root certificate + """ + ) + def propose_revocation( + namespace: Namespace, + targetKey: SigningPublicKey, + store: String = AuthorizedStore.filterName, + mustFullyAuthorize: Boolean = true, + serial: Option[PositiveInt] = None, + force: Boolean = false, + synchronize: Option[NonNegativeDuration] = Some( + consoleEnvironment.commandTimeouts.bounded + ), + ): SignedTopologyTransactionX[TopologyChangeOpX, NamespaceDelegationX] = { + list( + store, + filterNamespace = namespace.toProtoPrimitive, + filterTargetKey = Some(targetKey.id), + ) match { + case Seq(nsd) => + synchronisation.runAdminCommand(synchronize)( + TopologyAdminCommandsX.Write.Propose( + nsd.item, + signedBy = Seq(instance.id.uid.namespace.fingerprint), + store = store, + serial = serial, + change = TopologyChangeOpX.Remove, + mustFullyAuthorize = mustFullyAuthorize, + forceChange = force, + ) + ) + + case Nil => + throw new IllegalArgumentException( + s"Namespace delegation from namespace $namespace to key ${targetKey.id} not found." 
+ ) + case multiple => + throw new IllegalStateException( + s"The query for namespace $namespace and target key ${targetKey.id} unexpectedly yielded multiple results: ${multiple + .map(_.item)}" + ) + } + } + + def list( + filterStore: String = "", + proposals: Boolean = false, + timeQuery: TimeQueryX = TimeQueryX.HeadState, + operation: Option[TopologyChangeOpX] = None, + filterNamespace: String = "", + filterSigningKey: String = "", + filterTargetKey: Option[Fingerprint] = None, + protocolVersion: Option[String] = None, + ): Seq[ListNamespaceDelegationResult] = consoleEnvironment.run { + adminCommand( + TopologyAdminCommandsX.Read.ListNamespaceDelegation( + BaseQueryX( + filterStore, + proposals, + timeQuery, + operation, + filterSigningKey, + protocolVersion.map(ProtocolVersion.tryCreate), + ), + filterNamespace, + filterTargetKey, + ) + ) + } + } + + @Help.Summary("Manage identifier delegations") + @Help.Group("Identifier delegations") + object identifier_delegations extends Helpful { + + @Help.Summary("Propose new identifier delegations") + @Help.Description( + """An identifier delegation allows the owner of a unique identifier to delegate signing privileges for + |topology transactions on behalf of said identifier to additional/specific signing keys. + + uid: the unique identifier for which the target key can be used to sign topology transactions + targetKey: the target key to be used for signing topology transactions on behalf of the unique identifier + + store: - "Authorized": the topology transaction will be stored in the node's authorized store and automatically + propagated to connected domains, if applicable. + - "": the topology transaction will be directly submitted to the specified domain without + storing it locally first. This also means it will _not_ be synchronized to other domains + automatically. 
+ mustFullyAuthorize: when set to true, the proposal's previously received signatures and the signature of this node must be + sufficient to fully authorize the topology transaction. if this is not the case, the request fails. + when set to false, the proposal retains the proposal status until enough signatures are accumulated to + satisfy the mapping's authorization requirements. + serial: the expected serial this topology transaction should have. Serials must be contiguous and start at 1. + This transaction will be rejected if another fully authorized transaction with the same serial already + exists, or if there is a gap between this serial and the most recently used serial. + If None, the serial will be automatically selected by the node. + """ + ) + def propose( + uid: UniqueIdentifier, + targetKey: SigningPublicKey, + synchronize: Option[NonNegativeDuration] = Some( + consoleEnvironment.commandTimeouts.bounded + ), + // Using the authorized store by default + store: String = AuthorizedStore.filterName, + mustFullyAuthorize: Boolean = true, + serial: Option[PositiveInt] = None, + ): SignedTopologyTransactionX[TopologyChangeOpX, IdentifierDelegationX] = { + val command = TopologyAdminCommandsX.Write.Propose( + mapping = IdentifierDelegationX( + identifier = uid, + target = targetKey, + ), + signedBy = Seq(instance.id.uid.namespace.fingerprint), + serial = serial, + mustFullyAuthorize = mustFullyAuthorize, + store = store, + ) + + synchronisation.runAdminCommand(synchronize)(command) + } + + def list( + filterStore: String = "", + proposals: Boolean = false, + timeQuery: TimeQueryX = TimeQueryX.HeadState, + operation: Option[TopologyChangeOpX] = None, + filterUid: String = "", + filterSigningKey: String = "", + filterTargetKey: Option[Fingerprint] = None, + protocolVersion: Option[String] = None, + ): Seq[ListIdentifierDelegationResult] = consoleEnvironment.run { + adminCommand( + TopologyAdminCommandsX.Read.ListIdentifierDelegation( + BaseQueryX( + filterStore, 
+ proposals, + timeQuery, + operation, + filterSigningKey, + protocolVersion.map(ProtocolVersion.tryCreate), + ), + filterUid, + filterTargetKey, + ) + ) + } + } + + // TODO(#14057) complete @Help.Description's (by adapting TopologyAdministrationGroup-non-X descriptions) + @Help.Summary("Manage owner to key mappings") + @Help.Group("Owner to key mappings") + object owner_to_key_mappings + extends OwnerToKeyMappingsGroup(consoleEnvironment.commandTimeouts) + with Helpful { + + @Help.Summary("List owner to key mapping transactions") + def list( + filterStore: String = "", + proposals: Boolean = false, + timeQuery: TimeQueryX = TimeQueryX.HeadState, + operation: Option[TopologyChangeOpX] = None, + filterKeyOwnerType: Option[MemberCode] = None, + filterKeyOwnerUid: String = "", + filterSigningKey: String = "", + protocolVersion: Option[String] = None, + ): Seq[ListOwnerToKeyMappingResult] = + consoleEnvironment.run { + adminCommand( + TopologyAdminCommandsX.Read.ListOwnerToKeyMapping( + BaseQueryX( + filterStore, + proposals, + timeQuery, + operation, + filterSigningKey, + protocolVersion.map(ProtocolVersion.tryCreate), + ), + filterKeyOwnerType, + filterKeyOwnerUid, + ) + ) + } + + @Help.Summary("Add a key to an owner to key mapping") + @Help.Description( + """Add a key to an owner to key mapping. A key owner is anyone in the system that needs a key-pair known + |to all members (participants, mediators, sequencers) of a domain. If no owner to key mapping exists for the + |specified key owner, create a new mapping with the specified key. The specified key needs to have + |been created previously via the `keys.secret` api. + + key: Fingerprint of the key + purpose: The key purpose, i.e. whether the key is for signing or encryption + keyOwner: The member that owns the key + domainId: The domain id if the owner to key mapping is specific to a domain + signedBy: Optional fingerprint of the authorizing key which in turn refers to a specific, locally existing certificate. 
+ synchronize: Synchronize timeout can be used to ensure that the state has been propagated into the node + mustFullyAuthorize: Whether to only add the key if the member is in the position to authorize the change. + """ + ) + def add_key( + key: Fingerprint, + purpose: KeyPurpose, + keyOwner: Member = instance.id.member, + domainId: Option[DomainId] = None, + signedBy: Option[Fingerprint] = None, + synchronize: Option[config.NonNegativeDuration] = Some( + consoleEnvironment.commandTimeouts.bounded + ), + mustFullyAuthorize: Boolean = true, // configurable in case of a key under a unionspace + ): Unit = propose( + key, + purpose, + keyOwner, + domainId, + signedBy, + synchronize, + add = true, + mustFullyAuthorize, + force = false, + instance, + ) + + @Help.Summary("Remove a key from an owner to key mapping") + @Help.Description( + """Remove a key from an owner to key mapping. A key owner is anyone in the system that needs a key-pair known + |to all members (participants, mediators, sequencers) of a domain. If the specified key is the last key in the + |owner to key mapping (which requires the force to be true), the owner to key mapping will be removed. + |The specified key needs to have been created previously via the `keys.secret` api. + + key: Fingerprint of the key + purpose: The key purpose, i.e. whether the key is for signing or encryption + keyOwner: The member that owns the key + domainId: The domain id if the owner to key mapping is specific to a domain + signedBy: Optional fingerprint of the authorizing key which in turn refers to a specific, locally existing certificate. + synchronize: Synchronize timeout can be used to ensure that the state has been propagated into the node + mustFullyAuthorize: Whether to only add the key if the member is in the position to authorize the change. 
+ force: removing the last key is dangerous and must therefore be manually forced + """ + ) + def remove_key( + key: Fingerprint, + purpose: KeyPurpose, + keyOwner: Member = instance.id.member, + domainId: Option[DomainId] = None, + signedBy: Option[Fingerprint] = None, + synchronize: Option[config.NonNegativeDuration] = Some( + consoleEnvironment.commandTimeouts.bounded + ), + mustFullyAuthorize: Boolean = true, // configurable in case of a key under a unionspace + force: Boolean = false, + ): Unit = propose( + key, + purpose, + keyOwner, + domainId, + signedBy, + synchronize, + add = false, + mustFullyAuthorize, + force = force, + instance, + ) + + @Help.Summary("Rotate the key for an owner to key mapping") + @Help.Description( + """Rotates an existing key of the owner's owner to key mapping by adding the new key and removing the previous + |key. + + nodeInstance: The node instance that is used to verify that both the current and new key pertain to this node. + This avoids conflicts when there are different nodes with the same uuid (i.e., multiple sequencers). 
+ owner: The member that owns the owner to key mapping + currentKey: The current public key that will be rotated + newKey: The new public key that has been generated + """ + ) + def rotate_key( + nodeInstance: InstanceReferenceCommon, + member: Member, + currentKey: PublicKey, + newKey: PublicKey, + ): Unit = nodeInstance match { + case nodeInstanceX: InstanceReferenceX => + val keysInStore = nodeInstance.keys.secret.list().map(_.publicKey) + require( + keysInStore.contains(currentKey), + "The current key must exist and pertain to this node", + ) + require(keysInStore.contains(newKey), "The new key must exist and pertain to this node") + require(currentKey.purpose == newKey.purpose, "The rotated keys must have the same purpose") + + val domainIds = list( + filterStore = AuthorizedStore.filterName, + operation = Some(TopologyChangeOpX.Replace), + filterKeyOwnerUid = member.filterString, + filterKeyOwnerType = Some(member.code), + proposals = false, + ).collect { case res if res.item.keys.contains(currentKey) => res.item.domain } + + require(domainIds.nonEmpty, "The current key is not authorized in any owner to key mapping") + + // TODO(#12945): Remove this workaround once the TopologyManagerX is able to determine the signing key + // among its IDDs and NSDs. + val signingKeyForNow = Some(nodeInstanceX.id.uid.namespace.fingerprint) + + domainIds.foreach { maybeDomainId => + // Authorize the new key + // The owner will now have two keys, but by convention the first one added is always + // used by everybody. 
+ propose( + newKey.fingerprint, + newKey.purpose, + member, + domainId = maybeDomainId, + signedBy = signingKeyForNow, + add = true, + nodeInstance = nodeInstanceX, + ) + + // Remove the old key by sending the matching `Remove` transaction + propose( + currentKey.fingerprint, + currentKey.purpose, + member, + domainId = maybeDomainId, + signedBy = signingKeyForNow, + add = false, + nodeInstance = nodeInstanceX, + ) + } + case _ => + throw new IllegalArgumentException( + "InstanceReferenceX.owner_to_key_mapping.rotate_key called with a non-XNode" + ) + } + + private def propose( + key: Fingerprint, + purpose: KeyPurpose, + keyOwner: Member, + domainId: Option[DomainId], + signedBy: Option[Fingerprint] = None, + synchronize: Option[config.NonNegativeDuration] = Some( + consoleEnvironment.commandTimeouts.bounded + ), + add: Boolean, + mustFullyAuthorize: Boolean = true, + force: Boolean = false, + nodeInstance: InstanceReferenceX, + ): Unit = { + // Ensure the specified key has a private key in the vault. + val publicKey = + nodeInstance.keys.secret + .list( + filterFingerprint = key.toProtoPrimitive, + purpose = Set(purpose), + ) match { + case privateKeyMetadata +: Nil => privateKeyMetadata.publicKey + case Nil => + throw new IllegalArgumentException("The specified key is unknown to the key owner") + case multipleKeys => + throw new IllegalArgumentException( + s"Found ${multipleKeys.size} keys where only one key was expected. Specify a full key instead of a prefix" + ) + } + + // Look for an existing authorized OKM mapping. + val maybePreviousState = expectAtMostOneResult( + list( + filterStore = AuthorizedStore.filterName, + filterKeyOwnerUid = keyOwner.filterString, + filterKeyOwnerType = Some(keyOwner.code), + proposals = false, + ).filter(_.item.domain == domainId) + ).map(res => (res.item, res.context.operation, res.context.serial)) + + val (proposedMapping, serial, ops) = if (add) { + // Add key to mapping with serial + 1 or create new mapping. 
+ maybePreviousState match { + case None => + ( + OwnerToKeyMappingX(keyOwner, domainId, NonEmpty(Seq, publicKey)), + PositiveInt.one, + TopologyChangeOpX.Replace, + ) + case Some((_, TopologyChangeOpX.Remove, previousSerial)) => + ( + OwnerToKeyMappingX(keyOwner, domainId, NonEmpty(Seq, publicKey)), + previousSerial.increment, + TopologyChangeOpX.Replace, + ) + case Some((okm, TopologyChangeOpX.Replace, previousSerial)) => + require( + !okm.keys.contains(publicKey), + "The owner-to-key mapping already contains the specified key to add", + ) + ( + okm.copy(keys = okm.keys :+ publicKey), + previousSerial.increment, + TopologyChangeOpX.Replace, + ) + } + } else { + // Remove key from mapping with serial + 1 or error. + maybePreviousState match { + case None | Some((_, TopologyChangeOpX.Remove, _)) => + throw new IllegalArgumentException( + "No authorized owner-to-key mapping exists for specified key owner" + ) + case Some((okm, TopologyChangeOpX.Replace, previousSerial)) => + require( + okm.keys.contains(publicKey), + "The owner-to-key mapping does not contain the specified key to remove", + ) + NonEmpty.from(okm.keys.filterNot(_ == publicKey)) match { + case Some(fewerKeys) => + (okm.copy(keys = fewerKeys), previousSerial.increment, TopologyChangeOpX.Replace) + case None => + (okm, previousSerial.increment, TopologyChangeOpX.Remove) + } + } + } + + synchronisation + .runAdminCommand(synchronize)( + TopologyAdminCommandsX.Write + .Propose( + mapping = proposedMapping, + signedBy = signedBy.toList, + change = ops, + serial = Some(serial), + mustFullyAuthorize = mustFullyAuthorize, + forceChange = force, + store = AuthorizedStore.filterName, + ) + ) + .discard + } + } + + @Help.Summary("Manage party to participant mappings") + @Help.Group("Party to participant mappings") + object party_to_participant_mappings extends Helpful { + + @Help.Summary("Change party to participant mapping") + @Help.Description("""Change the association of a party to hosting participants. 
+ party: The unique identifier of the party whose set of participants or permission to modify. + adds: The unique identifiers of the participants to host the party each specifying the participant's permissions + (submission, confirmation, observation). If the party already hosts the specified participant, update the + participant's permissions. + removes: The unique identifiers of the participants that should no longer host the party. + domainId: The domain id if the party to participant mapping is specific to a domain. + signedBy: Refers to the optional fingerprint of the authorizing key which in turn refers to a specific, locally existing certificate. + synchronize: Synchronize timeout can be used to ensure that the state has been propagated into the node + mustFullyAuthorize: When set to true, the proposal's previously received signatures and the signature of this node must be + sufficient to fully authorize the topology transaction. If this is not the case, the request fails. + When set to false, the proposal retains the proposal status until enough signatures are accumulated to + satisfy the mapping's authorization requirements. + store: - "Authorized": The topology transaction will be stored in the node's authorized store and automatically + propagated to connected domains, if applicable. + - "": The topology transaction will be directly submitted to the specified domain without + storing it locally first. This also means it will _not_ be synchronized to other domains + automatically. + """) + def propose_delta( + party: PartyId, + adds: List[(ParticipantId, ParticipantPermissionX)] = Nil, + removes: List[ParticipantId] = Nil, + domainId: Option[DomainId] = None, + signedBy: Option[Fingerprint] = Some( + instance.id.uid.namespace.fingerprint + ), // TODO(#12945) don't use the instance's root namespace key by default. 
+ synchronize: Option[config.NonNegativeDuration] = Some( + consoleEnvironment.commandTimeouts.bounded + ), + mustFullyAuthorize: Boolean = false, + store: String = AuthorizedStore.filterName, + ): SignedTopologyTransactionX[TopologyChangeOpX, PartyToParticipantX] = { + + val currentO = expectAtMostOneResult( + list( + filterStore = store, + filterParty = party.filterString, + ).filter(_.item.domainId == domainId) + ) + + val (existingPermissions, newSerial) = currentO match { + case Some(current) if current.context.operation == TopologyChangeOpX.Remove => + (Map.empty[ParticipantId, ParticipantPermissionX], Some(current.context.serial.increment)) + case Some(current) => + val currentPermissions = + current.item.participants.map(p => p.participantId -> p.permission).toMap + (currentPermissions, Some(current.context.serial.increment)) + case None => + (Map.empty[ParticipantId, ParticipantPermissionX], None) + } + + val newPermissions = new PartyToParticipantComputations(loggerFactory) + .computeNewPermissions( + existingPermissions = existingPermissions, + adds = adds, + removes = removes, + ) + .valueOr(err => throw new IllegalArgumentException(err)) + + propose( + party = party, + newParticipants = newPermissions.toSeq, + domainId = domainId, + signedBy = signedBy, + serial = newSerial, + synchronize = synchronize, + mustFullyAuthorize = mustFullyAuthorize, + store = store, + ) + } + + @Help.Summary("Replace party to participant mapping") + @Help.Description("""Replace the association of a party to hosting participants. + party: The unique identifier of the party whose set of participant permissions to modify. + newParticipants: The unique identifier of the participants to host the party. Each participant entry specifies + the participant's permissions (submission, confirmation, observation). + threshold: The threshold is `1` for regular parties and larger than `1` for "consortium parties". 
The threshold + indicates how many participant confirmations are needed in order to confirm a Daml transaction on + behalf the party. + domainId: The domain id if the party to participant mapping is specific to a domain. + signedBy: Refers to the optional fingerprint of the authorizing key which in turn refers to a specific, locally existing certificate. + serial: The expected serial this topology transaction should have. Serials must be contiguous and start at 1. + This transaction will be rejected if another fully authorized transaction with the same serial already + exists, or if there is a gap between this serial and the most recently used serial. + If None, the serial will be automatically selected by the node. + synchronize: Synchronize timeout can be used to ensure that the state has been propagated into the node + groupAddressing: If true, Daml transactions are sent to the consortium party rather than the hosting participants. + mustFullyAuthorize: When set to true, the proposal's previously received signatures and the signature of this node must be + sufficient to fully authorize the topology transaction. If this is not the case, the request fails. + When set to false, the proposal retains the proposal status until enough signatures are accumulated to + satisfy the mapping's authorization requirements. + store: - "Authorized": The topology transaction will be stored in the node's authorized store and automatically + propagated to connected domains, if applicable. + - "": The topology transaction will be directly submitted to the specified domain without + storing it locally first. This also means it will _not_ be synchronized to other domains + automatically. 
+ """) + def propose( + party: PartyId, + newParticipants: Seq[(ParticipantId, ParticipantPermissionX)], + threshold: PositiveInt = PositiveInt.one, + domainId: Option[DomainId] = None, + signedBy: Option[Fingerprint] = Some( + instance.id.uid.namespace.fingerprint + ), // TODO(#12945) don't use the instance's root namespace key by default. + serial: Option[PositiveInt] = None, + synchronize: Option[config.NonNegativeDuration] = Some( + consoleEnvironment.commandTimeouts.bounded + ), + groupAddressing: Boolean = false, + mustFullyAuthorize: Boolean = false, + store: String = AuthorizedStore.filterName, + ): SignedTopologyTransactionX[TopologyChangeOpX, PartyToParticipantX] = { + val op = NonEmpty.from(newParticipants) match { + case Some(_) => TopologyChangeOpX.Replace + case None => TopologyChangeOpX.Remove + } + + val command = TopologyAdminCommandsX.Write.Propose( + mapping = PartyToParticipantX( + partyId = party, + domainId = domainId, + threshold = threshold, + participants = newParticipants.map((HostingParticipant.apply _) tupled), + groupAddressing = groupAddressing, + ), + signedBy = signedBy.toList, + serial = serial, + change = op, + mustFullyAuthorize = mustFullyAuthorize, + store = store, + ) + + synchronisation.runAdminCommand(synchronize)(command) + } + + @Help.Summary("List party to participant mapping transactions") + @Help.Description( + """List the party to participant mapping transactions present in the stores. Party to participant mappings + |are topology transactions used to allocate a party to certain participants. The same party can be allocated + |on several participants with different privileges. + + filterStore: - "Authorized": Look in the node's authorized store. + - "": Look in the specified domain store. + proposals: Whether to query proposals instead of authorized transactions. + timeQuery: The time query allows to customize the query by time. 
The following options are supported: + TimeQuery.HeadState (default): The most recent known state. + TimeQuery.Snapshot(ts): The state at a certain point in time. + TimeQuery.Range(fromO, toO): Time-range of when the transaction was added to the store + operation: Optionally, what type of operation the transaction should have. + filterParty: Filter for parties starting with the given filter string. + filterParticipant: Filter for participants starting with the given filter string. + filterSigningKey: Filter for transactions that are authorized with a key that starts with the given filter string. + protocolVersion: Export the topology transactions in the optional protocol version. + |""" + ) + def list( + filterStore: String = "", + proposals: Boolean = false, + timeQuery: TimeQueryX = TimeQueryX.HeadState, + operation: Option[TopologyChangeOpX] = None, + filterParty: String = "", + filterParticipant: String = "", + filterSigningKey: String = "", + protocolVersion: Option[String] = None, + ): Seq[ListPartyToParticipantResult] = consoleEnvironment.run { + adminCommand( + TopologyAdminCommandsX.Read.ListPartyToParticipant( + BaseQueryX( + filterStore, + proposals, + timeQuery, + operation, + filterSigningKey, + protocolVersion.map(ProtocolVersion.tryCreate), + ), + filterParty, + filterParticipant, + ) + ) + } + } + + @Help.Summary("Manage domain trust certificates") + @Help.Group("Domain trust certificates") + object domain_trust_certificates extends Helpful { + def list( + filterStore: String = "", + proposals: Boolean = false, + timeQuery: TimeQueryX = TimeQueryX.HeadState, + operation: Option[TopologyChangeOpX] = None, + // TODO(#14048) should be filterDomain and filterParticipant + filterUid: String = "", + filterSigningKey: String = "", + protocolVersion: Option[String] = None, + ): Seq[ListDomainTrustCertificateResult] = consoleEnvironment.run { + adminCommand( + TopologyAdminCommandsX.Read.ListDomainTrustCertificate( + BaseQueryX( + filterStore, + proposals, + 
timeQuery, + operation, + filterSigningKey, + protocolVersion.map(ProtocolVersion.tryCreate), + ), + filterUid, + ) + ) + } + + // TODO(#14057) document console command + def active(domainId: DomainId, participantId: ParticipantId): Boolean = + list(filterStore = domainId.filterString).exists { x => + x.item.domainId == domainId && x.item.participantId == participantId + } + + @Help.Summary("Propose a change to a participant's domain trust certificate.") + @Help.Description( + """A participant's domain trust certificate serves two functions: + |1. It signals to the domain that the participant would like to act on the domain. + |2. It controls whether contracts can be reassigned to any domain or only a specific set of domains. + + participantId: the identifier of the trust certificate's target participant + domainId: the identifier of the domain on which the participant would like to act + transferOnlyToGivenTargetDomains: whether or not to restrict reassignments to a set of domains + targetDomains: the set of domains to which the participant permits assignments of contracts + + store: - "Authorized": the topology transaction will be stored in the node's authorized store and automatically + propagated to connected domains, if applicable. + - "": the topology transaction will be directly submitted to the specified domain without + storing it locally first. This also means it will _not_ be synchronized to other domains + automatically. + mustFullyAuthorize: when set to true, the proposal's previously received signatures and the signature of this node must be + sufficient to fully authorize the topology transaction. if this is not the case, the request fails. + when set to false, the proposal retains the proposal status until enough signatures are accumulated to + satisfy the mapping's authorization requirements. + serial: the expected serial this topology transaction should have. Serials must be contiguous and start at 1. 
+ This transaction will be rejected if another fully authorized transaction with the same serial already + exists, or if there is a gap between this serial and the most recently used serial. + If None, the serial will be automatically selected by the node. + """ + ) + def propose( + participantId: ParticipantId, + domainId: DomainId, + transferOnlyToGivenTargetDomains: Boolean, + targetDomains: Seq[DomainId], + synchronize: Option[NonNegativeDuration] = Some( + consoleEnvironment.commandTimeouts.bounded + ), + // Using the authorized store by default, because the trust cert upon connecting to a domain is also stored in the authorized store + store: Option[String] = Some(AuthorizedStore.filterName), + mustFullyAuthorize: Boolean = true, + serial: Option[PositiveInt] = None, + ): SignedTopologyTransactionX[TopologyChangeOpX, DomainTrustCertificateX] = { + val cmd = TopologyAdminCommandsX.Write.Propose( + mapping = DomainTrustCertificateX( + participantId, + domainId, + transferOnlyToGivenTargetDomains, + targetDomains, + ), + signedBy = Seq(instance.id.uid.namespace.fingerprint), + store = store.getOrElse(domainId.filterString), + serial = serial, + mustFullyAuthorize = mustFullyAuthorize, + ) + synchronisation.runAdminCommand(synchronize)(cmd) + } + + } + + @Help.Summary("Inspect participant domain permissions") + @Help.Group("Participant Domain Permissions") + object participant_domain_permissions extends Helpful { + @Help.Summary("Propose changes to the domain permissions of participants.") + @Help.Description( + """Domain operators may use this command to change a participant's permissions on a domain. 
+ + domainId: the target domain + participantId: the participant whose permissions should be changed + permission: the participant's permission + trustLevel: the participant's trust level + loginAfter: the earliest time a participant may connect to the domain + limits: domain limits for this participant + + store: - "Authorized": the topology transaction will be stored in the node's authorized store and automatically + propagated to connected domains, if applicable. + - "": the topology transaction will be directly submitted to the specified domain without + storing it locally first. This also means it will _not_ be synchronized to other domains + automatically. + mustFullyAuthorize: when set to true, the proposal's previously received signatures and the signature of this node must be + sufficient to fully authorize the topology transaction. if this is not the case, the request fails. + when set to false, the proposal retains the proposal status until enough signatures are accumulated to + satisfy the mapping's authorization requirements. + serial: the expected serial this topology transaction should have. Serials must be contiguous and start at 1. + This transaction will be rejected if another fully authorized transaction with the same serial already + exists, or if there is a gap between this serial and the most recently used serial. 
+ If None, the serial will be automatically selected by the node.""" + ) + def propose( + domainId: DomainId, + participantId: ParticipantId, + permission: ParticipantPermissionX, + trustLevel: TrustLevelX = TrustLevelX.Ordinary, + loginAfter: Option[CantonTimestamp] = None, + limits: Option[ParticipantDomainLimits] = None, + synchronize: Option[NonNegativeDuration] = Some( + consoleEnvironment.commandTimeouts.bounded + ), + store: Option[String] = None, + mustFullyAuthorize: Boolean = false, + serial: Option[PositiveInt] = None, + ): SignedTopologyTransactionX[TopologyChangeOpX, ParticipantDomainPermissionX] = { + val cmd = TopologyAdminCommandsX.Write.Propose( + mapping = ParticipantDomainPermissionX( + domainId = domainId, + participantId = participantId, + permission = permission, + trustLevel = trustLevel, + limits = limits, + loginAfter = loginAfter, + ), + signedBy = Seq(instance.id.uid.namespace.fingerprint), + serial = serial, + store = store.getOrElse(domainId.filterString), + mustFullyAuthorize = mustFullyAuthorize, + ) + + synchronisation.runAdminCommand(synchronize)(cmd) + } + + def list( + filterStore: String = "", + proposals: Boolean = false, + timeQuery: TimeQueryX = TimeQueryX.HeadState, + operation: Option[TopologyChangeOpX] = None, + filterUid: String = "", + filterSigningKey: String = "", + protocolVersion: Option[String] = None, + ): Seq[ListParticipantDomainPermissionResult] = consoleEnvironment.run { + adminCommand( + TopologyAdminCommandsX.Read.ListParticipantDomainPermission( + BaseQueryX( + filterStore, + proposals, + timeQuery, + operation, + filterSigningKey, + protocolVersion.map(ProtocolVersion.tryCreate), + ), + filterUid, + ) + ) + } + } + + @Help.Summary("Inspect participant domain states") + @Help.Group("Participant Domain States") + object participant_domain_states extends Helpful { + @Help.Summary("Returns true if the given participant is currently active on the given domain") + @Help.Description( + """Active means that the 
participant has been granted at least observation rights on the domain + |and that the participant has registered a domain trust certificate""" + ) + def active(domainId: DomainId, participantId: ParticipantId): Boolean = { + // TODO(#14048) Should we check the other side (domain accepts participant)? + domain_trust_certificates.active(domainId, participantId) + } + } + + @Help.Summary("Manage traffic control") + @Help.Group("Member traffic control") + object traffic_control { + @Help.Summary("List traffic control topology transactions.") + def list( + filterMember: String = instance.id.filterString, + filterStore: String = "", + proposals: Boolean = false, + timeQuery: TimeQueryX = TimeQueryX.HeadState, + operation: Option[TopologyChangeOpX] = None, + filterSigningKey: String = "", + protocolVersion: Option[String] = None, + ): Seq[ListTrafficStateResult] = consoleEnvironment.run { + adminCommand( + TopologyAdminCommandsX.Read.ListTrafficControlState( + BaseQueryX( + filterStore, + proposals, + timeQuery, + operation, + filterSigningKey, + protocolVersion.map(ProtocolVersion.tryCreate), + ), + filterMember = filterMember, + ) + ) + } + + @Help.Summary("Top up traffic for this node") + @Help.Description( + """Use this command to update the new total traffic limit for the node. + The top up will have to be authorized by the domain to be accepted. 
+ The newTotalTrafficAmount must be strictly increasing top up after top up.""" + ) + def top_up( + domainId: DomainId, + newTotalTrafficAmount: PositiveLong, + member: Member = instance.id.member, + serial: Option[PositiveInt] = None, + signedBy: Option[Fingerprint] = Some(instance.id.uid.namespace.fingerprint), + ): SignedTopologyTransactionX[TopologyChangeOpX, TrafficControlStateX] = { + consoleEnvironment.run { + adminCommand( + TopologyAdminCommandsX.Write.Propose( + TrafficControlStateX + .create( + domainId, + member, + newTotalTrafficAmount, + ), + signedBy = signedBy.toList, + serial = serial, + change = TopologyChangeOpX.Replace, + mustFullyAuthorize = true, + forceChange = false, + store = domainId.filterString, + ) + ) + } + } + } + + @Help.Summary("Manage party hosting limits") + @Help.Group("Party hosting limits") + object party_hosting_limits extends Helpful { + def list( + filterStore: String = "", + proposals: Boolean = false, + timeQuery: TimeQueryX = TimeQueryX.HeadState, + operation: Option[TopologyChangeOpX] = None, + filterUid: String = "", + filterSigningKey: String = "", + protocolVersion: Option[String] = None, + ): Seq[ListPartyHostingLimitsResult] = consoleEnvironment.run { + adminCommand( + TopologyAdminCommandsX.Read.ListPartyHostingLimits( + BaseQueryX( + filterStore, + proposals, + timeQuery, + operation, + filterSigningKey, + protocolVersion.map(ProtocolVersion.tryCreate), + ), + filterUid, + ) + ) + } + + @Help.Summary("Propose a limitation of how many participants may host a certain party") + @Help.Description(""" + domainId: the domain on which to impose the limits for the given party + partyId: the party to which the hosting limits are applied + maxNumHostingParticipants: the maximum number of participants that may host the given party + + store: - "Authorized": the topology transaction will be stored in the node's authorized store and automatically + propagated to connected domains, if applicable. 
+ - "": the topology transaction will be directly submitted to the specified domain without + storing it locally first. This also means it will _not_ be synchronized to other domains + automatically. + mustFullyAuthorize: when set to true, the proposal's previously received signatures and the signature of this node must be + sufficient to fully authorize the topology transaction. if this is not the case, the request fails. + when set to false, the proposal retains the proposal status until enough signatures are accumulated to + satisfy the mapping's authorization requirements. + signedBy: the fingerprint of the key to be used to sign this proposal + serial: the expected serial this topology transaction should have. Serials must be contiguous and start at 1. + This transaction will be rejected if another fully authorized transaction with the same serial already + exists, or if there is a gap between this serial and the most recently used serial. + If None, the serial will be automatically selected by the node.""") + def propose( + domainId: DomainId, + partyId: PartyId, + maxNumHostingParticipants: Int, + store: Option[String] = None, + mustFullyAuthorize: Boolean = false, + signedBy: Seq[Fingerprint] = Seq(instance.id.uid.namespace.fingerprint), + serial: Option[PositiveInt] = None, + synchronize: Option[NonNegativeDuration] = Some( + consoleEnvironment.commandTimeouts.bounded + ), + ): SignedTopologyTransactionX[TopologyChangeOpX, PartyHostingLimitsX] = { + synchronisation.runAdminCommand(synchronize)( + TopologyAdminCommandsX.Write.Propose( + PartyHostingLimitsX(domainId, partyId, maxNumHostingParticipants), + signedBy = signedBy, + store = store.getOrElse(domainId.toProtoPrimitive), + serial = serial, + change = TopologyChangeOpX.Replace, + mustFullyAuthorize = mustFullyAuthorize, + ) + ) + } + } + + @Help.Summary("Manage package vettings") + @Help.Group("Vetted Packages") + object vetted_packages extends Helpful { + + @Help.Summary("Change package vettings") + 
@Help.Description( + """A participant will only process transactions that reference packages that all involved participants have + |vetted previously. Vetting is done by registering a respective topology transaction with the domain, + |which can then be used by other participants to verify that a transaction is only using + |vetted packages. + |Note that all referenced and dependent packages must exist in the package store. + + participantId: the identifier of the participant vetting the packages + adds: The lf-package ids to be vetted. + removes: The lf-package ids to be unvetted. + domainId: The domain id if the package vetting is specific to a domain. + store: - "Authorized": the topology transaction will be stored in the node's authorized store and automatically + propagated to connected domains, if applicable. + - "": the topology transaction will be directly submitted to the specified domain without + storing it locally first. This also means it will _not_ be synchronized to other domains + automatically. + filterParticipant: Filter for participants starting with the given filter string. + mustFullyAuthorize: when set to true, the proposal's previously received signatures and the signature of this node must be + sufficient to fully authorize the topology transaction. if this is not the case, the request fails. + when set to false, the proposal retains the proposal status until enough signatures are accumulated to + satisfy the mapping's authorization requirements. + serial: the expected serial this topology transaction should have. Serials must be contiguous and start at 1. + This transaction will be rejected if another fully authorized transaction with the same serial already + exists, or if there is a gap between this serial and the most recently used serial. + If None, the serial will be automatically selected by the node. 
+ signedBy: the fingerprint of the key to be used to sign this proposal + |""" + ) + def propose_delta( + participant: ParticipantId, + adds: Seq[PackageId] = Nil, + removes: Seq[PackageId] = Nil, + domainId: Option[DomainId] = None, + store: String = AuthorizedStore.filterName, + filterParticipant: String = "", + mustFullyAuthorize: Boolean = false, + synchronize: Option[NonNegativeDuration] = Some( + consoleEnvironment.commandTimeouts.bounded + ), + serial: Option[PositiveInt] = None, + signedBy: Option[Fingerprint] = Some( + instance.id.uid.namespace.fingerprint + ), // TODO(#12945) don't use the instance's root namespace key by default. + ): SignedTopologyTransactionX[TopologyChangeOpX, VettedPackagesX] = { + + // compute the diff and then call the propose method + val current0 = expectAtMostOneResult( + list(filterStore = store, filterParticipant = filterParticipant) + ) + + (adds, removes) match { + case (Nil, Nil) => + throw new IllegalArgumentException( + "Ensure that at least one of the two parameters (adds or removes) is not empty." + ) + case (_, _) => + val newDiffPackageIds = current0 match { + case Some(value) => ((value.item.packageIds ++ adds).diff(removes)).distinct + case None => (adds.diff(removes)).distinct + } + + propose( + participant = participant, + packageIds = newDiffPackageIds, + domainId, + store, + mustFullyAuthorize, + synchronize, + serial, + signedBy, + ) + } + } + @Help.Summary("Replace package vettings") + @Help.Description("""A participant will only process transactions that reference packages that all involved participants have + |vetted previously. Vetting is done by registering a respective topology transaction with the domain, + |which can then be used by other participants to verify that a transaction is only using + |vetted packages. + |Note that all referenced and dependent packages must exist in the package store. 
+ + participantId: the identifier of the participant vetting the packages + packageIds: The lf-package ids to be vetted that will replace the previous vetted packages. + domainId: The domain id if the package vetting is specific to a domain. + store: - "Authorized": the topology transaction will be stored in the node's authorized store and automatically + propagated to connected domains, if applicable. + - "": the topology transaction will be directly submitted to the specified domain without + storing it locally first. This also means it will _not_ be synchronized to other domains automatically. + mustFullyAuthorize: when set to true, the proposal's previously received signatures and the signature of this node must be + sufficient to fully authorize the topology transaction. if this is not the case, the request fails. + when set to false, the proposal retains the proposal status until enough signatures are accumulated to + satisfy the mapping's authorization requirements. + serial: the expected serial this topology transaction should have. Serials must be contiguous and start at 1. + This transaction will be rejected if another fully authorized transaction with the same serial already + exists, or if there is a gap between this serial and the most recently used serial. + If None, the serial will be automatically selected by the node. + signedBy: the fingerprint of the key to be used to sign this proposal + ops: Either Replace or Remove the vetting. Default to Replace. + |""") + def propose( + participant: ParticipantId, + packageIds: Seq[PackageId], + domainId: Option[DomainId] = None, + store: String = AuthorizedStore.filterName, + mustFullyAuthorize: Boolean = false, + synchronize: Option[NonNegativeDuration] = Some( + consoleEnvironment.commandTimeouts.bounded + ), + serial: Option[PositiveInt] = None, + signedBy: Option[Fingerprint] = Some( + instance.id.uid.namespace.fingerprint + ), // TODO(#12945) don't use the instance's root namespace key by default. 
+ ): SignedTopologyTransactionX[TopologyChangeOpX, VettedPackagesX] = { + + val topologyChangeOpX = + if (packageIds.isEmpty) TopologyChangeOpX.Remove else TopologyChangeOpX.Replace + + val command = TopologyAdminCommandsX.Write.Propose( + mapping = VettedPackagesX( + participantId = participant, + domainId = domainId, + packageIds = packageIds, + ), + signedBy = signedBy.toList, + serial = serial, + change = topologyChangeOpX, + mustFullyAuthorize = mustFullyAuthorize, + store = store, + ) + + synchronisation.runAdminCommand(synchronize)(command) + } + + def list( + filterStore: String = "", + proposals: Boolean = false, + timeQuery: TimeQueryX = TimeQueryX.HeadState, + operation: Option[TopologyChangeOpX] = None, + filterParticipant: String = "", + filterSigningKey: String = "", + protocolVersion: Option[String] = None, + ): Seq[ListVettedPackagesResult] = consoleEnvironment.run { + adminCommand( + TopologyAdminCommandsX.Read.ListVettedPackages( + BaseQueryX( + filterStore, + proposals, + timeQuery, + operation, + filterSigningKey, + protocolVersion.map(ProtocolVersion.tryCreate), + ), + filterParticipant, + ) + ) + } + } + + @Help.Summary("Manage authority-of mappings") + @Help.Group("Authority-of mappings") + object authority_of extends Helpful { + @Help.Summary("Propose a new AuthorityOf mapping.") + @Help.Description(""" + partyId: the party for which the authority delegation is granted + threshold: the minimum number of parties that need to authorize a daml (sub-)transaction for the authority of `partyId` to be granted. + parties: the parties that need to provide authorization for the authority of `partyId` to be granted. + domainId: the optional target domain on which the authority delegation is valid. + + store: - "Authorized": the topology transaction will be stored in the node's authorized store and automatically + propagated to connected domains, if applicable. 
+ - "": the topology transaction will be directly submitted to the specified domain without + storing it locally first. This also means it will _not_ be synchronized to other domains + automatically. + mustFullyAuthorize: when set to true, the proposal's previously received signatures and the signature of this node must be + sufficient to fully authorize the topology transaction. if this is not the case, the request fails. + when set to false, the proposal retains the proposal status until enough signatures are accumulated to + satisfy the mapping's authorization requirements. + signedBy: the fingerprint of the key to be used to sign this proposal + serial: the expected serial this topology transaction should have. Serials must be contiguous and start at 1. + This transaction will be rejected if another fully authorized transaction with the same serial already + exists, or if there is a gap between this serial and the most recently used serial. + If None, the serial will be automatically selected by the node.""") + def propose( + partyId: PartyId, + threshold: Int, + parties: Seq[PartyId], + domainId: Option[DomainId] = None, + store: String = AuthorizedStore.filterName, + mustFullyAuthorize: Boolean = false, + // TODO(#14056) don't use the instance's root namespace key by default. 
+ // let the grpc service figure out the right key to use, once that's implemented + signedBy: Option[Fingerprint] = Some(instance.id.uid.namespace.fingerprint), + serial: Option[PositiveInt] = None, + ): SignedTopologyTransactionX[TopologyChangeOpX, AuthorityOfX] = + consoleEnvironment.run { + adminCommand( + TopologyAdminCommandsX.Write.Propose( + AuthorityOfX( + partyId, + domainId, + PositiveInt.tryCreate(threshold), + parties, + ), + signedBy = signedBy.toList, + serial = serial, + store = store, + mustFullyAuthorize = mustFullyAuthorize, + ) + ) + } + + def list( + filterStore: String = "", + proposals: Boolean = false, + timeQuery: TimeQueryX = TimeQueryX.HeadState, + operation: Option[TopologyChangeOpX] = None, + filterParty: String = "", + filterSigningKey: String = "", + protocolVersion: Option[String] = None, + ): Seq[ListAuthorityOfResult] = consoleEnvironment.run { + adminCommand( + TopologyAdminCommandsX.Read.ListAuthorityOf( + BaseQueryX( + filterStore, + proposals, + timeQuery, + operation, + filterSigningKey, + protocolVersion.map(ProtocolVersion.tryCreate), + ), + filterParty, + ) + ) + } + } + + @Help.Summary("Inspect mediator domain state") + @Help.Group("Mediator Domain State") + object mediators extends Helpful { + def list( + filterStore: String = "", + proposals: Boolean = false, + timeQuery: TimeQueryX = TimeQueryX.HeadState, + operation: Option[TopologyChangeOpX] = None, + filterDomain: String = "", + filterSigningKey: String = "", + protocolVersion: Option[String] = None, + ): Seq[ListMediatorDomainStateResult] = consoleEnvironment.run { + adminCommand( + TopologyAdminCommandsX.Read.MediatorDomainState( + BaseQueryX( + filterStore, + proposals, + timeQuery, + operation, + filterSigningKey, + protocolVersion.map(ProtocolVersion.tryCreate), + ), + filterDomain, + ) + ) + } + + @Help.Summary("Propose changes to the mediator topology") + @Help.Description(""" + domainId: the target domain + threshold: the minimum number of mediators that need 
to come to a consensus for a message to be sent to other members. + active: the list of mediators that will take part in the mediator consensus in this mediator group + passive: the mediators that will receive all messages but will not participate in mediator consensus + group: the mediator group identifier + + + store: - "Authorized": the topology transaction will be stored in the node's authorized store and automatically + propagated to connected domains, if applicable. + - "": the topology transaction will be directly submitted to the specified domain without + storing it locally first. This also means it will _not_ be synchronized to other domains + automatically. + mustFullyAuthorize: when set to true, the proposal's previously received signatures and the signature of this node must be + sufficient to fully authorize the topology transaction. if this is not the case, the request fails. + when set to false, the proposal retains the proposal status until enough signatures are accumulated to + satisfy the mapping's authorization requirements. + signedBy: the fingerprint of the key to be used to sign this proposal + serial: the expected serial this topology transaction should have. Serials must be contiguous and start at 1. + This transaction will be rejected if another fully authorized transaction with the same serial already + exists, or if there is a gap between this serial and the most recently used serial. + If None, the serial will be automatically selected by the node.""") + def propose( + domainId: DomainId, + threshold: PositiveInt, + active: Seq[MediatorId], + passive: Seq[MediatorId] = Seq.empty, + group: NonNegativeInt, + store: Option[String] = None, + mustFullyAuthorize: Boolean = false, + // TODO(#14056) don't use the instance's root namespace key by default. 
+ // let the grpc service figure out the right key to use, once that's implemented + signedBy: Option[Fingerprint] = Some(instance.id.uid.namespace.fingerprint), + serial: Option[PositiveInt] = None, + ): SignedTopologyTransactionX[TopologyChangeOpX, MediatorDomainStateX] = + consoleEnvironment.run { + adminCommand( + TopologyAdminCommandsX.Write.Propose( + mapping = MediatorDomainStateX + .create(domainId, group, threshold, active, passive), + signedBy = signedBy.toList, + serial = serial, + change = TopologyChangeOpX.Replace, + mustFullyAuthorize = mustFullyAuthorize, + forceChange = false, + store = store.getOrElse(domainId.filterString), + ) + ) + } + } + + @Help.Summary("Inspect sequencer domain state") + @Help.Group("Sequencer Domain State") + object sequencers extends Helpful { + def list( + filterStore: String = "", + proposals: Boolean = false, + timeQuery: TimeQueryX = TimeQueryX.HeadState, + operation: Option[TopologyChangeOpX] = None, + filterDomain: String = "", + filterSigningKey: String = "", + protocolVersion: Option[String] = None, + ): Seq[ListSequencerDomainStateResult] = consoleEnvironment.run { + adminCommand( + TopologyAdminCommandsX.Read.SequencerDomainState( + BaseQueryX( + filterStore, + proposals, + timeQuery, + operation, + filterSigningKey, + protocolVersion.map(ProtocolVersion.tryCreate), + ), + filterDomain, + ) + ) + } + + @Help.Summary("Propose changes to the sequencer topology") + @Help.Description( + """ + domainId: the target domain + active: the list of active sequencers + passive: sequencers that receive messages but are not available for members to connect to + + store: - "Authorized": the topology transaction will be stored in the node's authorized store and automatically + propagated to connected domains, if applicable. + - "": the topology transaction will be directly submitted to the specified domain without + storing it locally first. This also means it will _not_ be synchronized to other domains + automatically. 
+ mustFullyAuthorize: when set to true, the proposal's previously received signatures and the signature of this node must be + sufficient to fully authorize the topology transaction. if this is not the case, the request fails. + when set to false, the proposal retains the proposal status until enough signatures are accumulated to + satisfy the mapping's authorization requirements. + signedBy: the fingerprint of the key to be used to sign this proposal + serial: the expected serial this topology transaction should have. Serials must be contiguous and start at 1. + This transaction will be rejected if another fully authorized transaction with the same serial already + exists, or if there is a gap between this serial and the most recently used serial. + If None, the serial will be automatically selected by the node.""" + ) + def propose( + domainId: DomainId, + threshold: PositiveInt, + active: Seq[SequencerId], + passive: Seq[SequencerId] = Seq.empty, + store: Option[String] = None, + mustFullyAuthorize: Boolean = false, + // TODO(#14056) don't use the instance's root namespace key by default. 
+ // let the grpc service figure out the right key to use, once that's implemented + signedBy: Option[Fingerprint] = Some(instance.id.uid.namespace.fingerprint), + serial: Option[PositiveInt] = None, + ): SignedTopologyTransactionX[TopologyChangeOpX, SequencerDomainStateX] = + consoleEnvironment.run { + adminCommand( + TopologyAdminCommandsX.Write.Propose( + mapping = SequencerDomainStateX.create(domainId, threshold, active, passive), + signedBy = signedBy.toList, + serial = serial, + change = TopologyChangeOpX.Replace, + mustFullyAuthorize = mustFullyAuthorize, + forceChange = false, + store = store.getOrElse(domainId.filterString), + ) + ) + } + } + + @Help.Summary("Manage domain parameters state", FeatureFlag.Preview) + @Help.Group("Domain Parameters State") + object domain_parameters extends Helpful { + def list( + filterStore: String = "", + proposals: Boolean = false, + timeQuery: TimeQueryX = TimeQueryX.HeadState, + operation: Option[TopologyChangeOpX] = None, + filterDomain: String = "", + filterSigningKey: String = "", + protocolVersion: Option[String] = None, + ): Seq[ListDomainParametersStateResult] = consoleEnvironment.run { + adminCommand( + TopologyAdminCommandsX.Read.DomainParametersState( + BaseQueryX( + filterStore, + proposals, + timeQuery, + operation, + filterSigningKey, + protocolVersion.map(ProtocolVersion.tryCreate), + ), + filterDomain, + ) + ) + } + + @Help.Summary("Propose changes to dynamic domain parameters") + @Help.Description(""" + domain: the target domain + parameters: the new dynamic domain parameters to be used on the domain + + store: - "Authorized": the topology transaction will be stored in the node's authorized store and automatically + propagated to connected domains, if applicable. + - "": the topology transaction will be directly submitted to the specified domain without + storing it locally first. This also means it will _not_ be synchronized to other domains + automatically. 
+ mustFullyAuthorize: when set to true, the proposal's previously received signatures and the signature of this node must be + sufficient to fully authorize the topology transaction. if this is not the case, the request fails. + when set to false, the proposal retains the proposal status until enough signatures are accumulated to + satisfy the mapping's authorization requirements. + signedBy: the fingerprint of the key to be used to sign this proposal + serial: the expected serial this topology transaction should have. Serials must be contiguous and start at 1. + This transaction will be rejected if another fully authorized transaction with the same serial already + exists, or if there is a gap between this serial and the most recently used serial. + If None, the serial will be automatically selected by the node.""") + def propose( + domain: DomainId, + parameters: DynamicDomainParameters, + store: Option[String] = None, + mustFullyAuthorize: Boolean = false, + // TODO(#14056) don't use the instance's root namespace key by default. 
+ // let the grpc service figure out the right key to use, once that's implemented + signedBy: Option[Fingerprint] = Some(instance.id.uid.namespace.fingerprint), + serial: Option[PositiveInt] = None, + ): SignedTopologyTransactionX[TopologyChangeOpX, DomainParametersStateX] = + consoleEnvironment.run { + adminCommand( + TopologyAdminCommandsX.Write.Propose( + // TODO(#14058) maybe don't just take default values for dynamic parameters + DomainParametersStateX( + domain, + parameters, + ), + signedBy.toList, + serial = serial, + mustFullyAuthorize = mustFullyAuthorize, + store = store.getOrElse(domain.filterString), + ) + ) + } + } + + @Help.Summary("Inspect topology stores") + @Help.Group("Topology stores") + object stores extends Helpful { + @Help.Summary("List available topology stores") + def list(): Seq[String] = + consoleEnvironment.run { + adminCommand( + TopologyAdminCommandsX.Read.ListStores() + ) + } + } + + private def expectAtMostOneResult[R](seq: Seq[R]): Option[R] = seq match { + case Nil => None + case res +: Nil => Some(res) + case multipleResults => + throw new IllegalStateException( + s"Found ${multipleResults.size} results, but expect at most one." + ) + } +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/TrafficControlAdministrationGroup.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/TrafficControlAdministrationGroup.scala new file mode 100644 index 0000000000..6b87a948b9 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/TrafficControlAdministrationGroup.scala @@ -0,0 +1,70 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.console.commands + +import com.digitalasset.canton.admin.api.client.commands.ParticipantAdminCommands +import com.digitalasset.canton.config.RequireTypes.{PositiveInt, PositiveLong} +import com.digitalasset.canton.console.{ + AdminCommandRunner, + ConsoleEnvironment, + FeatureFlagFilter, + Help, + Helpful, + InstanceReferenceX, +} +import com.digitalasset.canton.crypto.Fingerprint +import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.topology.* +import com.digitalasset.canton.topology.transaction.{ + SignedTopologyTransactionX, + TopologyChangeOpX, + TrafficControlStateX, +} +import com.digitalasset.canton.traffic.MemberTrafficStatus + +class TrafficControlAdministrationGroup( + instance: InstanceReferenceX, + topology: TopologyAdministrationGroupX, + runner: AdminCommandRunner, + override val consoleEnvironment: ConsoleEnvironment, + override val loggerFactory: NamedLoggerFactory, +) extends Helpful + with FeatureFlagFilter { + + @Help.Summary("Return the traffic state of the node") + @Help.Description( + """Use this command to get the traffic state of the node at a given time for a specific domain ID.""" + ) + def traffic_state( + domainId: DomainId + ): MemberTrafficStatus = { + consoleEnvironment.run( + runner.adminCommand( + ParticipantAdminCommands.TrafficControl + .GetTrafficControlState(domainId) + ) + ) + } + + @Help.Summary("Top up traffic for this node") + @Help.Description( + """Use this command to update the new total traffic limit for the node.""" + ) + def top_up( + domainId: DomainId, + newTotalTrafficAmount: PositiveLong, + member: Member = instance.id.member, + serial: Option[PositiveInt] = None, + signedBy: Option[Fingerprint] = Some(instance.id.uid.namespace.fingerprint), + ): SignedTopologyTransactionX[TopologyChangeOpX, TrafficControlStateX] = { + topology.traffic_control.top_up( + domainId, + newTotalTrafficAmount, + member, + 
serial, + signedBy, + ) + } + +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/TrafficControlSequencerAdministrationGroup.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/TrafficControlSequencerAdministrationGroup.scala new file mode 100644 index 0000000000..8e343ba068 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/TrafficControlSequencerAdministrationGroup.scala @@ -0,0 +1,63 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.console.commands + +import com.digitalasset.canton.admin.api.client.commands.SequencerAdminCommands +import com.digitalasset.canton.console.{ + AdminCommandRunner, + ConsoleEnvironment, + FeatureFlag, + FeatureFlagFilter, + Help, + Helpful, + InstanceReferenceX, +} +import com.digitalasset.canton.domain.sequencing.sequencer.traffic.SequencerTrafficStatus +import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.topology.* + +class TrafficControlSequencerAdministrationGroup( + instance: InstanceReferenceX, + topology: TopologyAdministrationGroupX, + runner: AdminCommandRunner, + override val consoleEnvironment: ConsoleEnvironment, + override val loggerFactory: NamedLoggerFactory, +) extends TrafficControlAdministrationGroup( + instance, + topology, + runner, + consoleEnvironment, + loggerFactory, + ) + with Helpful + with FeatureFlagFilter { + + @Help.Summary("Return the traffic state of the given members") + @Help.Description( + """Use this command to get the traffic state of a list of members.""" + ) + def traffic_state_of_members( + members: Seq[Member] + ): SequencerTrafficStatus = { + consoleEnvironment.run( + runner.adminCommand( + SequencerAdminCommands.GetTrafficControlState(members) + ) + ) + } + + @Help.Summary("Return the 
traffic state of the all members") + @Help.Description( + """Use this command to get the traffic state of all members.""" + ) + def traffic_state_of_all_members: SequencerTrafficStatus = { + check(FeatureFlag.Preview)( + consoleEnvironment.run( + runner.adminCommand( + SequencerAdminCommands.GetTrafficControlState(Seq.empty) + ) + ) + ) + } +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/VaultAdministration.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/VaultAdministration.scala new file mode 100644 index 0000000000..1f66282258 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/VaultAdministration.scala @@ -0,0 +1,613 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.console.commands + +import cats.data.EitherT +import cats.syntax.either.* +import com.digitalasset.canton.admin.api.client.commands.{TopologyAdminCommands, VaultAdminCommands} +import com.digitalasset.canton.admin.api.client.data.ListKeyOwnersResult +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.console.{ + AdminCommandRunner, + ConsoleEnvironment, + FeatureFlag, + FeatureFlagFilter, + Help, + Helpful, + InstanceReferenceCommon, +} +import com.digitalasset.canton.crypto.* +import com.digitalasset.canton.crypto.admin.grpc.PrivateKeyMetadata +import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.time.Clock +import com.digitalasset.canton.topology.store.TopologyStoreId.AuthorizedStore +import com.digitalasset.canton.topology.{Member, MemberCode} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.{BinaryFileUtil, OptionUtil} +import com.digitalasset.canton.version.ProtocolVersion +import 
com.google.protobuf.ByteString + +import java.io.File +import java.nio.file.Files +import java.nio.file.attribute.PosixFilePermission.{OWNER_READ, OWNER_WRITE} +import java.time.Instant +import scala.concurrent.{ExecutionContext, Future} +import scala.jdk.CollectionConverters.* + +class SecretKeyAdministration( + instance: InstanceReferenceCommon, + runner: AdminCommandRunner, + override protected val consoleEnvironment: ConsoleEnvironment, + override protected val loggerFactory: NamedLoggerFactory, +) extends Helpful + with FeatureFlagFilter { + + import runner.* + + protected def regenerateKey(currentKey: PublicKey, name: Option[String]): PublicKey = { + currentKey match { + case encKey: EncryptionPublicKey => + instance.keys.secret.generate_encryption_key( + scheme = Some(encKey.scheme), + name = OptionUtil.noneAsEmptyString(name), + ) + case signKey: SigningPublicKey => + instance.keys.secret.generate_signing_key( + scheme = Some(signKey.scheme), + name = OptionUtil.noneAsEmptyString(name), + ) + case unknown => throw new IllegalArgumentException(s"Invalid public key type: $unknown") + } + } + + @Help.Summary("List keys in private vault") + @Help.Description("""Returns all public keys to the corresponding private keys in the key vault. + |Optional arguments can be used for filtering.""") + def list( + filterFingerprint: String = "", + filterName: String = "", + purpose: Set[KeyPurpose] = Set.empty, + ): Seq[PrivateKeyMetadata] = + consoleEnvironment.run { + adminCommand(VaultAdminCommands.ListMyKeys(filterFingerprint, filterName, purpose)) + } + + @Help.Summary("Generate new public/private key pair for signing and store it in the vault") + @Help.Description( + """ + |The optional name argument allows you to store an associated string for your convenience. 
+ |The scheme can be used to select a key scheme and the default scheme is used if left unspecified.""" + ) + def generate_signing_key( + name: String = "", + scheme: Option[SigningKeyScheme] = None, + ): SigningPublicKey = { + consoleEnvironment.run { + adminCommand(VaultAdminCommands.GenerateSigningKey(name, scheme)) + } + } + + @Help.Summary("Generate new public/private key pair for encryption and store it in the vault") + @Help.Description( + """ + |The optional name argument allows you to store an associated string for your convenience. + |The scheme can be used to select a key scheme and the default scheme is used if left unspecified.""" + ) + def generate_encryption_key( + name: String = "", + scheme: Option[EncryptionKeyScheme] = None, + ): EncryptionPublicKey = { + consoleEnvironment.run { + adminCommand(VaultAdminCommands.GenerateEncryptionKey(name, scheme)) + } + } + + @Help.Summary( + "Register the specified KMS signing key in canton storing its public information in the vault" + ) + @Help.Description( + """ + |The id for the KMS signing key. + |The optional name argument allows you to store an associated string for your convenience.""" + ) + def register_kms_signing_key( + kmsKeyId: String, + name: String = "", + ): SigningPublicKey = { + consoleEnvironment.run { + adminCommand(VaultAdminCommands.RegisterKmsSigningKey(kmsKeyId, name)) + } + } + + @Help.Summary( + "Register the specified KMS encryption key in canton storing its public information in the vault" + ) + @Help.Description( + """ + |The id for the KMS encryption key. 
+ |The optional name argument allows you to store an associated string for your convenience.""" + ) + def register_kms_encryption_key( + kmsKeyId: String, + name: String = "", + ): EncryptionPublicKey = { + consoleEnvironment.run { + adminCommand(VaultAdminCommands.RegisterKmsEncryptionKey(kmsKeyId, name)) + } + } + + private def findPublicKey( + fingerprint: String, + topologyAdmin: TopologyAdministrationGroupCommon, + owner: Member, + ): PublicKey = + findPublicKeys(topologyAdmin, owner).find(_.fingerprint.unwrap == fingerprint) match { + case Some(key) => key + case None => + throw new IllegalStateException( + s"The key $fingerprint does not exist" + ) + } + + @Help.Summary("Rotate a given node's keypair with a new pre-generated KMS keypair") + @Help.Description( + """Rotates an existing encryption or signing key stored externally in a KMS with a pre-generated + key. + |The fingerprint of the key we want to rotate. + |The id of the new KMS key (e.g. Resource Name).""" + ) + def rotate_kms_node_key(fingerprint: String, newKmsKeyId: String): PublicKey = { + + val owner = instance.id.member + + val currentKey = findPublicKey(fingerprint, instance.topology, owner) + val newKey = currentKey.purpose match { + case KeyPurpose.Signing => instance.keys.secret.register_kms_signing_key(newKmsKeyId) + case KeyPurpose.Encryption => instance.keys.secret.register_kms_encryption_key(newKmsKeyId) + } + + // Rotate the key for the node in the topology management + instance.topology.owner_to_key_mappings.rotate_key( + instance, + owner, + currentKey, + newKey, + ) + newKey + } + + @Help.Summary("Rotate a node's public/private key pair") + @Help.Description( + """Rotates an existing encryption or signing key. NOTE: A namespace root or intermediate + signing key CANNOT be rotated by this command. 
+ |The fingerprint of the key we want to rotate.""" + ) + def rotate_node_key(fingerprint: String, name: Option[String] = None): PublicKey = { + val owner = instance.id.member + + val currentKey = findPublicKey(fingerprint, instance.topology, owner) + + val newKey = name match { + case Some(_) => regenerateKey(currentKey, name) + case None => + regenerateKey( + currentKey, + generateNewNameForRotatedKey(fingerprint, consoleEnvironment.environment.clock), + ) + } + + // Rotate the key for the node in the topology management + instance.topology.owner_to_key_mappings.rotate_key( + instance, + owner, + currentKey, + newKey, + ) + newKey + } + + @Help.Summary("Rotate the node's public/private key pairs") + @Help.Description( + """ + |For a participant node it rotates the signing and encryption key pair. + |For a domain or domain manager node it rotates the signing key pair as those nodes do not have an encryption key pair. + |For a sequencer or mediator node use `rotate_node_keys` with a domain manager reference as an argument. 
+ |NOTE: Namespace root or intermediate signing keys are NOT rotated by this command.""" + ) + def rotate_node_keys(): Unit = { + + val owner = instance.id.member + + // Find the current keys + val currentKeys = findPublicKeys(instance.topology, owner) + + currentKeys.foreach { currentKey => + val newKey = + regenerateKey( + currentKey, + generateNewNameForRotatedKey( + currentKey.fingerprint.unwrap, + consoleEnvironment.environment.clock, + ), + ) + + // Rotate the key for the node in the topology management + instance.topology.owner_to_key_mappings.rotate_key( + instance, + owner, + currentKey, + newKey, + ) + } + } + + /** Helper to find public keys for topology/x shared between community and enterprise + */ + protected def findPublicKeys( + topologyAdmin: TopologyAdministrationGroupCommon, + owner: Member, + ): Seq[PublicKey] = + topologyAdmin match { + case t: TopologyAdministrationGroup => + t.owner_to_key_mappings + .list( + filterStore = AuthorizedStore.filterName, + filterKeyOwnerUid = owner.filterString, + filterKeyOwnerType = Some(owner.code), + ) + .map(_.item.key) + case tx: TopologyAdministrationGroupX => + tx.owner_to_key_mappings + .list( + filterStore = AuthorizedStore.filterName, + filterKeyOwnerUid = owner.filterString, + filterKeyOwnerType = Some(owner.code), + ) + .flatMap(_.item.keys) + case _ => + throw new IllegalStateException( + "Impossible to encounter topology admin group besides X and non-X" + ) + } + + /** Helper to name new keys generated during a rotation with a ...-rotated- tag to better identify + * the new keys after a rotation + */ + protected def generateNewNameForRotatedKey( + currentKeyId: String, + clock: Clock, + ): Option[String] = { + val keyName = instance.keys.secret + .list() + .find(_.publicKey.fingerprint.unwrap == currentKeyId) + .flatMap(_.name) + + val rotatedKeyRegExp = "(.*-rotated).*".r + + keyName.map(_.unwrap) match { + case Some(rotatedKeyRegExp(currentName)) => + Some(s"$currentName-${clock.now.show}") + 
case Some(currentName) => + Some(s"$currentName-rotated-${clock.now.show}") + case None => None + } + } + + @Help.Summary("Change the wrapper key for encrypted private keys store") + @Help.Description( + """Change the wrapper key (e.g. AWS KMS key) being used to encrypt the private keys in the store. + |newWrapperKeyId: The optional new wrapper key id to be used. If the wrapper key id is empty Canton will generate a new key based on the current configuration.""" + ) + def rotate_wrapper_key( + newWrapperKeyId: String = "" + ): Unit = { + consoleEnvironment.run { + adminCommand(VaultAdminCommands.RotateWrapperKey(newWrapperKeyId)) + } + } + + @Help.Summary("Get the wrapper key id that is used for the encrypted private keys store") + def get_wrapper_key_id(): String = { + consoleEnvironment.run { + adminCommand(VaultAdminCommands.GetWrapperKeyId()) + } + } + + @Help.Summary("Upload (load and import) a key pair from file") + def upload(filename: String, name: Option[String]): Unit = { + val keyPair = BinaryFileUtil.tryReadByteStringFromFile(filename) + upload(keyPair, name) + } + + @Help.Summary("Upload a key pair") + def upload( + pairBytes: ByteString, + name: Option[String], + ): Unit = + consoleEnvironment.run { + adminCommand( + VaultAdminCommands.ImportKeyPair(pairBytes, name) + ) + } + + // TODO(i13613): Remove feature flag + @Help.Summary("Download key pair", FeatureFlag.Preview) + @Help.Description( + """Download the key pair with the private and public key in its binary representation. 
+ |fingerprint: The identifier of the key pair to download + |protocolVersion: The (optional) protocol version that defines the serialization of the key pair""" + ) + def download( + fingerprint: Fingerprint, + protocolVersion: ProtocolVersion = ProtocolVersion.latest, + ): ByteString = { + check(FeatureFlag.Preview) { + consoleEnvironment.run { + adminCommand( + VaultAdminCommands.ExportKeyPair(fingerprint, protocolVersion) + ) + } + } + } + + protected def writeToFile(outputFile: String, bytes: ByteString): Unit = { + val file = new File(outputFile) + file.createNewFile() + // only current user has permissions with the file + try { + Files.setPosixFilePermissions(file.toPath, Set(OWNER_READ, OWNER_WRITE).asJava) + } catch { + // the above will throw on non-posix systems such as windows + case _: UnsupportedOperationException => + } + BinaryFileUtil.writeByteStringToFile(outputFile, bytes) + } + + @Help.Summary("Download key pair and save it to a file") + def download_to( + fingerprint: Fingerprint, + outputFile: String, + protocolVersion: ProtocolVersion = ProtocolVersion.latest, + ): Unit = { + writeToFile(outputFile, download(fingerprint, protocolVersion)) + } + + @Help.Summary("Delete private key") + def delete(fingerprint: Fingerprint, force: Boolean = false): Unit = { + def deleteKey(): Unit = + consoleEnvironment.run { + adminCommand( + VaultAdminCommands.DeleteKeyPair(fingerprint) + ) + } + + if (force) + deleteKey() + else { + println( + s"Are you sure you want to delete the private key with fingerprint $fingerprint? 
yes/no" + ) + println(s"This action is irreversible and can have undesired effects if done carelessly.") + print("> ") + val answer = Option(scala.io.StdIn.readLine()) + if (answer.exists(_.toLowerCase == "yes")) deleteKey() + } + } + +} + +class PublicKeyAdministration( + runner: AdminCommandRunner, + consoleEnvironment: ConsoleEnvironment, +) extends Helpful { + + import runner.* + + private def defaultLimit: PositiveInt = + consoleEnvironment.environment.config.parameters.console.defaultLimit + + @Help.Summary("Upload public key") + @Help.Description( + """Import a public key and store it together with a name used to provide some context to that key.""" + ) + def upload(keyBytes: ByteString, name: Option[String]): Fingerprint = consoleEnvironment.run { + adminCommand( + VaultAdminCommands.ImportPublicKey(keyBytes, name) + ) + } + + @Help.Summary("Upload public key") + @Help.Description( + "Load a public key from a file and store it together with a name used to provide some context to that key."
+ ) + def upload(filename: String, name: Option[String]): Fingerprint = consoleEnvironment.run { + BinaryFileUtil.readByteStringFromFile(filename) match { + case Right(bytes) => adminCommand(VaultAdminCommands.ImportPublicKey(bytes, name)) + case Left(err) => throw new IllegalArgumentException(err) + } + } + + @Help.Summary("Download public key") + def download( + fingerprint: Fingerprint, + protocolVersion: ProtocolVersion = ProtocolVersion.latest, + ): ByteString = { + val keys = list(fingerprint.unwrap) + if (keys.sizeCompare(1) == 0) { // vector doesn't like matching on Nil + val key = keys.headOption.getOrElse(sys.error("no key")) + key.publicKey.toByteString(protocolVersion) + } else { + if (keys.isEmpty) throw new IllegalArgumentException(s"no key found for [$fingerprint]") + else + throw new IllegalArgumentException( + s"found multiple results for [$fingerprint]: ${keys.map(_.publicKey.fingerprint)}" + ) + } + } + + @Help.Summary("Download public key and save it to a file") + def download_to( + fingerprint: Fingerprint, + outputFile: String, + protocolVersion: ProtocolVersion = ProtocolVersion.latest, + ): Unit = { + BinaryFileUtil.writeByteStringToFile( + outputFile, + download(fingerprint, protocolVersion), + ) + } + + @Help.Summary("List public keys in registry") + @Help.Description("""Returns all public keys that have been added to the key registry. + Optional arguments can be used for filtering.""") + def list(filterFingerprint: String = "", filterContext: String = ""): Seq[PublicKeyWithName] = + consoleEnvironment.run { + adminCommand(VaultAdminCommands.ListPublicKeys(filterFingerprint, filterContext)) + } + + @Help.Summary("List active owners with keys for given search arguments.") + @Help.Description("""This command allows deep inspection of the topology state. + |The response includes the public keys. + |Optional filterKeyOwnerType type can be 'ParticipantId.Code' , 'MediatorId.Code','SequencerId.Code', 'DomainTopologyManagerId.Code'. 
+ |""") + def list_owners( + filterKeyOwnerUid: String = "", + filterKeyOwnerType: Option[MemberCode] = None, + filterDomain: String = "", + asOf: Option[Instant] = None, + limit: PositiveInt = defaultLimit, + ): Seq[ListKeyOwnersResult] = consoleEnvironment.run { + adminCommand( + TopologyAdminCommands.Aggregation + .ListKeyOwners(filterDomain, filterKeyOwnerType, filterKeyOwnerUid, asOf, limit) + ) + } + + @Help.Summary("List keys for given keyOwner.") + @Help.Description( + """This command is a convenience wrapper for `list_key_owners`, taking an explicit keyOwner as search argument. + |The response includes the public keys.""" + ) + def list_by_owner( + keyOwner: Member, + filterDomain: String = "", + asOf: Option[Instant] = None, + limit: PositiveInt = defaultLimit, + ): Seq[ListKeyOwnersResult] = consoleEnvironment.run { + adminCommand( + TopologyAdminCommands.Aggregation.ListKeyOwners( + filterDomain = filterDomain, + filterKeyOwnerType = Some(keyOwner.code), + filterKeyOwnerUid = keyOwner.uid.toProtoPrimitive, + asOf, + limit, + ) + ) + } +} + +class KeyAdministrationGroup( + instance: InstanceReferenceCommon, + runner: AdminCommandRunner, + consoleEnvironment: ConsoleEnvironment, + loggerFactory: NamedLoggerFactory, +) extends Helpful { + + private lazy val publicAdmin = + new PublicKeyAdministration(runner, consoleEnvironment) + private lazy val secretAdmin = + new SecretKeyAdministration(instance, runner, consoleEnvironment, loggerFactory) + + @Help.Summary("Manage public keys") + @Help.Group("Public keys") + def public: PublicKeyAdministration = publicAdmin + + @Help.Summary("Manage secret keys") + @Help.Group("Secret keys") + def secret: SecretKeyAdministration = secretAdmin + +} + +class LocalSecretKeyAdministration( + instance: InstanceReferenceCommon, + runner: AdminCommandRunner, + consoleEnvironment: ConsoleEnvironment, + crypto: => Crypto, + loggerFactory: NamedLoggerFactory, +)(implicit executionContext: ExecutionContext) + extends 
SecretKeyAdministration(instance, runner, consoleEnvironment, loggerFactory) { + + private def run[V](eitherT: EitherT[Future, String, V], action: String): V = { + import TraceContext.Implicits.Empty.* + consoleEnvironment.environment.config.parameters.timeouts.processing.default + .await(action)(eitherT.value) match { + case Left(error) => + throw new IllegalArgumentException(s"Problem while $action. Error: $error") + case Right(value) => value + } + } + + @Help.Summary("Download key pair") + override def download( + fingerprint: Fingerprint, + protocolVersion: ProtocolVersion = ProtocolVersion.latest, + ): ByteString = + TraceContext.withNewTraceContext { implicit traceContext => + val cmd = for { + cryptoPrivateStore <- crypto.cryptoPrivateStore.toExtended + .toRight( + "The selected crypto provider does not support exporting of private keys." + ) + .toEitherT[Future] + privateKey <- cryptoPrivateStore + .exportPrivateKey(fingerprint) + .leftMap(_.toString) + .subflatMap(_.toRight(s"no private key found for [$fingerprint]")) + .leftMap(err => s"Error retrieving private key [$fingerprint] $err") + publicKey <- crypto.cryptoPublicStore + .publicKey(fingerprint) + .leftMap(_.toString) + .subflatMap(_.toRight(s"no public key found for [$fingerprint]")) + .leftMap(err => s"Error retrieving public key [$fingerprint] $err") + keyPair: CryptoKeyPair[PublicKey, PrivateKey] = (publicKey, privateKey) match { + case (pub: SigningPublicKey, pkey: SigningPrivateKey) => + new SigningKeyPair(pub, pkey) + case (pub: EncryptionPublicKey, pkey: EncryptionPrivateKey) => + new EncryptionKeyPair(pub, pkey) + case _ => sys.error("public and private keys must have same purpose") + } + keyPairBytes = keyPair.toByteString(protocolVersion) + } yield keyPairBytes + run(cmd, "exporting key pair") + } + + @Help.Summary("Download key pair and save it to a file") + override def download_to( + fingerprint: Fingerprint, + outputFile: String, + protocolVersion: ProtocolVersion = 
ProtocolVersion.latest, + ): Unit = + run( + EitherT.rightT(writeToFile(outputFile, download(fingerprint, protocolVersion))), + "saving key pair to file", + ) + +} + +class LocalKeyAdministrationGroup( + instance: InstanceReferenceCommon, + runner: AdminCommandRunner, + consoleEnvironment: ConsoleEnvironment, + crypto: => Crypto, + loggerFactory: NamedLoggerFactory, +)(implicit executionContext: ExecutionContext) + extends KeyAdministrationGroup(instance, runner, consoleEnvironment, loggerFactory) { + + private lazy val localSecretAdmin: LocalSecretKeyAdministration = + new LocalSecretKeyAdministration(instance, runner, consoleEnvironment, crypto, loggerFactory) + + @Help.Summary("Manage secret keys") + @Help.Group("Secret keys") + override def secret: LocalSecretKeyAdministration = localSecretAdmin +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/package.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/package.scala new file mode 100644 index 0000000000..df8a5b990f --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/package.scala @@ -0,0 +1,45 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.console + +import cats.syntax.either.* +import cats.syntax.functorFilter.* +import com.digitalasset.canton.DiscardOps +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.logging.ErrorLoggingContext + +package object commands { + + /** Runs every body, even if some of them fail with a `CommandExecutionFailedException`. + * Succeeds, if all bodies succeed. + * If some body throws a `Throwable` other than `CommandExecutionFailedException`, the execution terminates immediately with that exception. 
+ * If some body throws a `CommandExecutionFailedException`, subsequent bodies are still executed and afterwards the + * method throws a `CommandExecutionFailedException`, preferring `CantonInternalErrors` over `CommandFailure`. + */ + private[commands] def runEvery[A](bodies: Seq[() => Unit]): Unit = { + val exceptions = bodies.mapFilter(body => + try { + body() + None + } catch { + case e: CommandFailure => Some(e) + case e: CantonInternalError => Some(e) + } + ) + // It is ok to discard all exceptions except one, because: + // - The exceptions do not have meaningful messages. Error messages are logged instead. + // - The exceptions all have the same stack trace. + exceptions.collectFirst { case e: CantonInternalError => throw e }.discard + exceptions.headOption.foreach(throw _) + } + + private[commands] def timestampFromInstant( + instant: java.time.Instant + )(implicit loggingContext: ErrorLoggingContext): CantonTimestamp = + CantonTimestamp.fromInstant(instant).valueOr { err => + loggingContext.logger.error(err)(loggingContext.traceContext) + throw new CommandFailure() + } + +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/package.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/package.scala new file mode 100644 index 0000000000..63522caec5 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/console/package.scala @@ -0,0 +1,44 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton + +import com.digitalasset.canton.console.CommandErrors.GenericCommandError + +/** General `console` utilities + */ +package object console { + + /** Turn an either into a command result. + * Left is considered an error, Right is successful.
+ */ + implicit class EitherToCommandResultExtensions[A, B](either: Either[A, B]) { + def toResult(errorDescription: A => String): ConsoleCommandResult[B] = + either.fold[ConsoleCommandResult[B]]( + err => GenericCommandError(errorDescription(err)), + CommandSuccessful[B], + ) + + def toResult[Result]( + errorDescription: A => String, + resultMap: B => Result, + ): ConsoleCommandResult[Result] = + either.fold[ConsoleCommandResult[Result]]( + err => GenericCommandError(errorDescription(err)), + result => CommandSuccessful(resultMap(result)), + ) + } + + /** Turn an either where Left is a error message into a ConsoleCommandResult. + */ + implicit class StringErrorEitherToCommandResultExtensions[A](either: Either[String, A]) { + def toResult: ConsoleCommandResult[A] = + either.fold[ConsoleCommandResult[A]](GenericCommandError, CommandSuccessful[A]) + } + + /** Strip the Object suffix from the name of the provided class + */ + def objectClassNameWithoutSuffix(c: Class[_]): String = + c.getName.stripSuffix("$").replace('$', '.') + +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/environment/CommunityEnvironment.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/environment/CommunityEnvironment.scala new file mode 100644 index 0000000000..c9fac22145 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/environment/CommunityEnvironment.scala @@ -0,0 +1,147 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.environment + +import com.digitalasset.canton.admin.api.client.data.CommunityCantonStatus +import com.digitalasset.canton.config.{CantonCommunityConfig, TestingConfigInternal} +import com.digitalasset.canton.console.{ + CantonHealthAdministration, + CommunityCantonHealthAdministration, + CommunityHealthDumpGenerator, + CommunityLocalDomainReference, + CommunityRemoteDomainReference, + ConsoleEnvironment, + ConsoleEnvironmentBinding, + ConsoleGrpcAdminCommandRunner, + ConsoleOutput, + DomainReference, + FeatureFlag, + GrpcAdminCommandRunner, + HealthDumpGenerator, + Help, + LocalDomainReference, + LocalInstanceReferenceCommon, + LocalParticipantReference, + NodeReferences, + StandardConsoleOutput, +} +import com.digitalasset.canton.domain.DomainNodeBootstrap +import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.participant.{ParticipantNodeBootstrap, ParticipantNodeBootstrapX} +import com.digitalasset.canton.resource.{CommunityDbMigrationsFactory, DbMigrationsFactory} + +class CommunityEnvironment( + override val config: CantonCommunityConfig, + override val testingConfig: TestingConfigInternal, + override val loggerFactory: NamedLoggerFactory, +) extends Environment { + + override type Config = CantonCommunityConfig + + override protected val participantNodeFactory + : ParticipantNodeBootstrap.Factory[Config#ParticipantConfigType, ParticipantNodeBootstrap] = + ParticipantNodeBootstrap.CommunityParticipantFactory + override protected val participantNodeFactoryX + : ParticipantNodeBootstrap.Factory[Config#ParticipantConfigType, ParticipantNodeBootstrapX] = + ParticipantNodeBootstrapX.CommunityParticipantFactory + + override protected val domainFactory: DomainNodeBootstrap.Factory[Config#DomainConfigType] = + DomainNodeBootstrap.CommunityDomainFactory + override type Console = CommunityConsoleEnvironment + + override protected def _createConsole( + 
consoleOutput: ConsoleOutput, + createAdminCommandRunner: ConsoleEnvironment => ConsoleGrpcAdminCommandRunner, + ): CommunityConsoleEnvironment = + new CommunityConsoleEnvironment(this, consoleOutput, createAdminCommandRunner) + + override protected lazy val migrationsFactory: DbMigrationsFactory = + new CommunityDbMigrationsFactory(loggerFactory) + + override def isEnterprise: Boolean = false + + def createHealthDumpGenerator( + commandRunner: GrpcAdminCommandRunner + ): HealthDumpGenerator[CommunityCantonStatus] = { + new CommunityHealthDumpGenerator(this, commandRunner) + } +} + +object CommunityEnvironmentFactory extends EnvironmentFactory[CommunityEnvironment] { + override def create( + config: CantonCommunityConfig, + loggerFactory: NamedLoggerFactory, + testingConfigInternal: TestingConfigInternal, + ): CommunityEnvironment = + new CommunityEnvironment(config, testingConfigInternal, loggerFactory) +} + +class CommunityConsoleEnvironment( + val environment: CommunityEnvironment, + val consoleOutput: ConsoleOutput = StandardConsoleOutput, + protected val createAdminCommandRunner: ConsoleEnvironment => ConsoleGrpcAdminCommandRunner = + new ConsoleGrpcAdminCommandRunner(_), +) extends ConsoleEnvironment { + override type Env = CommunityEnvironment + override type DomainLocalRef = CommunityLocalDomainReference + override type DomainRemoteRef = CommunityRemoteDomainReference + override type Status = CommunityCantonStatus + + private lazy val health_ = new CommunityCantonHealthAdministration(this) + override protected val consoleEnvironmentBindings = new ConsoleEnvironmentBinding() + + @Help.Summary("Environment health inspection") + @Help.Group("Health") + override def health: CantonHealthAdministration[Status] = + health_ + + override def startupOrderPrecedence(instance: LocalInstanceReferenceCommon): Int = + instance match { + case _: LocalDomainReference => 1 + case _: LocalParticipantReference => 2 + case _ => 3 + } + + override protected def 
createDomainReference(name: String): DomainLocalRef = + new CommunityLocalDomainReference(this, name, environment.executionContext) + + override protected def createRemoteDomainReference(name: String): DomainRemoteRef = + new CommunityRemoteDomainReference(this, name) + + override protected def domainsTopLevelValue( + h: TopLevelValue.Partial, + domains: NodeReferences[ + DomainReference, + CommunityRemoteDomainReference, + CommunityLocalDomainReference, + ], + ): TopLevelValue[ + NodeReferences[DomainReference, CommunityRemoteDomainReference, CommunityLocalDomainReference] + ] = + h(domains) + + override protected def localDomainTopLevelValue( + h: TopLevelValue.Partial, + d: CommunityLocalDomainReference, + ): TopLevelValue[CommunityLocalDomainReference] = + h(d) + + override protected def remoteDomainTopLevelValue( + h: TopLevelValue.Partial, + d: CommunityRemoteDomainReference, + ): TopLevelValue[CommunityRemoteDomainReference] = + h(d) + + override protected def localDomainHelpItems( + scope: Set[FeatureFlag], + localDomain: CommunityLocalDomainReference, + ): Seq[Help.Item] = + Help.getItems(localDomain, baseTopic = Seq("$domain"), scope = scope) + + override protected def remoteDomainHelpItems( + scope: Set[FeatureFlag], + remoteDomain: CommunityRemoteDomainReference, + ): Seq[Help.Item] = + Help.getItems(remoteDomain, baseTopic = Seq("$domain"), scope = scope) +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/environment/Environment.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/environment/Environment.scala new file mode 100644 index 0000000000..f26dcba3bb --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/environment/Environment.scala @@ -0,0 +1,627 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.environment + +import cats.data.EitherT +import cats.instances.option.* +import cats.syntax.apply.* +import cats.syntax.either.* +import cats.syntax.foldable.* +import cats.syntax.traverse.* +import com.daml.grpc.adapter.ExecutionSequencerFactory +import com.digitalasset.canton.concurrent.* +import com.digitalasset.canton.config.* +import com.digitalasset.canton.console.{ + ConsoleEnvironment, + ConsoleGrpcAdminCommandRunner, + ConsoleOutput, + GrpcAdminCommandRunner, + HealthDumpGenerator, + StandardConsoleOutput, +} +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.domain.DomainNodeBootstrap +import com.digitalasset.canton.environment.CantonNodeBootstrap.HealthDumpFunction +import com.digitalasset.canton.environment.Environment.* +import com.digitalasset.canton.environment.ParticipantNodes.{ParticipantNodesOld, ParticipantNodesX} +import com.digitalasset.canton.health.{HealthCheck, HealthServer} +import com.digitalasset.canton.lifecycle.Lifecycle +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.metrics.MetricsConfig.Prometheus +import com.digitalasset.canton.metrics.MetricsFactory +import com.digitalasset.canton.participant.domain.DomainConnectionConfig +import com.digitalasset.canton.participant.{ + ParticipantNode, + ParticipantNodeBootstrap, + ParticipantNodeBootstrapCommon, + ParticipantNodeBootstrapX, + ParticipantNodeCommon, +} +import com.digitalasset.canton.resource.DbMigrationsFactory +import com.digitalasset.canton.sequencing.SequencerConnections +import com.digitalasset.canton.telemetry.{ConfiguredOpenTelemetry, OpenTelemetryFactory} +import com.digitalasset.canton.time.EnrichedDurations.* +import com.digitalasset.canton.time.* +import com.digitalasset.canton.tracing.TraceContext.withNewTraceContext +import com.digitalasset.canton.tracing.{NoTracing, TraceContext, TracerProvider} +import 
com.digitalasset.canton.util.FutureInstances.parallelFuture +import com.digitalasset.canton.util.{MonadUtil, PekkoUtil, SingleUseCell} +import com.digitalasset.canton.{DiscardOps, DomainAlias} +import com.google.common.annotations.VisibleForTesting +import io.circe.Encoder +import io.opentelemetry.api.trace.Tracer +import org.apache.pekko.actor.ActorSystem +import org.slf4j.bridge.SLF4JBridgeHandler + +import java.util.concurrent.ScheduledExecutorService +import scala.collection.mutable.ListBuffer +import scala.concurrent.{Future, blocking} +import scala.util.control.NonFatal + +/** Holds all significant resources held by this process. + */ +trait Environment extends NamedLogging with AutoCloseable with NoTracing { + + type Config <: CantonConfig + type Console <: ConsoleEnvironment + + val config: Config + val testingConfig: TestingConfigInternal + + val loggerFactory: NamedLoggerFactory + + lazy val configuredOpenTelemetry: ConfiguredOpenTelemetry = { + val isPrometheusEnabled = config.monitoring.metrics.reporters.exists { + case _: Prometheus => true + case _ => false + } + OpenTelemetryFactory.initializeOpenTelemetry( + testingConfig.initializeGlobalOpenTelemetry, + isPrometheusEnabled, + config.monitoring.tracing.tracer, + config.monitoring.metrics.histograms, + loggerFactory, + ) + } + + // public for buildDocs task to be able to construct a fake participant and domain to document available metrics via reflection + lazy val metricsFactory: MetricsFactory = + MetricsFactory.forConfig( + config.monitoring.metrics, + configuredOpenTelemetry.openTelemetry, + testingConfig.metricsFactoryType, + ) + protected def participantNodeFactory + : ParticipantNodeBootstrap.Factory[Config#ParticipantConfigType, ParticipantNodeBootstrap] + protected def participantNodeFactoryX + : ParticipantNodeBootstrap.Factory[Config#ParticipantConfigType, ParticipantNodeBootstrapX] + protected def domainFactory: DomainNodeBootstrap.Factory[Config#DomainConfigType] + protected def 
migrationsFactory: DbMigrationsFactory + + def isEnterprise: Boolean + + def createConsole( + consoleOutput: ConsoleOutput = StandardConsoleOutput, + createAdminCommandRunner: ConsoleEnvironment => ConsoleGrpcAdminCommandRunner = + new ConsoleGrpcAdminCommandRunner(_), + ): Console = { + val console = _createConsole(consoleOutput, createAdminCommandRunner) + healthDumpGenerator + .putIfAbsent(createHealthDumpGenerator(console.grpcAdminCommandRunner)) + .discard + console + } + + protected def _createConsole( + consoleOutput: ConsoleOutput = StandardConsoleOutput, + createAdminCommandRunner: ConsoleEnvironment => ConsoleGrpcAdminCommandRunner = + new ConsoleGrpcAdminCommandRunner(_), + ): Console + + protected def createHealthDumpGenerator( + commandRunner: GrpcAdminCommandRunner + ): HealthDumpGenerator[_] + + /* We can't reliably use the health administration instance of the console because: + * 1) it's tied to the console environment, which we don't have access to yet when the environment is instantiated + * 2) there might never be a console environment when running in daemon mode + * Therefore we create an immutable lazy value for the health administration that can be set either with the console + * health admin when/if it gets created, or with a headless health admin, whichever comes first. 
+ */ + private val healthDumpGenerator = new SingleUseCell[HealthDumpGenerator[_]] + + // Function passed down to the node boostrap used to generate a health dump file + val writeHealthDumpToFile: HealthDumpFunction = () => + Future { + healthDumpGenerator + .getOrElse { + val tracerProvider = + TracerProvider.Factory(configuredOpenTelemetry, "admin_command_runner") + implicit val tracer: Tracer = tracerProvider.tracer + + val commandRunner = new GrpcAdminCommandRunner(this, config.parameters.timeouts.console) + val newGenerator = createHealthDumpGenerator(commandRunner) + val previous = healthDumpGenerator.putIfAbsent(newGenerator) + previous match { + // If somehow the cell was set concurrently in the meantime, close the newly created command runner and use + // the existing one + case Some(value) => + commandRunner.close() + value + case None => + newGenerator + } + } + .generateHealthDump( + better.files.File.newTemporaryFile( + prefix = "canton-remote-health-dump" + ) + ) + } + + installJavaUtilLoggingBridge() + logger.debug(config.portDescription) + + implicit val scheduler: ScheduledExecutorService = + Threading.singleThreadScheduledExecutor( + loggerFactory.threadName + "-env-scheduler", + noTracingLogger, + ) + + private val numThreads = Threading.detectNumberOfThreads(noTracingLogger) + implicit val executionContext: ExecutionContextIdlenessExecutorService = + Threading.newExecutionContext( + loggerFactory.threadName + "-env-execution-context", + noTracingLogger, + Option.when(config.monitoring.metrics.reportExecutionContextMetrics)( + metricsFactory.executionServiceMetrics + ), + numThreads, + ) + + private val deadlockConfig = config.monitoring.deadlockDetection + protected def timeouts: ProcessingTimeout = config.parameters.timeouts.processing + + protected val futureSupervisor = + if (config.monitoring.logSlowFutures) + new FutureSupervisor.Impl(timeouts.slowFutureWarn) + else FutureSupervisor.Noop + + private val monitorO = if 
(deadlockConfig.enabled) { + val mon = new ExecutionContextMonitor( + loggerFactory, + deadlockConfig.interval.toInternal, + deadlockConfig.warnInterval.toInternal, + timeouts, + ) + mon.monitor(executionContext) + Some(mon) + } else None + + implicit val actorSystem: ActorSystem = PekkoUtil.createActorSystem(loggerFactory.threadName) + + implicit val executionSequencerFactory: ExecutionSequencerFactory = + PekkoUtil.createExecutionSequencerFactory( + loggerFactory.threadName + "-admin-workflow-services", + // don't log the number of threads twice, as we log it already when creating the first pool + NamedLogging.noopNoTracingLogger, + ) + + // additional closeables + private val userCloseables = ListBuffer[AutoCloseable]() + + /** Sim-clock if environment is using static time + */ + val simClock: Option[DelegatingSimClock] = config.parameters.clock match { + case ClockConfig.SimClock => + logger.info("Starting environment with sim-clock") + Some( + new DelegatingSimClock( + () => + runningNodes.map(_.clock).collect { case c: SimClock => + c + }, + loggerFactory = loggerFactory, + ) + ) + case ClockConfig.WallClock(_) => None + case ClockConfig.RemoteClock(_) => None + } + + val clock: Clock = simClock.getOrElse(createClock(None)) + + protected def createClock(nodeTypeAndName: Option[(String, String)]): Clock = { + val clockLoggerFactory = nodeTypeAndName.fold(loggerFactory) { case (nodeType, name) => + loggerFactory.append(nodeType, name) + } + config.parameters.clock match { + case ClockConfig.SimClock => + val parent = simClock.getOrElse(sys.error("This should not happen")) + val clock = new SimClock( + parent.start, + clockLoggerFactory, + ) + clock.advanceTo(parent.now) + clock + case ClockConfig.RemoteClock(clientConfig) => + new RemoteClock(clientConfig, config.parameters.timeouts.processing, clockLoggerFactory) + case ClockConfig.WallClock(skewW) => + val skewMs = skewW.asJava.toMillis + val tickTock = + if (skewMs == 0) TickTock.Native + else new 
TickTock.RandomSkew(Math.min(skewMs, Int.MaxValue).toInt) + new WallClock(timeouts, clockLoggerFactory, tickTock) + } + } + + private val testingTimeService = new TestingTimeService(clock, () => simClocks) + + protected lazy val healthCheck: Option[HealthCheck] = config.monitoring.health.map(config => + HealthCheck(config.check, metricsFactory.health, timeouts, loggerFactory)(this) + ) + + private val healthServer = + (healthCheck, config.monitoring.health).mapN { case (check, config) => + new HealthServer(check, config.server.address, config.server.port, timeouts, loggerFactory) + } + + private val envQueueSize = () => executionContext.queueSize + metricsFactory.forEnv.registerExecutionContextQueueSize(envQueueSize) + + lazy val domains = + new DomainNodes( + createDomain, + migrationsFactory, + timeouts, + config.domainsByString, + config.domainNodeParametersByString, + loggerFactory, + ) + lazy val participants = + new ParticipantNodesOld[Config#ParticipantConfigType]( + createParticipant, + migrationsFactory, + timeouts, + config.participantsByString, + config.participantNodeParametersByString, + loggerFactory, + ) + lazy val participantsX = + new ParticipantNodesX[Config#ParticipantConfigType]( + createParticipantX, + migrationsFactory, + timeouts, + config.participantsByStringX, + config.participantNodeParametersByString, + loggerFactory, + ) + + // convenient grouping of all node collections for performing operations + // intentionally defined in the order we'd like to start them + protected def allNodes: List[Nodes[CantonNode, CantonNodeBootstrap[CantonNode]]] = + List(domains, participants, participantsX) + private def runningNodes: Seq[CantonNodeBootstrap[CantonNode]] = allNodes.flatMap(_.running) + + private def autoConnectLocalNodes(): Either[StartupError, Unit] = { + // TODO(#14048) extend this to x-nodes + val activeDomains = domains.running + .filter(_.isActive) + .filter(_.config.topology.open) + def toDomainConfig(domain: DomainNodeBootstrap): 
Either[StartupError, DomainConnectionConfig] = + (for { + connection <- domain.config.sequencerConnectionConfig.toConnection + name <- DomainAlias.create(domain.name.unwrap) + sequencerConnections = SequencerConnections.single(connection) + } yield DomainConnectionConfig(name, sequencerConnections)).leftMap(err => + StartFailed(domain.name.unwrap, s"Can not parse config for auto-connect: ${err}") + ) + val connectParticipants = + participants.running.filter(_.isActive).flatMap(x => x.getNode.map((x.name, _)).toList) + def connect( + name: String, + node: ParticipantNode, + configs: Seq[DomainConnectionConfig], + ): Either[StartupError, Unit] = + configs.traverse_ { config => + val connectET = + node + .autoConnectLocalDomain(config) + .leftMap(err => StartFailed(name, err.toString)) + .onShutdown(Left(StartFailed(name, "aborted due to shutdown"))) + this.config.parameters.timeouts.processing.unbounded + .await("auto-connect to local domain")(connectET.value) + } + logger.info(s"Auto-connecting local participants ${connectParticipants + .map(_._1.unwrap)} to local domains ${activeDomains.map(_.name.unwrap)}") + activeDomains + .traverse(toDomainConfig) + .traverse_(config => + connectParticipants.traverse_ { case (name, node) => connect(name.unwrap, node, config) } + ) + } + + /** Try to startup all nodes in the configured environment and reconnect them to one another. + * The first error will prevent further nodes from being started. + * If an error is returned previously started nodes will not be stopped. 
+   */
+  def startAndReconnect(autoConnectLocal: Boolean): Either[StartupError, Unit] =
+    withNewTraceContext { implicit traceContext =>
+      if (config.parameters.manualStart) {
+        logger.info("Manual start requested.")
+        Right(())
+      } else {
+        logger.info("Automatically starting all instances")
+        val startup = for {
+          _ <- startAll()
+          _ <- reconnectParticipants
+          _ <- if (autoConnectLocal) autoConnectLocalNodes() else Right(())
+        } yield writePortsFile()
+        // log results
+        startup
+          .bimap(
+            error => logger.error(s"Failed to start ${error.name}: ${error.message}"),
+            _ => logger.info("Successfully started all nodes"),
+          )
+          .discard
+        startup
+      }
+
+    }
+
+  private def writePortsFile()(implicit
+      traceContext: TraceContext
+  ): Unit = {
+    final case class ParticipantApis(ledgerApi: Int, adminApi: Int)
+    config.parameters.portsFile.foreach { portsFile =>
+      val items = participants.running.map { node =>
+        (
+          node.name.unwrap,
+          ParticipantApis(node.config.ledgerApi.port.unwrap, node.config.adminApi.port.unwrap),
+        )
+      }.toMap
+      import io.circe.syntax.*
+      implicit val encoder: Encoder[ParticipantApis] =
+        Encoder.forProduct2("ledgerApi", "adminApi") { apis =>
+          (apis.ledgerApi, apis.adminApi)
+        }
+      val out = items.asJson.spaces2
+      try {
+        better.files.File(portsFile).overwrite(out)
+      } catch {
+        case NonFatal(ex) =>
+          logger.warn(s"Failed to write to port file ${portsFile}. Will ignore the error", ex)
+      }
+    }
+  }
+
+  private def reconnectParticipants(implicit
+      traceContext: TraceContext
+  ): Either[StartupError, Unit] = {
+    def reconnect(
+        instance: CantonNodeBootstrap[ParticipantNodeCommon] & ParticipantNodeBootstrapCommon
+    ): EitherT[Future, StartupError, Unit] = {
+      instance.getNode match {
+        case None =>
+          // should not happen, but if it does, display at least a warning.
+          if (instance.config.init.autoInit) {
+            logger.error(
+              s"Auto-initialisation failed or was too slow for ${instance.name}. Will not automatically re-connect to domains."
+            )
+          }
+          EitherT.rightT(())
+        case Some(node) =>
+          node
+            .reconnectDomainsIgnoreFailures()
+            .leftMap(err => StartFailed(instance.name.unwrap, err.toString))
+            .onShutdown(Left(StartFailed(instance.name.unwrap, "aborted due to shutdown")))
+
+      }
+    }
+    config.parameters.timeouts.processing.unbounded.await("reconnect-participants")(
+      MonadUtil
+        .parTraverseWithLimit_(config.parameters.getStartupParallelism(numThreads))(
+          (participants.running ++ participantsX.running)
+        )(reconnect)
+        .value
+    )
+  }
+
+  /** Return current time of environment
+    */
+  def now: CantonTimestamp = clock.now
+
+  private def allNodesWithGroup = {
+    allNodes.flatMap { nodeGroup =>
+      nodeGroup.names().map(name => (name, nodeGroup))
+    }
+  }
+
+  /** Start all instances described in the configuration
+    */
+  def startAll()(implicit traceContext: TraceContext): Either[StartupError, Unit] =
+    startNodes(allNodesWithGroup)
+
+  def stopAll()(implicit traceContext: TraceContext): Either[ShutdownError, Unit] =
+    stopNodes(allNodesWithGroup)
+
+  def startNodes(
+      nodes: Seq[(String, Nodes[CantonNode, CantonNodeBootstrap[CantonNode]])]
+  )(implicit traceContext: TraceContext): Either[StartupError, Unit] = {
+    runOnNodesOrderedByStartupGroup(
+      "startup-of-all-nodes",
+      nodes,
+      { case (name, nodes) => nodes.start(name) },
+      reverse = false,
+    )
+  }
+
+  def stopNodes(
+      nodes: Seq[(String, Nodes[CantonNode, CantonNodeBootstrap[CantonNode]])]
+  )(implicit traceContext: TraceContext): Either[ShutdownError, Unit] = {
+    runOnNodesOrderedByStartupGroup(
+      "stop-of-all-nodes",
+      nodes,
+      { case (name, nodes) => nodes.stop(name) },
+      reverse = true,
+    )
+  }
+
+  /** run some task on nodes ordered by their startup group
+    *
+    * @param reverse if true, then the order will be reverted (e.g.
for stop) + */ + private def runOnNodesOrderedByStartupGroup[T, I]( + name: String, + nodes: Seq[(String, Nodes[CantonNode, CantonNodeBootstrap[CantonNode]])], + task: (String, Nodes[CantonNode, CantonNodeBootstrap[CantonNode]]) => EitherT[Future, T, I], + reverse: Boolean, + )(implicit traceContext: TraceContext): Either[T, Unit] = { + config.parameters.timeouts.processing.unbounded.await(name)( + MonadUtil + .sequentialTraverse_( + nodes + // parallelize startup by groups (mediator / topology manager need the sequencer to run when we startup) + // as otherwise, they start to emit a few warnings which are ugly + .groupBy { case (_, group) => group.startUpGroup } + .toList + .sortBy { case (group, _) => if (reverse) -group else group } + ) { case (_, namesWithGroup) => + MonadUtil + .parTraverseWithLimit_(config.parameters.getStartupParallelism(numThreads))( + namesWithGroup.sortBy { case (name, _) => + name // sort by name to make the invocation order deterministic, hence also the result + } + ) { case (name, nodes) => task(name, nodes) } + } + .value + ) + } + + @VisibleForTesting + protected def createParticipant( + name: String, + participantConfig: Config#ParticipantConfigType, + ): ParticipantNodeBootstrap = { + participantNodeFactory + .create( + NodeFactoryArguments( + name, + participantConfig, + config.participantNodeParametersByString(name), + createClock(Some(ParticipantNodeBootstrap.LoggerFactoryKeyName -> name)), + metricsFactory.forParticipant(name), + testingConfig, + futureSupervisor, + loggerFactory.append(ParticipantNodeBootstrap.LoggerFactoryKeyName, name), + writeHealthDumpToFile, + configuredOpenTelemetry, + ), + testingTimeService, + ) + .valueOr(err => throw new RuntimeException(s"Failed to create participant bootstrap: $err")) + } + + protected def createParticipantX( + name: String, + participantConfig: Config#ParticipantConfigType, + ): ParticipantNodeBootstrapX = { + participantNodeFactoryX + .create( + NodeFactoryArguments( + name, + 
participantConfig, + // this is okay for x-nodes, as we've merged the two parameter sequences + config.participantNodeParametersByString(name), + createClock(Some(ParticipantNodeBootstrap.LoggerFactoryKeyName -> name)), + metricsFactory.forParticipant(name), + testingConfig, + futureSupervisor, + loggerFactory.append(ParticipantNodeBootstrap.LoggerFactoryKeyName, name), + writeHealthDumpToFile, + configuredOpenTelemetry, + ), + testingTimeService, + ) + .valueOr(err => throw new RuntimeException(s"Failed to create participant bootstrap: $err")) + } + + @VisibleForTesting + protected def createDomain( + name: String, + domainConfig: config.DomainConfigType, + ): DomainNodeBootstrap = + domainFactory + .create( + NodeFactoryArguments( + name, + domainConfig, + config.domainNodeParametersByString(name), + createClock(Some(DomainNodeBootstrap.LoggerFactoryKeyName -> name)), + metricsFactory.forDomain(name), + testingConfig, + futureSupervisor, + loggerFactory.append(DomainNodeBootstrap.LoggerFactoryKeyName, name), + writeHealthDumpToFile, + configuredOpenTelemetry, + ) + ) + .valueOr(err => throw new RuntimeException(s"Failed to create domain bootstrap: $err")) + + private def simClocks: Seq[SimClock] = { + val clocks = clock +: (participants.running.map(_.clock) ++ domains.running.map(_.clock)) + val simclocks = clocks.collect { case sc: SimClock => sc } + if (simclocks.sizeCompare(clocks) < 0) + logger.warn(s"Found non-sim clocks, testing time service will be broken.") + simclocks + } + + def addUserCloseable(closeable: AutoCloseable): Unit = userCloseables.append(closeable) + + override def close(): Unit = blocking(this.synchronized { + val closeActorSystem: AutoCloseable = + Lifecycle.toCloseableActorSystem(actorSystem, logger, timeouts) + + val closeExecutionContext: AutoCloseable = + ExecutorServiceExtensions(executionContext)(logger, timeouts) + val closeScheduler: AutoCloseable = ExecutorServiceExtensions(scheduler)(logger, timeouts) + + val closeHealthServer: 
AutoCloseable = () => healthServer.foreach(_.close()) + + val closeHeadlessHealthAdministration: AutoCloseable = + () => healthDumpGenerator.get.foreach(_.grpcAdminCommandRunner.close()) + + // the allNodes list is ordered in ideal startup order, so reverse to shutdown + val instances = + monitorO.toList ++ userCloseables ++ allNodes.reverse :+ metricsFactory :+ configuredOpenTelemetry :+ clock :+ closeHealthServer :+ + closeHeadlessHealthAdministration :+ executionSequencerFactory :+ closeActorSystem :+ closeExecutionContext :+ + closeScheduler + logger.info("Closing environment...") + Lifecycle.close((instances.toSeq): _*)(logger) + }) +} + +object Environment { + + /** Ensure all java.util.logging statements are routed to slf4j instead and can be configured with logback. + * This should be paired with adding a LevelChangePropagator to the logback configuration to avoid the performance impact + * of translating all JUL log statements (regardless of whether they are being used). + * See for more details: https://logback.qos.ch/manual/configuration.html#LevelChangePropagator + */ + def installJavaUtilLoggingBridge(): Unit = { + if (!SLF4JBridgeHandler.isInstalled) { + // we want everything going to slf4j so remove any default loggers + SLF4JBridgeHandler.removeHandlersForRootLogger() + SLF4JBridgeHandler.install() + } + } + +} + +trait EnvironmentFactory[E <: Environment] { + def create( + config: E#Config, + loggerFactory: NamedLoggerFactory, + testingConfigInternal: TestingConfigInternal = TestingConfigInternal(), + ): E +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/environment/Errors.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/environment/Errors.scala new file mode 100644 index 0000000000..e028c820a3 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/environment/Errors.scala @@ -0,0 +1,72 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its 
affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.environment
+
+import com.digitalasset.canton.resource.DbMigrations
+
+sealed trait StartupError extends Product with Serializable {
+
+  /** Node name */
+  val name: String
+  def message: String
+  override def toString: String = s"${name}: $message"
+
+}
+
+/** The current action cannot be performed when the instance for the given name is running.
+  */
+final case class AlreadyRunning(name: String) extends StartupError {
+  def message = s"node is already running: $name"
+}
+
+final case class FailedDatabaseMigration(name: String, cause: DbMigrations.Error)
+    extends StartupError {
+  def message: String = s"failed to migrate database of $name: $cause"
+}
+
+final case class FailedDatabaseVersionChecks(name: String, cause: DbMigrations.DatabaseVersionError)
+    extends StartupError {
+  def message: String = s"version checks failed for database of $name: $cause"
+}
+
+final case class FailedDatabaseConfigChecks(name: String, cause: DbMigrations.DatabaseConfigError)
+    extends StartupError {
+  def message: String = s"config checks failed for database of $name: $cause"
+}
+
+final case class FailedDatabaseRepairMigration(name: String, cause: DbMigrations.Error)
+    extends StartupError {
+  def message: String = s"failed to repair the database migration of $name: $cause"
+}
+
+final case class DidntUseForceOnRepairMigration(name: String) extends StartupError {
+  def message: String =
+    s"`repair_migration` is a command that may lead to data corruption in the worst case if an " +
+      s"incompatible database migration is subsequently applied. To use it you need to call `$name.db.repair_migration(force=true)`. " +
+      s"See `help($name.db.repair_migration)` for more details. "
+}
+
+final case class StartFailed(name: String, message: String) extends StartupError
+
+final case class ShutdownDuringStartup(name: String, message: String) extends StartupError
+
+/** Trying to start the node when the database has pending migrations
+  */
+final case class PendingDatabaseMigration(name: String, pendingMigrationMessage: String)
+    extends StartupError {
+  def message = s"failed to initialize $name: $pendingMigrationMessage"
+}
+
+sealed trait ShutdownError {
+  val name: String
+  def message: String
+
+  override def toString: String = s"${name}: $message"
+}
+
+/** Configuration for the given name was not found in the CantonConfig
+  */
+final case class ConfigurationNotFound(name: String) extends StartupError with ShutdownError {
+  def message = s"configuration not found: $name"
+}
diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/environment/Nodes.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/environment/Nodes.scala
new file mode 100644
index 0000000000..a7d349c454
--- /dev/null
+++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/environment/Nodes.scala
@@ -0,0 +1,440 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.environment + +import cats.data.EitherT +import cats.instances.future.* +import cats.syntax.either.* +import cats.syntax.foldable.* +import cats.{Applicative, Id} +import com.digitalasset.canton.DiscardOps +import com.digitalasset.canton.concurrent.ExecutionContextIdlenessExecutorService +import com.digitalasset.canton.config.{DbConfig, LocalNodeConfig, ProcessingTimeout, StorageConfig} +import com.digitalasset.canton.domain.config.DomainConfig +import com.digitalasset.canton.domain.{Domain, DomainNodeBootstrap, DomainNodeParameters} +import com.digitalasset.canton.lifecycle.* +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.participant.* +import com.digitalasset.canton.participant.config.LocalParticipantConfig +import com.digitalasset.canton.participant.ledger.api.CantonLedgerApiServerWrapper +import com.digitalasset.canton.participant.ledger.api.CantonLedgerApiServerWrapper.MigrateSchemaConfig +import com.digitalasset.canton.resource.DbStorage.RetryConfig +import com.digitalasset.canton.resource.{DbMigrations, DbMigrationsFactory} +import com.digitalasset.canton.tracing.TraceContext + +import scala.collection.concurrent.TrieMap +import scala.concurrent.{ExecutionContext, Future, Promise, blocking} +import scala.util.{Failure, Success} + +/** Group of CantonNodes of the same type (domains, participants, sequencers). */ +trait Nodes[+Node <: CantonNode, +NodeBootstrap <: CantonNodeBootstrap[Node]] + extends FlagCloseable { + + type InstanceName = String + + /** Returns the startup group (nodes in the same group will start together) + * + * Mediator & Topology manager automatically connect to a domain. Participants + * require an external call to reconnectDomains. 
Therefore, we can start participants, sequencer and domain + * nodes together, but we have to wait for the sequencers to be up before we can kick off mediators & topology managers. + */ + def startUpGroup: Int + + /** Returns the names of all known nodes */ + def names(): Seq[InstanceName] + + /** Start an individual node by name */ + def start(name: InstanceName)(implicit + traceContext: TraceContext + ): EitherT[Future, StartupError, Unit] + + def startAndWait(name: InstanceName)(implicit + traceContext: TraceContext + ): Either[StartupError, Unit] + + /** Is the named node running? */ + def isRunning(name: InstanceName): Boolean + + /** Get the single running node */ + def getRunning(name: InstanceName): Option[NodeBootstrap] + + /** Get the node while it is still being started. This is mostly useful during testing to access the node in earlier + * stages of its initialization phase. + */ + def getStarting(name: InstanceName): Option[NodeBootstrap] + + /** Stop the named node */ + def stop(name: InstanceName)(implicit + traceContext: TraceContext + ): EitherT[Future, ShutdownError, Unit] + + def stopAndWait(name: InstanceName)(implicit + traceContext: TraceContext + ): Either[ShutdownError, Unit] + + /** Get nodes that are currently running */ + def running: Seq[NodeBootstrap] + + /** Independently run any pending database migrations for the named node */ + def migrateDatabase(name: InstanceName): Either[StartupError, Unit] + + /** Independently repair the Flyway schema history table for the named node to reset Flyway migration checksums etc */ + def repairDatabaseMigration(name: InstanceName): Either[StartupError, Unit] +} + +private sealed trait ManagedNodeStage[T] + +private final case class PreparingDatabase[T]( + promise: Promise[Either[StartupError, T]] +) extends ManagedNodeStage[T] + +private final case class StartingUp[T]( + promise: Promise[Either[StartupError, T]], + node: T, +) extends ManagedNodeStage[T] + +private final case class Running[T](node: 
T) extends ManagedNodeStage[T] + +/** Nodes group that can start nodes with the provided configuration and factory */ +class ManagedNodes[ + Node <: CantonNode, + NodeConfig <: LocalNodeConfig, + NodeParameters <: CantonNodeParameters, + NodeBootstrap <: CantonNodeBootstrap[Node], +]( + create: (String, NodeConfig) => NodeBootstrap, + migrationsFactory: DbMigrationsFactory, + override protected val timeouts: ProcessingTimeout, + configs: Map[String, NodeConfig], + parametersFor: String => CantonNodeParameters, + override val startUpGroup: Int, + protected val loggerFactory: NamedLoggerFactory, +)(implicit ec: ExecutionContext) + extends Nodes[Node, NodeBootstrap] + with NamedLogging + with HasCloseContext + with FlagCloseableAsync { + + private val nodes = TrieMap[InstanceName, ManagedNodeStage[NodeBootstrap]]() + override lazy val names: Seq[InstanceName] = configs.keys.toSeq + + override def running: Seq[NodeBootstrap] = nodes.values.toSeq.collect { case Running(node) => + node + } + + def startAndWait(name: InstanceName)(implicit + traceContext: TraceContext + ): Either[StartupError, Unit] = + timeouts.unbounded.await(s"Starting node $name")(start(name).value) + + override def start( + name: InstanceName + )(implicit + traceContext: TraceContext + ): EitherT[Future, StartupError, Unit] = + EitherT + .fromEither[Future]( + configs + .get(name) + .toRight(ConfigurationNotFound(name): StartupError) + ) + .flatMap(startNode(name, _).map(_ => ())) + + private def startNode( + name: InstanceName, + config: NodeConfig, + ): EitherT[Future, StartupError, NodeBootstrap] = if (isClosing) + EitherT.leftT(ShutdownDuringStartup(name, "Won't start during shutdown")) + else { + def runStartup( + promise: Promise[Either[StartupError, NodeBootstrap]] + ): EitherT[Future, StartupError, NodeBootstrap] = { + val params = parametersFor(name) + val startup = for { + // start migration + _ <- EitherT(Future { checkMigration(name, config.storage, params) }) + instance = { + val 
instance = create(name, config) + nodes.put(name, StartingUp(promise, instance)).discard + instance + } + _ <- + instance.start().leftMap { error => + instance.close() // clean up resources allocated during instance creation (e.g., db) + StartFailed(name, error): StartupError + } + } yield { + // register the running instance + nodes.put(name, Running(instance)).discard + instance + } + import com.digitalasset.canton.util.Thereafter.syntax.* + promise.completeWith(startup.value) + // remove node upon failure + startup.thereafter { + case Success(Right(_)) => () + case Success(Left(_)) => + nodes.remove(name).discard + case Failure(_) => + nodes.remove(name).discard + } + } + + blocking(synchronized { + nodes.get(name) match { + case Some(PreparingDatabase(promise)) => EitherT(promise.future) + case Some(StartingUp(promise, _)) => EitherT(promise.future) + case Some(Running(node)) => EitherT.rightT(node) + case None => + val promise = Promise[Either[StartupError, NodeBootstrap]]() + nodes + .put(name, PreparingDatabase(promise)) + .discard // discard is okay as this is running in the sync block + runStartup(promise) // startup will run async + } + }) + + } + + private def configAndParams( + name: InstanceName + ): Either[StartupError, (NodeConfig, CantonNodeParameters)] = { + for { + config <- configs.get(name).toRight(ConfigurationNotFound(name): StartupError) + _ <- checkNotRunning(name) + params = parametersFor(name) + } yield (config, params) + } + + override def migrateDatabase(name: InstanceName): Either[StartupError, Unit] = blocking( + synchronized { + for { + cAndP <- configAndParams(name) + (config, params) = cAndP + _ <- runMigration(name, config.storage, params.devVersionSupport) + } yield () + } + ) + + override def repairDatabaseMigration(name: InstanceName): Either[StartupError, Unit] = blocking( + synchronized { + for { + cAndP <- configAndParams(name) + (config, params) = cAndP + _ <- runRepairMigration(name, config.storage, 
params.devVersionSupport) + } yield () + } + ) + + override def isRunning(name: InstanceName): Boolean = nodes.contains(name) + + override def getRunning(name: InstanceName): Option[NodeBootstrap] = nodes.get(name).collect { + case Running(node) => node + } + + override def getStarting(name: InstanceName): Option[NodeBootstrap] = nodes.get(name).collect { + case StartingUp(_, node) => node + } + + override def stop( + name: InstanceName + )(implicit + traceContext: TraceContext + ): EitherT[Future, ShutdownError, Unit] = + for { + _ <- EitherT.fromEither[Future]( + configs.get(name).toRight[ShutdownError](ConfigurationNotFound(name)) + ) + _ <- nodes.get(name).traverse_(stopStage(name)) + } yield () + + override def stopAndWait(name: InstanceName)(implicit + traceContext: TraceContext + ): Either[ShutdownError, Unit] = + timeouts.unbounded.await(s"stopping node $name")(stop(name).value) + + private def stopStage(name: InstanceName)( + stage: ManagedNodeStage[NodeBootstrap] + )(implicit + traceContext: TraceContext, + ec: ExecutionContext, + ): EitherT[Future, ShutdownError, Unit] = { + EitherT(stage match { + // wait for the node to complete startup + case PreparingDatabase(promise) => promise.future + case StartingUp(promise, _) => promise.future + case Running(node) => Future.successful(Right(node)) + }).transform { + case Left(_) => + // we can remap a startup failure to a success here, as we don't want the + // startup failure to propagate into a shutdown failure + Right(()) + case Right(node) => + nodes.remove(name).foreach { + // if there were other processes messing with the node, we won't shutdown + case Running(current) if node == current => + Lifecycle.close(node)(logger) + case _ => + logger.info(s"Node $name has already disappeared.") + } + Right(()) + } + } + + override protected def closeAsync(): Seq[AsyncOrSyncCloseable] = { + val runningInstances = nodes.toList + import TraceContext.Implicits.Empty.* + runningInstances.map { case (name, stage) => + 
AsyncCloseable(s"node-$name", stopStage(name)(stage).value, timeouts.closing) + } + } + + protected def runIfUsingDatabase[F[_]](storageConfig: StorageConfig)( + fn: DbConfig => F[Either[StartupError, Unit]] + )(implicit F: Applicative[F]): F[Either[StartupError, Unit]] = storageConfig match { + case dbConfig: DbConfig => fn(dbConfig) + case _ => F.pure(Right(())) + } + + // if database is fresh, we will migrate it. Otherwise, we will check if there is any pending migrations, + // which need to be triggered manually. + private def checkMigration( + name: InstanceName, + storageConfig: StorageConfig, + params: CantonNodeParameters, + ): Either[StartupError, Unit] = + runIfUsingDatabase[Id](storageConfig) { dbConfig => + val migrations = migrationsFactory.create(dbConfig, name, params.devVersionSupport) + import TraceContext.Implicits.Empty.* + logger.info(s"Setting up database schemas for $name") + + def errorMapping(err: DbMigrations.Error): StartupError = { + err match { + case DbMigrations.PendingMigrationError(msg) => PendingDatabaseMigration(name, msg) + case err: DbMigrations.FlywayError => FailedDatabaseMigration(name, err) + case err: DbMigrations.DatabaseError => FailedDatabaseMigration(name, err) + case err: DbMigrations.DatabaseVersionError => FailedDatabaseVersionChecks(name, err) + case err: DbMigrations.DatabaseConfigError => FailedDatabaseConfigChecks(name, err) + } + } + val retryConfig = + if (storageConfig.parameters.failFastOnStartup) RetryConfig.failFast + else RetryConfig.forever + + val result = migrations + .checkAndMigrate(params, retryConfig) + .leftMap(errorMapping) + + result.value.onShutdown( + Left(ShutdownDuringStartup(name, "DB migration check interrupted due to shutdown")) + ) + } + + private def checkNotRunning(name: InstanceName): Either[StartupError, Unit] = + if (isRunning(name)) Left(AlreadyRunning(name)) + else Right(()) + + private def runMigration( + name: InstanceName, + storageConfig: StorageConfig, + devVersionSupport: 
Boolean, + ): Either[StartupError, Unit] = + runIfUsingDatabase[Id](storageConfig) { dbConfig => + migrationsFactory + .create(dbConfig, name, devVersionSupport) + .migrateDatabase() + .leftMap(FailedDatabaseMigration(name, _)) + .value + .onShutdown(Left(ShutdownDuringStartup(name, "DB migration interrupted due to shutdown"))) + } + + private def runRepairMigration( + name: InstanceName, + storageConfig: StorageConfig, + devVersionSupport: Boolean, + ): Either[StartupError, Unit] = + runIfUsingDatabase[Id](storageConfig) { dbConfig => + migrationsFactory + .create(dbConfig, name, devVersionSupport) + .repairFlywayMigration() + .leftMap(FailedDatabaseRepairMigration(name, _)) + .value + .onShutdown( + Left(ShutdownDuringStartup(name, "DB repair migration interrupted due to shutdown")) + ) + } +} + +class ParticipantNodes[B <: CantonNodeBootstrap[N], N <: CantonNode, PC <: LocalParticipantConfig]( + create: (String, PC) => B, // (nodeName, config) => bootstrap + migrationsFactory: DbMigrationsFactory, + timeouts: ProcessingTimeout, + configs: Map[String, PC], + parametersFor: String => ParticipantNodeParameters, + loggerFactory: NamedLoggerFactory, +)(implicit + protected val executionContext: ExecutionContextIdlenessExecutorService +) extends ManagedNodes[N, PC, ParticipantNodeParameters, B]( + create, + migrationsFactory, + timeouts, + configs, + parametersFor, + startUpGroup = 0, + loggerFactory, + ) { + private def migrateIndexerDatabase(name: InstanceName): Either[StartupError, Unit] = { + import TraceContext.Implicits.Empty.* + + for { + config <- configs.get(name).toRight(ConfigurationNotFound(name)) + parameters = parametersFor(name) + _ = parameters.processingTimeouts.unbounded.await("migrate indexer database") { + runIfUsingDatabase[Future](config.storage) { dbConfig => + CantonLedgerApiServerWrapper + .migrateSchema( + MigrateSchemaConfig( + dbConfig, + config.ledgerApi.additionalMigrationPaths, + ), + loggerFactory, + ) + .map(_.asRight) + } + } + } 
yield () + } + + override def migrateDatabase(name: InstanceName): Either[StartupError, Unit] = + for { + _ <- super.migrateDatabase(name) + _ <- migrateIndexerDatabase(name) + } yield () +} + +object ParticipantNodes { + type ParticipantNodesOld[PC <: LocalParticipantConfig] = + ParticipantNodes[ParticipantNodeBootstrap, ParticipantNode, PC] + type ParticipantNodesX[PC <: LocalParticipantConfig] = + ParticipantNodes[ParticipantNodeBootstrapX, ParticipantNodeX, PC] +} + +class DomainNodes[DC <: DomainConfig]( + create: (String, DC) => DomainNodeBootstrap, + migrationsFactory: DbMigrationsFactory, + timeouts: ProcessingTimeout, + configs: Map[String, DC], + parameters: String => DomainNodeParameters, + loggerFactory: NamedLoggerFactory, +)(implicit + protected val executionContext: ExecutionContextIdlenessExecutorService +) extends ManagedNodes[Domain, DC, DomainNodeParameters, DomainNodeBootstrap]( + create, + migrationsFactory, + timeouts, + configs, + parameters, + startUpGroup = 0, + loggerFactory, + ) diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/health/HealthCheck.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/health/HealthCheck.scala new file mode 100644 index 0000000000..1b2f3db393 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/health/HealthCheck.scala @@ -0,0 +1,232 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.health + +import com.digitalasset.canton.config.{CheckConfig, ProcessingTimeout} +import com.digitalasset.canton.environment.Environment +import com.digitalasset.canton.lifecycle.Lifecycle +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.metrics.HealthMetrics +import com.digitalasset.canton.participant.ParticipantNode +import com.digitalasset.canton.participant.admin.PingService +import com.digitalasset.canton.time.{Clock, WallClock} +import com.digitalasset.canton.topology.UniqueIdentifier +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.EitherUtil +import org.apache.pekko.actor.ActorSystem + +import java.time.{Duration, Instant} +import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference} +import scala.concurrent.duration.FiniteDuration +import scala.concurrent.{ExecutionContext, Future} +import scala.util.control.NonFatal + +/** Check to determine a health check response */ +trait HealthCheck extends AutoCloseable { + + /** Ask the check to decide whether we're healthy. + * The future should complete successfully with a HealthCheckResult. + * If the future fails this implies there was an error performing the check itself. + */ + def isHealthy(implicit traceContext: TraceContext): Future[HealthCheckResult] + override def close(): Unit = () +} + +/** Constant response for a health check (used by the always-healthy configuration) */ +final case class StaticHealthCheck(private val result: HealthCheckResult) extends HealthCheck { + override def isHealthy(implicit traceContext: TraceContext): Future[HealthCheckResult] = + Future.successful(result) +} + +/** Pings the supplied participant to determine health. + * Will be considered unhealthy if unable to resolve the ping service for the participant alias (likely due to startup and initialization). 
+ * Ping success considered healthy, ping failure considered unhealthy. + * If the ping future fails (rather than completing successfully with a failure), it will be converted to a unhealthy response and the exception will be logged at DEBUG level. + */ +class PingHealthCheck( + environment: Environment, + participantAlias: String, + timeout: FiniteDuration, + metrics: HealthMetrics, + protected val loggerFactory: NamedLoggerFactory, +)(implicit executionContext: ExecutionContext) + extends HealthCheck + with NamedLogging { + + private val pingLatencyTimer = metrics.pingLatency + + override def isHealthy(implicit traceContext: TraceContext): Future[HealthCheckResult] = + getParticipant match { + case Left(failed) => Future.successful(failed) + case Right(participant) => + val partyId = participant.id.uid + val pingService = participant.ledgerApiDependentCantonServices.adminWorkflowServices.ping + ping(pingService, partyId) + } + + private def getParticipant: Either[HealthCheckResult, ParticipantNode] = + for { + init <- Option( + environment.participants + ) // if this check is called before the collection has been initialized it will be null, so be very defensive + .flatMap(_.getRunning(participantAlias)) + .toRight(Unhealthy("participant is not started")) + participant <- init.getNode.toRight(Unhealthy("participant has not been initialized")) + _ <- Either.cond( + participant.readyDomains.exists(_._2), + (), + Unhealthy("participant is not connected to any domains"), + ) + } yield participant + + private def ping(pingService: PingService, partyId: UniqueIdentifier)(implicit + traceContext: TraceContext + ): Future[HealthCheckResult] = { + val timer = pingLatencyTimer.time() + val started = Instant.now() + val pingResult = for { + result <- pingService.ping(Set(partyId.toProtoPrimitive), Set(), timeout.toMillis) + } yield { + timer.stop() + result match { + case PingService.Success(roundTripTime, _) => + logger.debug(s"Health check ping completed in 
${roundTripTime}") + Healthy + case PingService.Failure => + val elapsedTime = Duration.between(started, Instant.now) + logger.warn(s"Health check ping failed (elapsed time ${elapsedTime.toMillis}ms)") + Unhealthy("ping failure") + } + } + + pingResult recover { case NonFatal(ex) => + logger.warn("health check ping failed", ex) + Unhealthy("ping failed") + } + } +} + +/** For components that simply flag whether they are active or not, just return that. + * @param isActive should return a Right if the instance is active, + * should return Left with a message to be returned on the health endpoint if not. + */ +class IsActiveCheck( + isActive: () => Either[String, Unit], + protected val loggerFactory: NamedLoggerFactory, +) extends HealthCheck { + override def isHealthy(implicit traceContext: TraceContext): Future[HealthCheckResult] = + Future.successful { + isActive().fold(Unhealthy, _ => Healthy) + } +} + +/** Rather than executing a check for every isHealthy call periodically run the check and cache the result, and return this cached value for isHealthy. + */ +class PeriodicCheck( + clock: Clock, + interval: FiniteDuration, + protected val loggerFactory: NamedLoggerFactory, +)(check: HealthCheck)(implicit executionContext: ExecutionContext) + extends HealthCheck + with NamedLogging { + + /** Once closed we should prevent checks from being run and scheduled */ + private val closed = new AtomicBoolean(false) + + /** Cached value that will hold the previous health check result. + * It is initialized by calling setupNextCheck so until the first result completes it will hold a pending future. 
+ */ + private val lastCheck = new AtomicReference[Future[HealthCheckResult]](setupFirstCheck) + + /** Returns the health check promise rather than updating lastCheck, so is suitable for initializing lastCheck */ + private def setupFirstCheck: Future[HealthCheckResult] = { + val isHealthy = TraceContext.withNewTraceContext(check.isHealthy(_)) + isHealthy onComplete { _ => + setupNextCheck() + } + isHealthy + } + + /** Runs the check and when completed updates the value of lastCheck */ + private def runCheck(): Unit = if (!closed.get()) { + val isHealthy = TraceContext.withNewTraceContext(check.isHealthy(_)) + + isHealthy onComplete { _ => + lastCheck.set(isHealthy) + setupNextCheck() + } + } + + private def setupNextCheck(): Unit = + if (!closed.get()) { + val _ = clock.scheduleAfter(_ => runCheck(), Duration.ofMillis(interval.toMillis)) + } + + override def close(): Unit = { + closed.set(true) + Lifecycle.close(clock)(logger) + } + + override def isHealthy(implicit traceContext: TraceContext): Future[HealthCheckResult] = + lastCheck.get() +} + +object HealthCheck { + def apply( + config: CheckConfig, + metrics: HealthMetrics, + timeouts: ProcessingTimeout, + loggerFactory: NamedLoggerFactory, + )(environment: Environment)(implicit system: ActorSystem): HealthCheck = { + implicit val executionContext = system.dispatcher + config match { + case CheckConfig.AlwaysHealthy => + StaticHealthCheck(Healthy) + case CheckConfig.Ping(participantAlias, interval, timeout) => + // only ping periodically rather than on every health check + new PeriodicCheck( + new WallClock(timeouts, loggerFactory.appendUnnamedKey("clock", "ping-health-check")), + interval.underlying, + loggerFactory, + )( + new PingHealthCheck( + environment, + participantAlias, + timeout.underlying, + metrics, + loggerFactory, + ) + ) + + case CheckConfig.IsActive(participantO) => + val configuredParticipants = environment.config.participantsByString + + val participantName = participantO match { + case 
Some(configName) => + if (configuredParticipants.contains(configName)) configName + else sys.error(s"Participant with name '$configName' is not configured") + case None => + configuredParticipants.headOption + .map(_._1) + .getOrElse( + s"IsActive health check must be configured with the participant name to check as there are many participants configured for this environment" + ) + } + + def isActive: Either[String, Unit] = + for { + participant <- environment.participants + .getRunning(participantName) + .toRight("Participant is not running") + runningParticipant <- participant.getNode.toRight("Participant is not initialized") + _ <- EitherUtil.condUnitE( + runningParticipant.sync.isActive(), + "Participant is not active", + ) + } yield () + + new IsActiveCheck(() => isActive, loggerFactory) + } + } +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/health/HealthCheckResult.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/health/HealthCheckResult.scala new file mode 100644 index 0000000000..0f9460d52c --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/health/HealthCheckResult.scala @@ -0,0 +1,15 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.health + +/** Result of a health check */ +sealed trait HealthCheckResult + +/** Everything that the check checks is healthy */ +object Healthy extends HealthCheckResult + +/** The check deems something unhealthy + * @param message User printable message describing why a unhealthy result was given + */ +final case class Unhealthy(message: String) extends HealthCheckResult diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/health/HealthServer.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/health/HealthServer.scala new file mode 100644 index 0000000000..ec744f04f2 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/health/HealthServer.scala @@ -0,0 +1,87 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.health + +import com.digitalasset.canton.config.RequireTypes.Port +import com.digitalasset.canton.config.{HealthConfig, ProcessingTimeout} +import com.digitalasset.canton.environment.Environment +import com.digitalasset.canton.lifecycle.* +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.metrics.HealthMetrics +import com.digitalasset.canton.tracing.TraceContext +import com.google.common.annotations.VisibleForTesting +import org.apache.pekko.actor.ActorSystem +import org.apache.pekko.http.scaladsl.Http +import org.apache.pekko.http.scaladsl.marshalling.{Marshaller, ToResponseMarshaller} +import org.apache.pekko.http.scaladsl.model.{HttpEntity, HttpResponse, StatusCodes} +import org.apache.pekko.http.scaladsl.server.Directives.* +import org.apache.pekko.http.scaladsl.server.Route +import org.apache.pekko.http.scaladsl.server.directives.DebuggingDirectives + +class HealthServer( + check: HealthCheck, + address: String, + 
port: Port, + protected override val timeouts: ProcessingTimeout, + protected val loggerFactory: NamedLoggerFactory, +)(implicit system: ActorSystem) + extends FlagCloseableAsync + with NamedLogging { + + private val binding = { + import TraceContext.Implicits.Empty.* + timeouts.unbounded.await(s"Binding the health server")( + Http().newServerAt(address, port.unwrap).bind(HealthServer.route(check)) + ) + } + + override protected def closeAsync(): Seq[AsyncOrSyncCloseable] = { + import TraceContext.Implicits.Empty.* + List[AsyncOrSyncCloseable]( + AsyncCloseable("binding", binding.unbind(), timeouts.shutdownNetwork), + SyncCloseable("check", Lifecycle.close(check)(logger)), + ) + } +} + +object HealthServer { + def apply( + config: HealthConfig, + metrics: HealthMetrics, + timeouts: ProcessingTimeout, + loggerFactory: NamedLoggerFactory, + )(environment: Environment)(implicit system: ActorSystem): HealthServer = { + val check = HealthCheck(config.check, metrics, timeouts, loggerFactory)(environment) + + new HealthServer(check, config.server.address, config.server.port, timeouts, loggerFactory) + } + + /** Routes for powering the health server. 
+ * Provides: + * GET /health => calls check and returns: + * 200 if healthy + * 500 if unhealthy + * 500 if the check fails + */ + @VisibleForTesting + private[health] def route(check: HealthCheck): Route = { + implicit val _marshaller: ToResponseMarshaller[HealthCheckResult] = + Marshaller.opaque { + case Healthy => + HttpResponse(status = StatusCodes.OK, entity = HttpEntity("healthy")) + case Unhealthy(message) => + HttpResponse(status = StatusCodes.InternalServerError, entity = HttpEntity(message)) + } + + get { + path("health") { + DebuggingDirectives.logRequest("health-request") { + DebuggingDirectives.logRequestResult("health-request-response") { + complete(TraceContext.withNewTraceContext(check.isHealthy(_))) + } + } + } + } + } +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/metrics/MetricsFactory.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/metrics/MetricsFactory.scala new file mode 100644 index 0000000000..6c35ef6038 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/metrics/MetricsFactory.scala @@ -0,0 +1,382 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.metrics + +import com.codahale.metrics +import com.codahale.metrics.{Metric, MetricFilter, MetricRegistry} +import com.daml.metrics.api.{MetricName, MetricsContext} +import com.daml.metrics.grpc.DamlGrpcServerMetrics +import com.daml.metrics.{ + ExecutorServiceMetrics, + HealthMetrics as DMHealth, + HistogramDefinition, + JvmMetricSet, +} +import com.digitalasset.canton.DomainAlias +import com.digitalasset.canton.buildinfo.BuildInfo +import com.digitalasset.canton.config.DeprecatedConfigUtils.DeprecatedFieldsFor +import com.digitalasset.canton.config.{DeprecatedConfigUtils, NonNegativeFiniteDuration} +import com.digitalasset.canton.domain.metrics.{ + DomainMetrics, + EnvMetrics, + MediatorNodeMetrics, + SequencerMetrics, +} +import com.digitalasset.canton.metrics.MetricHandle.{ + CantonDropwizardMetricsFactory, + CantonOpenTelemetryMetricsFactory, +} +import com.digitalasset.canton.metrics.MetricsConfig.MetricsFilterConfig +import com.digitalasset.canton.participant.metrics.ParticipantMetrics +import com.typesafe.scalalogging.LazyLogging +import io.opentelemetry.api.OpenTelemetry +import io.opentelemetry.api.metrics.Meter +import io.prometheus.client.dropwizard.DropwizardExports + +import java.io.File +import java.util.Locale +import java.util.concurrent.TimeUnit +import scala.annotation.nowarn +import scala.collection.concurrent.TrieMap + +final case class MetricsConfig( + reporters: Seq[MetricsReporterConfig] = Seq.empty, + reportJvmMetrics: Boolean = false, + reportExecutionContextMetrics: Boolean = false, + histograms: Seq[HistogramDefinition] = Seq.empty, +) + +object MetricsReporterConfig { + object DeprecatedImplicits { + implicit def deprecatedDomainBaseConfig[X <: MetricsReporterConfig]: DeprecatedFieldsFor[X] = + new DeprecatedFieldsFor[MetricsReporterConfig] { + override def deprecatePath: List[DeprecatedConfigUtils.DeprecatedConfigPath[?]] = List( + 
DeprecatedConfigUtils.DeprecatedConfigPath[String]("type", since = "2.6.0", Some("jmx")), + DeprecatedConfigUtils.DeprecatedConfigPath[String]("type", since = "2.6.0", Some("csv")), + DeprecatedConfigUtils + .DeprecatedConfigPath[String]("type", since = "2.6.0", Some("graphite")), + DeprecatedConfigUtils.DeprecatedConfigPath[String]("filters", since = "2.6.0"), + ) + } + } +} + +sealed trait MetricsReporterConfig { + def filters: Seq[MetricsFilterConfig] + + def metricFilter: MetricFilter = + (name: String, _: Metric) => filters.isEmpty || filters.exists(_.matches(name)) +} + +sealed trait MetricsPrefix +object MetricsPrefix { + + /** Do not use a prefix */ + object NoPrefix extends MetricsPrefix + + /** Use a static text string as prefix */ + final case class Static(prefix: String) extends MetricsPrefix + + /** Uses the hostname as the prefix */ + object Hostname extends MetricsPrefix + + def prefixFromConfig(prefix: MetricsPrefix): Option[String] = prefix match { + case Hostname => Some(java.net.InetAddress.getLocalHost.getHostName) + case NoPrefix => None + case Static(prefix) => Some(prefix) + } +} + +object MetricsConfig { + + final case class JMX(filters: Seq[MetricsFilterConfig] = Seq.empty) extends MetricsReporterConfig + + final case class Csv( + directory: File, + interval: NonNegativeFiniteDuration = NonNegativeFiniteDuration.ofSeconds(5), + filters: Seq[MetricsFilterConfig] = Seq.empty, + ) extends MetricsReporterConfig + + final case class Graphite( + address: String = "localhost", + port: Int = 2003, + prefix: MetricsPrefix = MetricsPrefix.Hostname, + interval: NonNegativeFiniteDuration = NonNegativeFiniteDuration.ofSeconds(30), + filters: Seq[MetricsFilterConfig] = Seq.empty, + ) extends MetricsReporterConfig + + final case class Prometheus(address: String = "localhost", port: Int = 9100) + extends MetricsReporterConfig { + override def filters: Seq[MetricsFilterConfig] = Seq.empty + } + + final case class MetricsFilterConfig( + startsWith: String = 
"", + contains: String = "", + endsWith: String = "", + ) { + def matches(name: String): Boolean = + name.startsWith(startsWith) && name.contains(contains) && name.endsWith(endsWith) + } +} + +final case class MetricsFactory( + reporters: Seq[metrics.Reporter], + registry: metrics.MetricRegistry, + reportJVMMetrics: Boolean, + meter: Meter, + factoryType: MetricsFactoryType, + reportExecutionContextMetrics: Boolean, +) extends AutoCloseable { + + @deprecated("Use LabeledMetricsFactory", since = "2.7.0") + val metricsFactory: MetricHandle.MetricsFactory = + createUnlabeledMetricsFactory(MetricsContext.Empty, registry) + + @nowarn("cat=deprecation") private val envMetrics = new EnvMetrics(metricsFactory) + private val participants = TrieMap[String, ParticipantMetrics]() + private val domains = TrieMap[String, DomainMetrics]() + private val sequencers = TrieMap[String, SequencerMetrics]() + private val mediators = TrieMap[String, MediatorNodeMetrics]() + private val allNodeMetrics: Seq[TrieMap[String, ?]] = + Seq(participants, domains, sequencers, mediators) + private def nodeMetricsExcept(toExclude: TrieMap[String, ?]): Seq[TrieMap[String, ?]] = + allNodeMetrics filterNot (_ eq toExclude) + + val executionServiceMetrics: ExecutorServiceMetrics = new ExecutorServiceMetrics( + createLabeledMetricsFactory(MetricsContext.Empty) + ) + + object benchmark extends MetricsGroup(MetricName(MetricsFactory.prefix :+ "benchmark"), registry) + + object health extends HealthMetrics(MetricName(MetricsFactory.prefix :+ "health"), registry) + + // add default, system wide metrics to the metrics reporter + if (reportJVMMetrics) { + registry.registerAll(new JvmMetricSet) // register Daml repo JvmMetricSet + JvmMetricSet.registerObservers() // requires OpenTelemetry to have the global lib setup + } + + private def newRegistry(prefix: String): metrics.MetricRegistry = { + val nested = new metrics.MetricRegistry() + registry.register(prefix, nested) + nested + } + + def 
forParticipant(name: String): ParticipantMetrics = { + participants.getOrElseUpdate( + name, { + val metricName = deduplicateName(name, "participant", participants) + val participantMetricsContext = + MetricsContext("participant" -> name, "component" -> "participant") + val participantRegistry = newRegistry(metricName) + new ParticipantMetrics( + name, + MetricsFactory.prefix, + createUnlabeledMetricsFactory(participantMetricsContext, participantRegistry), + createLabeledMetricsFactory( + participantMetricsContext + ), + participantRegistry, + reportExecutionContextMetrics, + ) + }, + ) + } + + def forEnv: EnvMetrics = envMetrics + + def forDomain(name: String): DomainMetrics = { + domains.getOrElseUpdate( + name, { + val metricName = deduplicateName(name, "domain", domains) + val domainMetricsContext = MetricsContext("domain" -> name, "component" -> "domain") + val labeledMetricsFactory = + createLabeledMetricsFactory(domainMetricsContext) + new DomainMetrics( + MetricsFactory.prefix, + createUnlabeledMetricsFactory(domainMetricsContext, newRegistry(metricName)), + new DamlGrpcServerMetrics(labeledMetricsFactory, "domain"), + new DMHealth(labeledMetricsFactory), + ) + }, + ) + } + + def forSequencer(name: String): SequencerMetrics = { + sequencers.getOrElseUpdate( + name, { + val metricName = deduplicateName(name, "sequencer", sequencers) + val sequencerMetricsContext = + MetricsContext("sequencer" -> name, "component" -> "sequencer") + val labeledMetricsFactory = createLabeledMetricsFactory( + sequencerMetricsContext + ) + new SequencerMetrics( + MetricsFactory.prefix, + createUnlabeledMetricsFactory(sequencerMetricsContext, newRegistry(metricName)), + new DamlGrpcServerMetrics(labeledMetricsFactory, "sequencer"), + new DMHealth(labeledMetricsFactory), + ) + }, + ) + } + + def forMediator(name: String): MediatorNodeMetrics = { + mediators.getOrElseUpdate( + name, { + val metricName = deduplicateName(name, "mediator", mediators) + val mediatorMetricsContext = 
MetricsContext("mediator" -> name, "component" -> "mediator") + val labeledMetricsFactory = + createLabeledMetricsFactory(mediatorMetricsContext) + new MediatorNodeMetrics( + MetricsFactory.prefix, + createUnlabeledMetricsFactory(mediatorMetricsContext, newRegistry(metricName)), + new DamlGrpcServerMetrics(labeledMetricsFactory, "mediator"), + new DMHealth(labeledMetricsFactory), + ) + }, + ) + } + + /** de-duplicate name if there is someone using the same name for another type of node (not sure that will ever happen) + */ + private def deduplicateName( + name: String, + nodeType: String, + nodesToExclude: TrieMap[String, ?], + ): String = + if (nodeMetricsExcept(nodesToExclude).exists(_.keySet.contains(name))) + s"$nodeType-$name" + else name + + private def createLabeledMetricsFactory(extraContext: MetricsContext) = { + factoryType match { + case MetricsFactoryType.InMemory(provider) => + provider(extraContext) + case MetricsFactoryType.External => + new CantonOpenTelemetryMetricsFactory( + meter, + globalMetricsContext = MetricsContext( + "canton_version" -> BuildInfo.version + ).merge(extraContext), + ) + } + } + + private def createUnlabeledMetricsFactory( + extraContext: MetricsContext, + registry: MetricRegistry, + ) = factoryType match { + case MetricsFactoryType.InMemory(builder) => builder(extraContext) + case MetricsFactoryType.External => new CantonDropwizardMetricsFactory(registry) + } + + /** returns the documented metrics by possibly creating fake participants / domains */ + def metricsDoc(): (Seq[MetricDoc.Item], Seq[MetricDoc.Item]) = { + def sorted(lst: Seq[MetricDoc.Item]): Seq[MetricDoc.Item] = + lst + .groupBy(_.name) + .flatMap(_._2.headOption.toList) + .toSeq + .sortBy(_.name) + + val participantMetrics: ParticipantMetrics = + participants.headOption.map(_._2).getOrElse(forParticipant("dummyParticipant")) + val participantItems = MetricDoc.getItems(participantMetrics) + val clientMetrics = + 
MetricDoc.getItems(participantMetrics.domainMetrics(DomainAlias.tryCreate(""))) + val domainMetrics = MetricDoc.getItems( + domains.headOption + .map { case (_, domainMetrics) => domainMetrics } + .getOrElse(forDomain("dummyDomain")) + ) + + // the fake instances are fine here as we do this anyway only when we build and export the docs + (sorted(participantItems ++ clientMetrics), sorted(domainMetrics)) + } + + override def close(): Unit = reporters.foreach(_.close()) + +} + +object MetricsFactory extends LazyLogging { + + import MetricsConfig.* + + val prefix: MetricName = MetricName("canton") + + def forConfig( + config: MetricsConfig, + openTelemetry: OpenTelemetry, + metricsFactoryType: MetricsFactoryType, + ): MetricsFactory = { + val registry = new metrics.MetricRegistry() + val reporter = registerReporter(config, registry) + new MetricsFactory( + reporter, + registry, + config.reportJvmMetrics, + openTelemetry.meterBuilder("canton").build(), + metricsFactoryType, + config.reportExecutionContextMetrics, + ) + } + + private def registerReporter( + config: MetricsConfig, + registry: metrics.MetricRegistry, + ): Seq[metrics.Reporter] = { + config.reporters.map { + + case reporterConfig @ JMX(_filters) => + val reporter = + metrics.jmx.JmxReporter.forRegistry(registry).filter(reporterConfig.metricFilter).build() + logger.debug("Starting metrics reporting using JMX") + reporter.start() + reporter + + case reporterConfig @ Csv(directory, interval, _filters) => + directory.mkdirs() + logger.debug(s"Starting metrics reporting to csv-file ${directory.toString}") + val reporter = metrics.CsvReporter + .forRegistry(registry) + .filter(reporterConfig.metricFilter) + .formatFor(Locale.ENGLISH) // Format decimal numbers like "12345.12345". 
+ .build(directory) + reporter.start(interval.unwrap.toMillis, TimeUnit.MILLISECONDS) + reporter + + case reporterConfig @ Graphite(address, port, prefix, interval, _filters) => + logger.debug(s"Starting metrics reporting for Graphite to $address:$port") + val builder = metrics.graphite.GraphiteReporter + .forRegistry(registry) + .filter(reporterConfig.metricFilter) + val reporter = MetricsPrefix + .prefixFromConfig(prefix) + .fold(builder)(str => builder.prefixedWith(str)) + .build(new metrics.graphite.Graphite(address, port)) + reporter.start(interval.unwrap.toMillis, TimeUnit.MILLISECONDS) + reporter + + // OpenTelemetry registers the prometheus collector during initialization + case Prometheus(hostname, port) => + logger.debug(s"Exposing metrics for Prometheus on port $hostname:$port") + new DropwizardExports(registry).register[DropwizardExports]() + val reporter = new Reporters.Prometheus(hostname, port) + reporter + } + } +} + +class HealthMetrics(prefix: MetricName, registry: metrics.MetricRegistry) + extends MetricsGroup(prefix, registry) { + + val pingLatency: metrics.Timer = timer("ping-latency") +} + +abstract class MetricsGroup(prefix: MetricName, registry: metrics.MetricRegistry) { + + def timer(name: String): metrics.Timer = registry.timer(MetricName(prefix :+ name)) +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/metrics/MetricsSnapshot.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/metrics/MetricsSnapshot.scala new file mode 100644 index 0000000000..291ccfbf38 --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/metrics/MetricsSnapshot.scala @@ -0,0 +1,32 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.metrics + +import com.codahale.metrics +import io.opentelemetry.sdk.metrics.data.MetricData + +import scala.jdk.CollectionConverters.* + +final case class MetricsSnapshot( + timers: Map[String, metrics.Timer], + counters: Map[String, metrics.Counter], + gauges: Map[String, metrics.Gauge[_]], + histograms: Map[String, metrics.Histogram], + meters: Map[String, metrics.Meter], + otelMetrics: Seq[MetricData], +) + +object MetricsSnapshot { + + def apply(registry: metrics.MetricRegistry, reader: OnDemandMetricsReader): MetricsSnapshot = { + MetricsSnapshot( + timers = registry.getTimers.asScala.toMap, + counters = registry.getCounters.asScala.toMap, + gauges = registry.getGauges.asScala.toMap, + histograms = registry.getHistograms.asScala.toMap, + meters = registry.getMeters.asScala.toMap, + otelMetrics = reader.read(), + ) + } +} diff --git a/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/metrics/Reporters.scala b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/metrics/Reporters.scala new file mode 100644 index 0000000000..6ea269ef1f --- /dev/null +++ b/canton-3x/community/app-base/src/main/scala/com/digitalasset/canton/metrics/Reporters.scala @@ -0,0 +1,16 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.metrics + +import com.codahale.metrics.Reporter +import io.prometheus.client.exporter.HTTPServer + +object Reporters { + + class Prometheus(hostname: String, port: Int) extends Reporter { + val server: HTTPServer = new HTTPServer.Builder().withHostname(hostname).withPort(port).build(); + override def close(): Unit = server.close() + } + +} diff --git a/canton-3x/community/app/src/main/resources/LICENSE-open-source-bundle.txt b/canton-3x/community/app/src/main/resources/LICENSE-open-source-bundle.txt new file mode 120000 index 0000000000..d9788ff23b --- /dev/null +++ b/canton-3x/community/app/src/main/resources/LICENSE-open-source-bundle.txt @@ -0,0 +1 @@ +../../../../LICENSE-open-source-bundle.txt \ No newline at end of file diff --git a/canton-3x/community/app/src/main/resources/application.conf b/canton-3x/community/app/src/main/resources/application.conf new file mode 100644 index 0000000000..c03e4de7a2 --- /dev/null +++ b/canton-3x/community/app/src/main/resources/application.conf @@ -0,0 +1,15 @@ +pekko { + loggers = ["org.apache.pekko.event.slf4j.Slf4jLogger"] + loglevel = "INFO" + + # the pekko-http server is only used for the health http server within canton. + # It is difficult to configure HAProxy to supply a correct host header when + # we've configured the http-check for the grpc server. So just default to + # assuming requests with no host header are for localhost. + # This should be revisited if we ever expose the server for anything beyond + # health. + http.server.default-host-header = "localhost" + + # For canton applications we tear down pekko explicitly. 
+ jvm-shutdown-hooks = off +} \ No newline at end of file diff --git a/canton-3x/community/app/src/main/resources/logback.xml b/canton-3x/community/app/src/main/resources/logback.xml new file mode 100644 index 0000000000..bea05e51ca --- /dev/null +++ b/canton-3x/community/app/src/main/resources/logback.xml @@ -0,0 +1,281 @@ + + + ]> + + + + true + + + + + + + + + ${LOG_LEVEL_STDOUT:-WARN} + + + + + + + + + + + + + + + + + + + + ${pattern} + + + ${LOG_LEVEL_STDOUT:-WARN} + + + + + + + + + + + + + + + + + ${LOG_FILE_NAME:-log/canton.log} + ${LOG_FILE_APPEND:-true} + + ${LOG_IMMEDIATE_FLUSH:-true} + + + + + + + ${LOG_FILE_NAME:-log/canton.log} + ${LOG_FILE_APPEND:-true} + + ${LOG_IMMEDIATE_FLUSH:-true} + + + %date [%thread] %-5level %logger{10} &entityCorrelationIdTrailingSpaceReplace; + + + + + + + + + + + + + ${LOG_FILE_NAME:-log/canton.log} + true + + ${LOG_IMMEDIATE_FLUSH:-true} + + + ${LOG_FILE_NAME:-log/canton.log}.%d{${LOG_FILE_ROLLING_PATTERN:-yyyy-MM-dd}}.gz + + ${LOG_FILE_HISTORY:-12} + + + + + + + + ${LOG_FILE_NAME:-log/canton.log} + true + + ${LOG_IMMEDIATE_FLUSH:-true} + + + ${LOG_FILE_NAME:-log/canton.log}.%d{${LOG_FILE_ROLLING_PATTERN:-yyyy-MM-dd}}.gz + + ${LOG_FILE_HISTORY:-12} + + + + %date [%thread] %-5level %logger{35} &entityCorrelationIdTrailingSpaceReplace; + + + + + + + + + + + + + + + + + + + + + + ${KMS_LOG_FILE_NAME:-log/canton_kms.log} + ${KMS_LOG_FILE_APPEND:-true} + + ${KMS_LOG_IMMEDIATE_FLUSH:-true} + + + + + + ${KMS_LOG_FILE_NAME:-log/canton_kms.log} + ${KMS_LOG_FILE_APPEND:-true} + + ${KMS_LOG_IMMEDIATE_FLUSH:-true} + + + %date [%thread] %-5level %logger{10} &entityCorrelationIdTrailingSpaceReplace; + + + + + + + + + + + + ${KMS_LOG_FILE_NAME:-log/canton_kms.log} + true + + ${KMS_LOG_IMMEDIATE_FLUSH:-true} + + + ${KMS_LOG_FILE_NAME:-log/canton_kms.log}.%d{${KMS_LOG_FILE_ROLLING_PATTERN:-yyyy-MM-dd}}.gz + + ${KMS_LOG_FILE_HISTORY:-0} + + + + + + + ${KMS_LOG_FILE_NAME:-log/canton_kms.log} + true + + ${KMS_LOG_IMMEDIATE_FLUSH:-true} + + + 
${KMS_LOG_FILE_NAME:-log/canton_kms.log}.%d{${KMS_LOG_FILE_ROLLING_PATTERN:-yyyy-MM-dd}}.gz + + ${KMS_LOG_FILE_HISTORY:-0} + + + + %date [%thread] %-5level %logger{35} &entityCorrelationIdTrailingSpaceReplace; + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ${LOG_LAST_ERRORS_FILE_NAME:-log/canton_errors.log} + ${LOG_FILE_APPEND:-true} + + + %date [%thread] %-5level %logger{10} &entityCorrelationIdTrailingSpaceReplace; + + + + + + + + + + + + + + + + FILE_LAST_ERRORS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/canton-3x/community/app/src/main/resources/repl/banner.txt b/canton-3x/community/app/src/main/resources/repl/banner.txt new file mode 100644 index 0000000000..0a40281ea0 --- /dev/null +++ b/canton-3x/community/app/src/main/resources/repl/banner.txt @@ -0,0 +1,10 @@ + _____ _ + / ____| | | + | | __ _ _ __ | |_ ___ _ __ + | | / _` | '_ \| __/ _ \| '_ \ + | |___| (_| | | | | || (_) | | | | + \_____\__,_|_| |_|\__\___/|_| |_| + + Welcome to Canton! + Type `help` to get started. `exit` to leave. + diff --git a/canton-3x/community/app/src/main/scala/com/digitalasset/canton/CantonAppDriver.scala b/canton-3x/community/app/src/main/scala/com/digitalasset/canton/CantonAppDriver.scala new file mode 100644 index 0000000000..aad68fac26 --- /dev/null +++ b/canton-3x/community/app/src/main/scala/com/digitalasset/canton/CantonAppDriver.scala @@ -0,0 +1,219 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton + +import better.files.File +import cats.syntax.either.* +import ch.qos.logback.classic.{Logger, LoggerContext} +import ch.qos.logback.core.status.{ErrorStatus, Status, StatusListener, WarnStatus} +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.buildinfo.BuildInfo +import com.digitalasset.canton.cli.{Cli, Command, LogFileAppender} +import com.digitalasset.canton.config.ConfigErrors.CantonConfigError +import com.digitalasset.canton.config.{CantonConfig, ConfigErrors, Generate} +import com.digitalasset.canton.environment.{Environment, EnvironmentFactory} +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.tracing.NoTracing +import com.digitalasset.canton.version.ReleaseVersion +import com.typesafe.config.{Config, ConfigFactory} +import org.slf4j.LoggerFactory + +import java.util.concurrent.atomic.AtomicReference +import scala.util.control.NonFatal + +/** The Canton main application. + * + * Starts a set of domains and participant nodes. 
+ */ +abstract class CantonAppDriver[E <: Environment] extends App with NamedLogging with NoTracing { + + protected def environmentFactory: EnvironmentFactory[E] + + protected def withManualStart(config: E#Config): E#Config + + protected def additionalVersions: Map[String, String] = Map.empty + + protected def printVersion(): Unit = { + (Map( + "Canton" -> BuildInfo.version, + "Daml Libraries" -> BuildInfo.damlLibrariesVersion, + "Supported Canton protocol versions" -> BuildInfo.protocolVersions.toString(), + ) ++ additionalVersions) foreach { case (name, version) => + Console.out.println(s"$name: $version") + } + } + + // BE CAREFUL: Set the environment variables before you touch anything related to + // logback as otherwise, the logback configuration will be read without these + // properties being considered + private val cliOptions = Cli.parse(args, printVersion()).getOrElse(sys.exit(1)) + cliOptions.installLogging() + + // Fail, if the log configuration cannot be read. + @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) + private val loggerContext = LoggerFactory.getILoggerFactory.asInstanceOf[LoggerContext] + private val logbackStatusManager = loggerContext.getStatusManager + private val killingStatusListener: StatusListener = { + case status @ (_: WarnStatus | _: ErrorStatus) => + Console.err.println(s"Unable to load log configuration.\n$status") + Console.err.flush() + sys.exit(-1) + case _: Status => // ignore + } + logbackStatusManager.add(killingStatusListener) + + // Use the root logger as named logger to avoid a prefix "CantonApp" in log files. 
+ override val loggerFactory: NamedLoggerFactory = NamedLoggerFactory.root + + // Adjust root and canton loggers which works even if a custom logback.xml is defined + Seq( + (cliOptions.levelCanton, "com.digitalasset"), + (cliOptions.levelCanton, "com.daml"), + (cliOptions.levelRoot, org.slf4j.Logger.ROOT_LOGGER_NAME), + ) + .foreach { + case (Some(level), loggerName) => + @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) + val root: Logger = LoggerFactory.getLogger(loggerName).asInstanceOf[Logger] + root.setLevel(level) + case (None, _) => + } + + logger.info(s"Starting Canton version ${ReleaseVersion.current}") + if (cliOptions.logTruncate) { + cliOptions.logFileAppender match { + case LogFileAppender.Rolling => + logger.warn( + "Ignoring log file truncation request, as it only works with flat log files, but here we use rolling log files." + ) + case LogFileAppender.Flat => + case LogFileAppender.Off => + } + } + + // Now that at least one line has been logged, deregister the killingStatusManager so that + // Canton does not die on a warning status. + logbackStatusManager.remove(killingStatusListener) + + private val environmentRef: AtomicReference[Option[E]] = new AtomicReference(None) + sys.runtime.addShutdownHook(new Thread(() => { + try { + logger.info("Shutting down...") + environmentRef.get().foreach(_.close()) + logger.info("Shutdown complete.") + } catch { + case NonFatal(exception) => + logger.error("Failed to shut down successfully.", exception) + } finally { + LoggerFactory.getILoggerFactory match { + case logbackLoggerContext: LoggerContext => + logger.info("Shutting down logger. Bye bye.") + logbackLoggerContext.stop() + case _ => + logger.warn( + "Logback is not bound via slf4j. Cannot shut down logger, this could result in lost log-messages." 
+ ) + } + } + })) + logger.debug("Registered shutdown-hook.") + + val cantonConfig: E#Config = { + val mergedUserConfigsE = NonEmpty.from(cliOptions.configFiles) match { + case None if cliOptions.configMap.isEmpty => + Left(ConfigErrors.NoConfigFiles.Error()) + case None => Right(ConfigFactory.empty()) + case Some(neConfigFiles) => CantonConfig.parseAndMergeJustCLIConfigs(neConfigFiles) + } + val mergedUserConfigs = + mergedUserConfigsE.valueOr { _ => + sys.exit(1) + } + + val configFromMap = { + import scala.jdk.CollectionConverters.* + ConfigFactory.parseMap(cliOptions.configMap.asJava) + } + val finalConfig = CantonConfig.mergeConfigs(mergedUserConfigs, Seq(configFromMap)) + + val loadedConfig = loadConfig(finalConfig) match { + case Left(_) => + if (cliOptions.configFiles.sizeCompare(1) > 0) + writeConfigToTmpFile(mergedUserConfigs) + sys.exit(1) + case Right(loaded) => + if (cliOptions.manualStart) withManualStart(loaded) + else loaded + } + if (loadedConfig.monitoring.logging.logConfigOnStartup) { + // we have two ways to log the config. both have their pro and cons. + // full means we include default values. in such a case, it's hard to figure + // out what really the config settings are. + // the other method just uses the loaded `Config` object that doesn't have default + // values, but therefore needs a separate way to handle the rendering + logger.info( + "Starting up with resolved config:\n" + + (if (loadedConfig.monitoring.logging.logConfigWithDefaults) + loadedConfig.dumpString + else + CantonConfig.renderForLoggingOnStartup(finalConfig)) + ) + } + loadedConfig + } + + private def writeConfigToTmpFile(mergedUserConfigs: Config) = { + val tmp = File.newTemporaryFile("canton-config-error-", ".conf") + logger.error( + s"An error occurred after parsing a config file that was obtained by merging multiple config " + + s"files. The resulting merged-together config file, for which the error occurred, was written to '$tmp'." 
+ ) + tmp + .write( + mergedUserConfigs + .root() + .render(CantonConfig.defaultConfigRenderer) + ) + .discard + } + + // verify that run script and bootstrap script aren't mixed + if (cliOptions.bootstrapScriptPath.isDefined) { + cliOptions.command match { + case Some(Command.RunScript(_)) => + logger.error("--bootstrap script and run script are mutually exclusive") + sys.exit(1) + case Some(Command.Generate(_)) => + logger.error("--bootstrap script and generate are mutually exclusive") + sys.exit(1) + case _ => + } + } + + private lazy val bootstrapScript: Option[CantonScript] = + cliOptions.bootstrapScriptPath + .map(CantonScriptFromFile) + + val runner: Runner[E] = cliOptions.command match { + case Some(Command.Daemon) => new ServerRunner(bootstrapScript, loggerFactory) + case Some(Command.RunScript(script)) => ConsoleScriptRunner(script, loggerFactory) + case Some(Command.Generate(target)) => + Generate.process(target, cantonConfig) + sys.exit(0) + case _ => + new ConsoleInteractiveRunner(cliOptions.noTty, bootstrapScript, loggerFactory) + } + + val environment = environmentFactory.create(cantonConfig, loggerFactory) + environmentRef.set(Some(environment)) // registering for graceful shutdown + environment.startAndReconnect(cliOptions.autoConnectLocal) match { + case Right(()) => + case Left(_) => sys.exit(1) + } + + runner.run(environment) + + def loadConfig(config: Config): Either[CantonConfigError, E#Config] + +} diff --git a/canton-3x/community/app/src/main/scala/com/digitalasset/canton/CantonCommunityApp.scala b/canton-3x/community/app/src/main/scala/com/digitalasset/canton/CantonCommunityApp.scala new file mode 100644 index 0000000000..36d6135aba --- /dev/null +++ b/canton-3x/community/app/src/main/scala/com/digitalasset/canton/CantonCommunityApp.scala @@ -0,0 +1,25 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton + +import com.digitalasset.canton.config.CantonCommunityConfig +import com.digitalasset.canton.config.ConfigErrors.CantonConfigError +import com.digitalasset.canton.environment.{ + CommunityEnvironment, + CommunityEnvironmentFactory, + EnvironmentFactory, +} +import com.typesafe.config.Config + +object CantonCommunityApp extends CantonAppDriver[CommunityEnvironment] { + + override def loadConfig(config: Config): Either[CantonConfigError, CantonCommunityConfig] = + CantonCommunityConfig.load(config) + + override protected def environmentFactory: EnvironmentFactory[CommunityEnvironment] = + CommunityEnvironmentFactory + + override protected def withManualStart(config: CantonCommunityConfig): CantonCommunityConfig = + config.copy(parameters = config.parameters.copy(manualStart = true)) +} diff --git a/canton-3x/community/app/src/main/scala/com/digitalasset/canton/Runner.scala b/canton-3x/community/app/src/main/scala/com/digitalasset/canton/Runner.scala new file mode 100644 index 0000000000..583f8f7f26 --- /dev/null +++ b/canton-3x/community/app/src/main/scala/com/digitalasset/canton/Runner.scala @@ -0,0 +1,186 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton + +import com.digitalasset.canton.console.{HeadlessConsole, InteractiveConsole} +import com.digitalasset.canton.environment.Environment +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging, TracedLogger} +import com.digitalasset.canton.tracing.{NoTracing, TraceContext} + +import java.io.{File, OutputStream, StringWriter} +import scala.io.Source +import scala.util.control.NonFatal + +/** Result for exposing the process exit code. + * All logging is expected to take place inside of the runner. 
+ */ +trait Runner[E <: Environment] extends NamedLogging { + + def run(environment: E): Unit +} + +class ServerRunner[E <: Environment]( + bootstrapScript: Option[CantonScript] = None, + override val loggerFactory: NamedLoggerFactory, +) extends Runner[E] + with NoTracing { + + def run(environment: E): Unit = + try { + def start(): Unit = { + environment + .startAll() match { + case Right(_) => logger.info("Canton started") + case Left(error) => + logger.error(s"Canton startup encountered problems: $error") + // give up as we couldn't start everything successfully + sys.exit(1) + } + } + + def startWithBootstrap(script: CantonScript): Unit = + ConsoleScriptRunner.run(environment, script, logger = logger) match { + case Right(_unit) => logger.info("Bootstrap script successfully executed.") + case Left(err) => + logger.error(s"Bootstrap script terminated with an error: $err") + sys.exit(3) + } + + bootstrapScript.fold(start())(startWithBootstrap) + } catch { + case ex: Throwable => + logger.error(s"Unexpected error while running server: ${ex.getMessage}") + logger.info("Exception causing error is:", ex) + sys.exit(2) + } +} + +class ConsoleInteractiveRunner[E <: Environment]( + noTty: Boolean = false, + bootstrapScript: Option[CantonScript], + override val loggerFactory: NamedLoggerFactory, +) extends Runner[E] { + def run(environment: E): Unit = { + val success = + try { + val consoleEnvironment = environment.createConsole() + InteractiveConsole(consoleEnvironment, noTty, bootstrapScript, logger) + } catch { + case NonFatal(_) => false + } + sys.exit(if (success) 0 else 1) + } +} + +class ConsoleScriptRunner[E <: Environment]( + scriptPath: CantonScript, + override val loggerFactory: NamedLoggerFactory, +) extends Runner[E] { + private val Ok = 0 + private val Error = 1 + + override def run(environment: E): Unit = { + val exitCode = + ConsoleScriptRunner.run(environment, scriptPath, logger) match { + case Right(_unit) => + Ok + case Left(err) => + 
logger.error(s"Script execution failed: $err")(TraceContext.empty) + Error + } + + sys.exit(exitCode) + } +} + +private class CopyOutputWriter(parent: OutputStream, logger: TracedLogger) + extends OutputStream + with NoTracing { + val buf = new StringWriter() + override def write(b: Int): Unit = { + if (b == '\n') { + // strip the ansi color commands from the string + val output = buf.toString.replaceAll("\u001B\\[[;\\d]*m", "") + logger.info(s"Console stderr output: ${output}") + buf.getBuffer.setLength(0) + } else { + buf.write(b) + } + parent.write(b) + } +} + +sealed trait CantonScript { + def path: Option[File] + def read(): Either[HeadlessConsole.IoError, String] +} +final case class CantonScriptFromFile(scriptPath: File) extends CantonScript { + override val path = Some(scriptPath) + override def read(): Either[HeadlessConsole.IoError, String] = + readScript(scriptPath) + + private def readScript(scriptPath: File): Either[HeadlessConsole.IoError, String] = + for { + path <- verifyScriptCanBeRead(scriptPath) + content <- readScriptContent(path) + } yield content + + private def verifyScriptCanBeRead(scriptPath: File): Either[HeadlessConsole.IoError, File] = + Either.cond( + scriptPath.canRead, + scriptPath, + HeadlessConsole.IoError(s"Script file not readable: $scriptPath"), + ) + + private def readScriptContent(scriptPath: File): Either[HeadlessConsole.IoError, String] = { + val source = Source.fromFile(scriptPath) + try { + Right(source.mkString) + } catch { + case NonFatal(ex: Throwable) => + Left(HeadlessConsole.IoError(s"Failed to read script file: $ex")) + } finally { + source.close() + } + } +} + +object ConsoleScriptRunner extends NoTracing { + def apply[E <: Environment]( + scriptPath: File, + loggerFactory: NamedLoggerFactory, + ): ConsoleScriptRunner[E] = + new ConsoleScriptRunner[E](CantonScriptFromFile(scriptPath), loggerFactory) + def run[E <: Environment]( + environment: E, + scriptPath: File, + logger: TracedLogger, + ): 
Either[HeadlessConsole.HeadlessConsoleError, Unit] = + run(environment, CantonScriptFromFile(scriptPath), logger) + + def run[E <: Environment]( + environment: E, + cantonScript: CantonScript, + logger: TracedLogger, + ): Either[HeadlessConsole.HeadlessConsoleError, Unit] = { + val consoleEnvironment = environment.createConsole() + try { + for { + scriptCode <- cantonScript.read() + _ <- HeadlessConsole.run( + consoleEnvironment, + scriptCode, + cantonScript.path, + // clone error stream such that we also log the error message + // unfortunately, this means that if somebody outputs INFO to stdout, + // he will observe the error twice + transformer = x => x.copy(errorStream = new CopyOutputWriter(x.errorStream, logger)), + logger = logger, + ) + } yield () + } finally { + consoleEnvironment.closeChannels() + } + } +} diff --git a/canton-3x/community/app/src/main/scala/com/digitalasset/canton/cli/Cli.scala b/canton-3x/community/app/src/main/scala/com/digitalasset/canton/cli/Cli.scala new file mode 100644 index 0000000000..70b48e3ace --- /dev/null +++ b/canton-3x/community/app/src/main/scala/com/digitalasset/canton/cli/Cli.scala @@ -0,0 +1,387 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.cli + +import ch.qos.logback.classic.Level +import com.digitalasset.canton.DiscardOps +import com.digitalasset.canton.buildinfo.BuildInfo +import scopt.OptionParser + +import java.io.File +import scala.annotation.nowarn + +sealed trait LogFileAppender + +object LogFileAppender { + object Rolling extends LogFileAppender + + object Flat extends LogFileAppender + + object Off extends LogFileAppender +} + +sealed trait LogEncoder + +object LogEncoder { + object Plain extends LogEncoder + + object Json extends LogEncoder +} + +/** CLI Options + * + * See the description for each argument in the CLI builder below. 
+ */ +final case class Cli( + configFiles: Seq[File] = Seq(), + configMap: Map[String, String] = Map(), + command: Option[Command] = None, + noTty: Boolean = false, + levelRoot: Option[Level] = None, + levelCanton: Option[Level] = None, + levelStdout: Level = Level.WARN, + logFileAppender: LogFileAppender = LogFileAppender.Rolling, + logFileRollingPattern: Option[String] = None, + kmsLogFileRollingPattern: Option[String] = None, + logFileHistory: Option[Int] = None, + kmsLogFileHistory: Option[Int] = None, + logTruncate: Boolean = false, + logFileName: Option[String] = None, + kmsLogFileName: Option[String] = None, + logEncoder: LogEncoder = LogEncoder.Plain, + logLastErrors: Boolean = true, + logLastErrorsFileName: Option[String] = None, + logImmediateFlush: Option[Boolean] = None, + kmsLogImmediateFlush: Option[Boolean] = None, + bootstrapScriptPath: Option[File] = None, + manualStart: Boolean = false, + autoConnectLocal: Boolean = false, +) { + + /** sets the properties our logback.xml is looking for */ + def installLogging(): Unit = { + setLevel(levelRoot, "LOG_LEVEL_ROOT") + + // The root level can override the canton level if root is configured with a lower level + val overrideLevelCanton = (for { + root <- levelRoot + canton <- levelCanton + } yield root.levelInt < canton.levelInt).getOrElse(false) + if (overrideLevelCanton) + setLevel(levelRoot, "LOG_LEVEL_CANTON") + else + setLevel(levelCanton, "LOG_LEVEL_CANTON") + + setLevel(Some(levelStdout), "LOG_LEVEL_STDOUT") + + if (command.isEmpty && !noTty) { + // Inform logging that this is an interactive console running that needs some additional tweaks + // for a good logging experience + System.setProperty("INTERACTIVE_STDOUT", true.toString) + } + + System.setProperty("LOG_FILE_APPEND", (!logTruncate).toString) + + Seq( + "LOG_FILE_FLAT", + "LOG_FILE_ROLLING", + "LOG_FILE_NAME", + "KMS_LOG_FILE_NAME", + "LOG_FILE_ROLLING_PATTERN", + "KMS_LOG_FILE_ROLLING_PATTERN", + "LOG_FILE_HISTORY", + 
"KMS_LOG_FILE_HISTORY", + "LOG_LAST_ERRORS", + "LOG_LAST_ERRORS_FILE_NAME", + "LOG_FORMAT_JSON", + "LOG_IMMEDIATE_FLUSH", + "KMS_LOG_IMMEDIATE_FLUSH", + ).foreach(System.clearProperty(_).discard[String]) + logFileName.foreach(System.setProperty("LOG_FILE_NAME", _)) + kmsLogFileName.foreach(System.setProperty("KMS_LOG_FILE_NAME", _)) + logLastErrorsFileName.foreach(System.setProperty("LOG_LAST_ERRORS_FILE_NAME", _)) + logFileHistory.foreach(x => System.setProperty("LOG_FILE_HISTORY", x.toString)) + kmsLogFileHistory.foreach(x => System.setProperty("KMS_LOG_FILE_HISTORY", x.toString)) + logFileRollingPattern.foreach(System.setProperty("LOG_FILE_ROLLING_PATTERN", _)) + kmsLogFileRollingPattern.foreach(System.setProperty("KMS_LOG_FILE_ROLLING_PATTERN", _)) + logFileAppender match { + case LogFileAppender.Rolling => + System.setProperty("LOG_FILE_ROLLING", "true").discard + case LogFileAppender.Flat => + System.setProperty("LOG_FILE_FLAT", "true").discard + case LogFileAppender.Off => + } + if (logLastErrors) + System.setProperty("LOG_LAST_ERRORS", "true").discard + + logEncoder match { + case LogEncoder.Plain => + case LogEncoder.Json => + System.setProperty("LOG_FORMAT_JSON", "true").discard + } + + logImmediateFlush.foreach(f => System.setProperty("LOG_IMMEDIATE_FLUSH", f.toString)) + kmsLogImmediateFlush.foreach(f => System.setProperty("KMS_LOG_IMMEDIATE_FLUSH", f.toString)) + } + + private def setLevel(levelO: Option[Level], name: String): Unit = { + val _ = levelO match { + case Some(level) => System.setProperty(name, level.levelStr) + case None => System.clearProperty(name) + } + } + +} + +@nowarn(raw"msg=unused value of type .* \(add `: Unit` to discard silently\)") +object Cli { + // The `additionalVersions` parameter allows the enterprise CLI to output the version of additional, + // enterprise-only dependencies (see `CantonAppDriver`). 
+ def parse(args: Array[String], printVersion: => Unit = ()): Option[Cli] = + parser(printVersion).parse(args, Cli()) + + private def parser(printVersion: => Unit): OptionParser[Cli] = + new scopt.OptionParser[Cli]("canton") { + + private def inColumns(first: String = "", second: String = ""): String = + f" $first%-25s$second" + + head("Canton", s"v${BuildInfo.version}") + + help('h', "help").text("Print usage") + opt[Unit]("version") + .text("Print versions") + .action { (_, _) => + printVersion.discard + sys.exit(0) + } + + opt[Seq[File]]('c', "config") + .text( + "Set configuration file(s).\n" + + inColumns(second = "If several configuration files assign values to the same key,\n") + + inColumns(second = "the last value is taken.") + ) + .valueName(",,...") + .unbounded() + .action((files, cli) => cli.copy(configFiles = cli.configFiles ++ files)) + + opt[Map[String, String]]('C', "config key-value's") + .text( + "Set configuration key value pairs directly.\n" + + inColumns(second = "Can be useful for providing simple short config info.") + ) + .valueName("=,=") + .unbounded() + .action { (map, cli) => + cli.copy(configMap = + map ++ cli.configMap + ) // the values on the right of the ++ operator are preferred for the same key. thus in case of repeated keys, the first defined is taken. 
+ } + + opt[File]("bootstrap") + .text("Set a script to run on startup") + .valueName("") + .action((script, cli) => cli.copy(bootstrapScriptPath = Some(script))) + + opt[Unit]("no-tty") + .text("Do not use a tty") + .action((_, cli) => cli.copy(noTty = true)) + + opt[Unit]("manual-start") + .text("Don't automatically start the nodes") + .action((_, cli) => cli.copy(manualStart = true)) + + opt[Unit]("auto-connect-local") + .text("Automatically connect all local participants to all local domains") + .action((_, cli) => cli.copy(autoConnectLocal = true)) + + note(inColumns(first = "-D=", second = "Set a JVM property value")) + + note("\nLogging Options:") // Enforce a newline in the help text + + opt[Unit]('v', "verbose") + .text("Canton logger level -> DEBUG") + .action((_, cli) => cli.copy(levelCanton = Some(Level.DEBUG))) + + opt[Unit]("debug") + .text("Console/stdout level -> INFO, root logger -> DEBUG") + .action((_, cli) => + cli.copy( + levelRoot = Some(Level.DEBUG), + levelCanton = Some(Level.DEBUG), + levelStdout = Level.INFO, + ) + ) + + opt[Unit]("log-truncate") + .text("Truncate log file on startup.") + .action((_, cli) => cli.copy(logTruncate = true)) + + implicit val levelRead: scopt.Read[Level] = scopt.Read.reads(Level.valueOf) + opt[Level]("log-level-root") + .text("Log-level of the root logger") + .valueName("") + .action((level, cli) => cli.copy(levelRoot = Some(level), levelCanton = Some(level))) + + opt[Level]("log-level-canton") + .text("Log-level of the Canton logger") + .valueName("") + .action((level, cli) => cli.copy(levelCanton = Some(level))) + + opt[Level]("log-level-stdout") + .text("Log-level of stdout") + .valueName("") + .action((level, cli) => cli.copy(levelStdout = level)) + + opt[String]("log-file-appender") + .text("Type of log file appender") + .valueName("rolling(default)|flat|off") + .action((typ, cli) => + typ.toLowerCase match { + case "rolling" => cli.copy(logFileAppender = LogFileAppender.Rolling) + case "off" => 
cli.copy(logFileAppender = LogFileAppender.Off) + case "flat" => cli.copy(logFileAppender = LogFileAppender.Flat) + case _ => + throw new IllegalArgumentException( + s"Invalid command line argument: unknown log-file-appender $typ" + ) + } + ) + + opt[String]("log-file-name") + .text("Name and location of log-file, default is log/canton.log") + .action((name, cli) => cli.copy(logFileName = Some(name))) + + opt[String]("kms-log-file-name") + .text("Name and location of KMS log-file, default is log/canton_kms.log") + .action((name, cli) => cli.copy(kmsLogFileName = Some(name))) + + opt[Int]("log-file-rolling-history") + .text("Number of history files to keep when using rolling log file appender.") + .action((history, cli) => cli.copy(logFileHistory = Some(history))) + + opt[Int]("kms-log-file-rolling-history") + .text("Number of history KMS files to keep when using rolling log file appender.") + .action((history, cli) => cli.copy(kmsLogFileHistory = Some(history))) + + opt[String]("log-file-rolling-pattern") + .text("Log file suffix pattern of rolling file appender. Default is 'yyyy-MM-dd'.") + .action((pattern, cli) => cli.copy(logFileRollingPattern = Some(pattern))) + + opt[String]("kms-log-file-rolling-pattern") + .text("KMS log file suffix pattern of rolling file appender. Default is 'yyyy-MM-dd'.") + .action((pattern, cli) => cli.copy(kmsLogFileRollingPattern = Some(pattern))) + + opt[String]("log-encoder") + .text("Log encoder: plain|json") + .action { + case ("json", cli) => cli.copy(logEncoder = LogEncoder.Json) + case ("plain", cli) => cli.copy(logEncoder = LogEncoder.Plain) + case (other, _) => + throw new IllegalArgumentException(s"Unsupported logging encoder $other") + } + + opt[Boolean]("log-immediate-flush") + .text( + """Determines whether to immediately flush log output to the log file. + |Enable to avoid an incomplete log file in case of a crash. 
+ |Disable to reduce the load on the disk caused by logging.""".stripMargin + ) + .valueName("true(default)|false") + .action((enabled, cli) => cli.copy(logImmediateFlush = Some(enabled))) + + opt[Boolean]("kms-log-immediate-flush") + .text( + """Determines whether to immediately flush KMS log output to the KMS log file. + |Enable to avoid an incomplete log file in case of a crash. + |Disable to reduce the load on the disk caused by logging.""".stripMargin + ) + .valueName("true(default)|false") + .action((enabled, cli) => cli.copy(kmsLogImmediateFlush = Some(enabled))) + + opt[String]("log-profile") + .text("Preconfigured logging profiles: (container)") + .action((profile, cli) => + profile.toLowerCase match { + case "container" => + cli.copy( + logFileAppender = LogFileAppender.Rolling, + logFileHistory = Some(10), + logFileRollingPattern = Some("yyyy-MM-dd-HH"), + levelStdout = Level.DEBUG, + ) + case _ => throw new IllegalArgumentException(s"Unknown log profile $profile") + } + ) + + opt[Boolean]("log-last-errors") + .text("Capture events for logging.last_errors command") + .action((isEnabled, cli) => cli.copy(logLastErrors = isEnabled)) + + note("") // Enforce a newline in the help text + + note("Use the JAVA_OPTS environment variable to set JVM parameters.") + + note("") // Enforce a newline in the help text + + cmd("daemon") + .text( + "Start all nodes automatically and run them without having a console (REPL).\n" + + "Nodes can be controlled through the admin API." + ) + .action((_, cli) => cli.copy(command = Some(Command.Daemon))) + .children() + + note("") // Enforce a newline in the help text + + cmd("run") + .text( + "Run a console script.\n" + + "Stop all nodes when the script has terminated." 
+ ) + .children( + arg[File]("") + .text("the script to run") + .action((script, cli) => cli.copy(command = Some(Command.RunScript(script)))) + ) + + note("") // Enforce a newline in the help text + + implicit val readTarget: scopt.Read[Command.Generate.Target] = scopt.Read.reads { + case "remote-config" => Command.Generate.RemoteConfig + case x => throw new IllegalArgumentException(s"Unknown target $x") + } + cmd("generate") + .text("Generate configurations") + .children( + arg[Command.Generate.Target]("") + .text("generation target (remote-config)") + .action((target, cli) => cli.copy(command = Some(Command.Generate(target)))) + ) + + checkConfig(cli => + if (cli.configFiles.isEmpty && cli.configMap.isEmpty) { + failure( + "at least one config has to be defined either as files (-c) or as key-values (-C)" + ) + } else success + ) + + checkConfig(cli => + if ( + cli.autoConnectLocal && cli.command.exists { + case Command.Daemon => false + case Command.RunScript(_) => true + case Command.Generate(_) => true + } + ) { + failure(s"auto-connect-local does not work with run-script or generate") + } else success + ) + + override def showUsageOnError: Option[Boolean] = Some(true) + + } +} diff --git a/canton-3x/community/app/src/main/scala/com/digitalasset/canton/cli/Command.scala b/canton-3x/community/app/src/main/scala/com/digitalasset/canton/cli/Command.scala new file mode 100644 index 0000000000..394c4d72b1 --- /dev/null +++ b/canton-3x/community/app/src/main/scala/com/digitalasset/canton/cli/Command.scala @@ -0,0 +1,29 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.cli + +import java.io.File + +sealed trait Command {} + +object Command { + + /** Run the process as a server (rather than an interactive repl) + */ + object Daemon extends Command + + /** Run a console script then close + * + * @param scriptPath the path to the script + */ + final case class RunScript(scriptPath: File) extends Command + + final case class Generate(target: Generate.Target) extends Command + + object Generate { + sealed trait Target + + object RemoteConfig extends Target + } +} diff --git a/canton-3x/community/app/src/main/scala/com/digitalasset/canton/config/Generate.scala b/canton-3x/community/app/src/main/scala/com/digitalasset/canton/config/Generate.scala new file mode 100644 index 0000000000..fb3bc890d8 --- /dev/null +++ b/canton-3x/community/app/src/main/scala/com/digitalasset/canton/config/Generate.scala @@ -0,0 +1,42 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.config + +import better.files.{File as BFile} +import com.digitalasset.canton.cli.Command +import com.digitalasset.canton.environment.Environment +import pureconfig.ConfigWriter + +object Generate { + + private def write[A](name: String, prefix: String, config: A)(implicit + configWriter: ConfigWriter[A] + ): Unit = { + val _ = BFile(s"remote-${name}.conf") + .write( + s"canton.remote-${prefix}.${name} {" + System.lineSeparator() + configWriter + .to(config) + .render(CantonConfig.defaultConfigRenderer) + System.lineSeparator() + "}" + System + .lineSeparator() + ) + } + + def process[E <: Environment](command: Command.Generate.Target, config: E#Config): Unit = + command match { + case Command.Generate.RemoteConfig => + val writers = new CantonConfig.ConfigWriters(confidential = false) + import writers.* + config.participantsByString.map(x => (x._1, x._2.toRemoteConfig)).foreach { + case (name, config) => + write(name, "participants", config) + } + + config.domainsByString.map(x => (x._1, x._2.toRemoteConfig)).foreach { + case (name, config) => + write(name, "domains", config) + } + + } + +} diff --git a/canton-3x/community/app/src/main/scala/com/digitalasset/canton/console/BindingsBridge.scala b/canton-3x/community/app/src/main/scala/com/digitalasset/canton/console/BindingsBridge.scala new file mode 100644 index 0000000000..570f05f666 --- /dev/null +++ b/canton-3x/community/app/src/main/scala/com/digitalasset/canton/console/BindingsBridge.scala @@ -0,0 +1,12 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.console + +import ammonite.interp.api.APIHolder +import ammonite.util.Bind + +/** ammonite requires a ApiHolder in this pattern to make items through bindings available within the dynamic Console environment. 
+ */ +final case class BindingsHolder(bindings: IndexedSeq[Bind[_]]) +object BindingsBridge extends APIHolder[BindingsHolder] diff --git a/canton-3x/community/app/src/main/scala/com/digitalasset/canton/console/HeadlessConsole.scala b/canton-3x/community/app/src/main/scala/com/digitalasset/canton/console/HeadlessConsole.scala new file mode 100644 index 0000000000..677af5dd35 --- /dev/null +++ b/canton-3x/community/app/src/main/scala/com/digitalasset/canton/console/HeadlessConsole.scala @@ -0,0 +1,259 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.console + +import ammonite.Main +import ammonite.interp.Interpreter +import ammonite.runtime.Frame +import ammonite.util.Res.{Exception, Failing, Failure, Success} +import ammonite.util.* +import cats.syntax.either.* +import com.digitalasset.canton.console.HeadlessConsole.{ + HeadlessConsoleError, + convertAmmoniteResult, + createInterpreter, + initializePredef, + runCode, +} +import com.digitalasset.canton.logging.TracedLogger +import com.digitalasset.canton.tracing.NoTracing +import com.digitalasset.canton.util.ErrorUtil +import os.PathConvertible.* + +import java.io.File +import java.util.concurrent.atomic.{AtomicInteger, AtomicReference} +import scala.util.Try + +class HeadlessConsole( + consoleEnvironment: ConsoleEnvironment, + transformer: Main => Main = identity, + logger: TracedLogger, +) extends AutoCloseable { + + val (lock, baseOptions) = + AmmoniteConsoleConfig.create( + consoleEnvironment.environment.config.parameters.console, + predefCode = "", + welcomeBanner = None, + isRepl = false, + logger, + ) + + private val interpreterO = new AtomicReference[Option[Interpreter]](None) + private val currentLine = new AtomicInteger(10000000) + + def init(): Either[HeadlessConsoleError, Unit] = { + val options = transformer(baseOptions) + for { + interpreter <- 
Try(createInterpreter(options)).toEither.leftMap( + HeadlessConsole.RuntimeError("Failed to initialize console", _) + ) + bindings <- consoleEnvironment.bindings.leftMap( + HeadlessConsole.RuntimeError("Unable to create the console bindings", _) + ) + _ <- initializePredef( + interpreter, + bindings, + consoleEnvironment.predefCode(_), + logger, + ) + } yield { + interpreterO.set(Some(interpreter)) + } + } + + private def runModule( + code: String, + path: Option[File] = None, + ): Either[HeadlessConsoleError, Unit] = + for { + interpreter <- interpreterO + .get() + .toRight(HeadlessConsole.CompileError("Interpreter is not initialized")) + _ <- runCode(interpreter, code, path, logger) + } yield () + + def runLine(line: String): Either[HeadlessConsoleError, Unit] = for { + interpreter <- interpreterO + .get() + .toRight(HeadlessConsole.CompileError("Interpreter is not initialized")) + _ <- convertAmmoniteResult( + interpreter + .processExec(line, currentLine.incrementAndGet(), () => ()), + logger, + ) + } yield () + + override def close(): Unit = { + lock.release() + } +} + +/** Creates an interpreter but with matching bindings to the InteractiveConsole for running scripts non-interactively + */ +@SuppressWarnings(Array("org.wartremover.warts.Any")) +object HeadlessConsole extends NoTracing { + + sealed trait HeadlessConsoleError + + final case class CompileError(message: String) extends HeadlessConsoleError { + override def toString: String = message + } + + final case class IoError(message: String) extends HeadlessConsoleError { + override def toString: String = message + } + + final case class RuntimeError(message: String, cause: Throwable) extends HeadlessConsoleError { + override def toString: String = { + val messageWithSeparator = if (message.isEmpty) "" else message + " " + val exceptionInfo = ErrorUtil.messageWithStacktrace(cause) + messageWithSeparator + exceptionInfo + } + } + + def run( + consoleEnvironment: ConsoleEnvironment, + code: String, + path: 
Option[File] = None, + transformer: Main => Main = identity, + logger: TracedLogger, + ): Either[HeadlessConsoleError, Unit] = { + val console = new HeadlessConsole(consoleEnvironment, transformer, logger) + try { + for { + _ <- console.init() + _ <- console.runModule(code, path) + } yield () + } finally { + console.close() + } + } + + private def initializePredef( + interpreter: Interpreter, + bindings: IndexedSeq[Bind[_]], + interactivePredef: Boolean => String, + logger: TracedLogger, + ): Either[HeadlessConsoleError, Unit] = { + val bindingsPredef = generateBindPredef(bindings) + + val holder = Seq( + ( + // This has to match the object name of the implementation that extends APIHolder[_} + objectClassNameWithoutSuffix(BindingsBridge.getClass), + "canton", + BindingsHolder(bindings), + ) + ) + + val result = interpreter.initializePredef( + basePredefs = Seq( + PredefInfo(Name("BindingsPredef"), bindingsPredef, hardcoded = false, None), + PredefInfo( + Name("CantonImplicitPredef"), + interactivePredef(false), + hardcoded = false, + None, + ), + ), + customPredefs = Seq(), + extraBridges = holder, + ) + + // convert to an either and then map error if set to our own types + result.toLeft(()).left.map(err => convertAmmoniteError(err._1, logger)) + } + + private def runCode( + interpreter: Interpreter, + code: String, + path: Option[File], + logger: TracedLogger, + ): Either[HeadlessConsoleError, Unit] = { + // the source details for our wrapper object that our code is compiled into + val source = Util.CodeSource( + wrapperName = Name("canton-script"), + flexiblePkgName = Seq(Name("interpreter")), + pkgRoot = Seq(Name("ammonite"), Name("canton")), // has to be rooted under ammonite + path.map(path => os.Path(path.getAbsolutePath)), + ) + + val result = interpreter.processModule(code, source, autoImport = false, "", hardcoded = false) + + convertAmmoniteResult(result, logger) + } + + /** Converts a return value from Ammonite into: + * - Unit if successful + * - Our 
own error hierarchy if the Ammonite error could be mapped + * @throws java.lang.RuntimeException If the value is unknown + */ + private def convertAmmoniteResult( + result: Res[_], + logger: TracedLogger, + ): Either[HeadlessConsoleError, Unit] = + result match { + case Success(_) => + Right(()) + case failing: Failing => Left(convertAmmoniteError(failing, logger)) + case unexpected => + logger.error("Unexpected result from ammonite: {}", unexpected) + sys.error("Unexpected result from ammonite") + } + + /** Converts a failing return value from Ammonite into our own error types. + * @throws java.lang.RuntimeException If the failing error is unknown + */ + private def convertAmmoniteError(result: Failing, logger: TracedLogger): HeadlessConsoleError = + result match { + case Failure(msg) => CompileError(msg) + case Exception(cause, msg) => RuntimeError(msg, cause) + case unexpected => + logger.error("Unexpected error result from ammonite: {}", unexpected) + sys.error("Unexpected error result from ammonite") + } + + @SuppressWarnings(Array("org.wartremover.warts.Null")) + private def createInterpreter(options: Main): Interpreter = { + val (colorsRef, printer) = Interpreter.initPrinters( + options.colors, + options.outputStream, + options.errorStream, + options.verboseOutput, + ) + val frame = Frame.createInitial() + + new Interpreter( + compilerBuilder = ammonite.compiler.CompilerBuilder, + parser = ammonite.compiler.Parsers, + printer = printer, + storage = options.storageBackend, + wd = options.wd, + colors = colorsRef, + verboseOutput = options.verboseOutput, + getFrame = () => frame, + createFrame = () => sys.error("Session loading / saving is not supported"), + initialClassLoader = null, + replCodeWrapper = options.replCodeWrapper, + scriptCodeWrapper = options.scriptCodeWrapper, + alreadyLoadedDependencies = options.alreadyLoadedDependencies, + ) + } + + private def generateBindPredef(binds: IndexedSeq[Bind[_]]): String = + binds.zipWithIndex + .map { case (b, 
idx) => + s""" + |val ${b.name} = com.digitalasset.canton.console + | .BindingsBridge + | .value0 + | .bindings($idx) + | .value + | .asInstanceOf[${b.typeTag.tpe}] + """.stripMargin + } + .mkString(System.lineSeparator) + +} diff --git a/canton-3x/community/app/src/main/scala/com/digitalasset/canton/console/InteractiveConsole.scala b/canton-3x/community/app/src/main/scala/com/digitalasset/canton/console/InteractiveConsole.scala new file mode 100644 index 0000000000..f34160be24 --- /dev/null +++ b/canton-3x/community/app/src/main/scala/com/digitalasset/canton/console/InteractiveConsole.scala @@ -0,0 +1,162 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.console + +import ammonite.compiler.Parsers +import ammonite.interp.Watchable +import ammonite.util.{Res, *} +import com.digitalasset.canton.CantonScript +import com.digitalasset.canton.logging.TracedLogger +import com.digitalasset.canton.tracing.NoTracing +import com.digitalasset.canton.util.ResourceUtil.withResource + +import java.io.{File, InputStream} +import java.lang.System.lineSeparator +import scala.io.Source +import scala.util.Try + +/** Will create a real REPL for interactive entry and evaluation of commands + */ +@SuppressWarnings(Array("org.wartremover.warts.Any")) +object InteractiveConsole extends NoTracing { + def apply( + consoleEnvironment: ConsoleEnvironment, + noTty: Boolean = false, + bootstrapScript: Option[CantonScript] = None, + logger: TracedLogger, + ): Boolean = { + + val (_lock, baseOptions) = AmmoniteConsoleConfig.create( + consoleEnvironment.environment.config.parameters.console, + // for including implicit conversions + predefCode = + consoleEnvironment.predefCode(interactive = true, noTty = noTty) + lineSeparator(), + welcomeBanner = Some(loadBanner()), + isRepl = true, + logger, + ) + // where are never going to release the lock here + + val options = 
baseOptions + + // instead of using Main.run() from ammonite, we "inline" + // that code here as "startup" in order to include the + // bootstrap script in the beginning + // the issue is that most bootstrap scripts require the bound repl arguments + // (such as all, help, participant1, etc.), which are made available only here + // so we can't run Main.runScript or so as the "result" of the script are lost then + // in the REPL. + def startup(replArgs: IndexedSeq[Bind[_]]): (Res[Any], Seq[(Watchable, Long)]) = { + options.instantiateRepl(replArgs) match { + case Left(missingPredefInfo) => missingPredefInfo + case Right(repl) => + repl.initializePredef().getOrElse { + // warm up the compilation + val warmupThread = new Thread(() => { + val _ = repl.warmup() + }) + warmupThread.setDaemon(true) + warmupThread.start() + // load and run bootstrap script + val initRes = bootstrapScript.map(fname => { + // all we do is to write interp.load.module(...) into the console and let it interpret it + // the lines here are stolen from Repl.warmup() + logger.info(s"Running startup script $fname") + val loadModuleCode = fname.path + .map { (f: File) => + // Try to move the script to a temp file, otherwise the name of the file can shadow scala variables in the script + Try { + val tmp = better.files.File.newTemporaryFile() + better.files.File(f.getAbsolutePath).copyTo(tmp, overwrite = true) + logger.debug( + s"Copied ${f.getAbsolutePath} to temporary file ${tmp.pathAsString}" + ) + tmp.toJava + }.fold( + { e => + logger.debug( + s"Could not copy boostrap script to temp file, using original file", + e, + ) + f + }, + identity, + ) + } + .map(p => "interp.load.module(os.Path(" + toStringLiteral(p.getAbsolutePath) + "))") + .getOrElse(fname.read().getOrElse("")) + val stmts = Parsers + .split(loadModuleCode) + .getOrElse( + sys.error("Expected parser to always return a success or failure") + ) match { // `Parsers.split` returns an Option but should always be Some as we always provide 
code + case Left(error) => sys.error(s"Unable to parse code: $error") + case Right(parsed) => parsed + } + // if we run this with currentLine = 0, it will break the console output + repl.interp.processLine(loadModuleCode, stmts, 10000000, silent = true, () => ()) + }) + + // now run the repl or exit if the bootstrap script failed + initRes match { + case Some(Res.Success(_)) | None => + val exitValue = Res.Success(repl.run()) + (exitValue.map(repl.beforeExit), repl.interp.watchedValues.toSeq) + case Some(a @ Res.Exception(x, y)) => + val additionalMessage = if (y.isEmpty) "" else s", $y" + logger.error( + s"Running bootstrap script failed with an exception (${x.getMessage}$additionalMessage)!" + ) + logger.debug("Ammonite exception thrown is", x) + (a, repl.interp.watchedValues.toSeq) + case Some(x) => + logger.error(s"Running bootstrap script failed with ${x}") + (x, repl.interp.watchedValues.toSeq) + } + } + } + } + + consoleEnvironment.bindings match { + case Left(exception) => + System.err.println(exception.getMessage) + logger.debug("Unable to initialize the console bindings", exception) + false + case Right(bindings) => + val (result, _) = startup(bindings) + result match { + // as exceptions are caught when in the REPL this is almost certainly from code in the predef + case Res.Exception(exception, _) => + System.err.println(exception.getMessage) + logger.debug("Execution of interactive script returned exception", exception) + false + case Res.Failure(err) => + System.err.println(err) + logger.debug(s"Execution of interactive script returned failure ${err}") + false + case _ => + true + } + } + } + + /** Turns the given String into a string literal suitable for including in scala code. + * Includes adding surrounding quotes. + * e.g. 
`some\\path` will return `"some\\\\path"` + */ + private def toStringLiteral(raw: String): String = { + // uses the scala reflection primitives but doesn't actually do any reflection + import scala.reflect.runtime.universe.* + + Literal(Constant(raw)).toString() + } + + private def loadBanner(): String = { + val stream: InputStream = Option(getClass.getClassLoader.getResourceAsStream("repl/banner.txt")) + .getOrElse(sys.error("banner resource not found")) + + withResource(stream) { Source.fromInputStream(_).mkString } + } + +} diff --git a/canton-3x/community/app/src/pack/bin/canton b/canton-3x/community/app/src/pack/bin/canton new file mode 100755 index 0000000000..bbf7ee6447 --- /dev/null +++ b/canton-3x/community/app/src/pack/bin/canton @@ -0,0 +1,149 @@ +#!/bin/sh +#/*-------------------------------------------------------------------------- +# * Copyright 2012 Taro L. Saito +# * +# * Licensed under the Apache License, Version 2.0 (the "License"); +# * you may not use this file except in compliance with the License. +# * You may obtain a copy of the License at +# * +# * http://www.apache.org/licenses/LICENSE-2.0 +# * +# * Unless required by applicable law or agreed to in writing, software +# * distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. +# *--------------------------------------------------------------------------*/ + +if [ -z "$PROG_HOME" ] ; then + ## resolve links - $0 may be a link to PROG_HOME + PRG="$0" + + # need this for relative symlinks + while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG="`dirname "$PRG"`/$link" + fi + done + + saveddir=`pwd` + + PROG_HOME=`dirname "$PRG"`/.. 
+ + # make it fully qualified + PROG_HOME=`cd "$PROG_HOME" && pwd` + + cd "$saveddir" +fi + + +cygwin=false +mingw=false +darwin=false +case "`uname`" in + CYGWIN*) cygwin=true + ;; + MINGW*) mingw=true + ;; + Darwin*) darwin=true + if [ -z "$JAVA_VERSION" ] ; then + JAVA_VERSION="CurrentJDK" + else + echo "Using Java version: $JAVA_VERSION" 1>&2 + fi + if [ -z "$JAVA_HOME" ] ; then + JAVA_HOME=/System/Library/Frameworks/JavaVM.framework/Versions/${JAVA_VERSION}/Home + fi + JAVA_OPTS="$JAVA_OPTS -Xdock:name=\"${PROG_NAME}\" -Xdock:icon=\"$PROG_HOME/REPLACE_MAC_ICON_FILE\" -Dapple.laf.useScreenMenuBar=true" + JAVACMD="`which java`" + ;; +esac + +# Resolve JAVA_HOME from javac command path +if [ -z "$JAVA_HOME" ]; then + javaExecutable="`which javac`" + if [ -n "$javaExecutable" -a -f "$javaExecutable" -a ! "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then + # readlink(1) is not available as standard on Solaris 10. + readLink=`which readlink` + if [ ! `expr "$readLink" : '\([^ ]*\)'` = "no" ]; then + javaExecutable="`readlink -f \"$javaExecutable\"`" + javaHome="`dirname \"$javaExecutable\"`" + javaHome=`expr "$javaHome" : '\(.*\)/bin'` + JAVA_HOME="$javaHome" + export JAVA_HOME + fi + fi +fi + + +if [ -z "$JAVACMD" ] ; then + if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + else + JAVACMD="`which java`" + fi +fi + +if [ ! -x "$JAVACMD" ] ; then + echo "Error: JAVA_HOME is not defined correctly." 1>&2 + echo " We cannot execute $JAVACMD" 1>&2 + exit 1 +fi + +if [ -z "$JAVA_HOME" ] ; then + echo "Warning: JAVA_HOME environment variable is not set." 
1>&2 +fi + +# Path separator used by EXTRA_CLASSPATH +CLASSPATH_SEPARATOR=":" + +# For Cygwin, switch paths to Windows-mixed format before running java +if $cygwin; then + [ -n "$PROG_HOME" ] && + PROG_HOME=`cygpath -am "$PROG_HOME"` + [ -n "$JAVA_HOME" ] && + JAVA_HOME=`cygpath -am "$JAVA_HOME"` + CLASSPATH_SEPARATOR=";" +fi + +# For Migwn, ensure paths are in UNIX format before anything is touched +if $mingw ; then + [ -n "$PROG_HOME" ] && + PROG_HOME="`(cd "$PROG_HOME"; pwd -W | sed 's|/|\\\\|g')`" + [ -n "$JAVA_HOME" ] && + JAVA_HOME="`(cd "$JAVA_HOME"; pwd -W | sed 's|/|\\\\|g')`" + CLASSPATH_SEPARATOR=";" +fi + + +PROG_NAME=REPLACE_PROG_NAME +PROG_VERSION=REPLACE_VERSION +PROG_REVISION=REPLACE_REVISION + +for arg do + shift + case $arg in + -D*) JAVA_OPTS="$JAVA_OPTS $arg" ;; + *) set -- "$@" "$arg" ;; + esac +done + +eval exec "\"$JAVACMD\"" \ + REPLACE_JVM_OPTS \ + ${JAVA_OPTS} \ + -Dprog.home="'${PROG_HOME}'" \ + -Dprog.version="${PROG_VERSION}" \ + -Dprog.revision="${PROG_REVISION}" \ + -cp \"${PROG_HOME}/REPLACE_JAR${CLASSPATH_SEPARATOR}${EXTRA_CLASSPATH}\" \ + REPLACE_MAIN_CLASS \ + \"\$@\" +exit $? diff --git a/canton-3x/community/app/src/pack/bin/canton.bat b/canton-3x/community/app/src/pack/bin/canton.bat new file mode 100644 index 0000000000..2646d76710 --- /dev/null +++ b/canton-3x/community/app/src/pack/bin/canton.bat @@ -0,0 +1,117 @@ +@REM ---------------------------------------------------------------------------- +@REM Licensed to the Apache Software Foundation (ASF) under one +@REM or more contributor license agreements. See the NOTICE file +@REM distributed with this work for additional information +@REM regarding copyright ownership. The ASF licenses this file +@REM to you under the Apache License, Version 2.0 (the +@REM "License"); you may not use this file except in compliance +@REM with the License. 
You may obtain a copy of the License at +@REM +@REM http://www.apache.org/licenses/LICENSE-2.0 +@REM +@REM Unless required by applicable law or agreed to in writing, +@REM software distributed under the License is distributed on an +@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +@REM KIND, either express or implied. See the License for the +@REM specific language governing permissions and limitations +@REM under the License. +@REM ---------------------------------------------------------------------------- + +@REM ---------------------------------------------------------------------------- +@REM based on sbt-pack launch script +@REM ---------------------------------------------------------------------------- + +@echo off + +@REM set %HOME% to equivalent of $HOME +if "%HOME%" == "" (set HOME=%HOMEDRIVE%%HOMEPATH%) + +set ERROR_CODE=0 + +@REM set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" @setlocal + +@REM ==== START VALIDATION ==== +if not "%JAVA_HOME%" == "" goto OkJHome + +for /f %%j in ("java.exe") do ( + set JAVA_EXE="%%~$PATH:j" + goto init +) + +:OkJHome +if exist "%JAVA_HOME%\bin\java.exe" ( + SET JAVA_EXE="%JAVA_HOME%\bin\java.exe" + goto init +) + +echo. 1>&2 +echo ERROR: JAVA_HOME is set to an invalid directory. 1>&2 +echo JAVA_HOME = %JAVA_HOME% 1>&2 +echo Please set the JAVA_HOME variable in your environment to match the 1>&2 +echo location of your Java installation 1>&2 +echo. 1>&2 +goto error + +:init +@REM Decide how to startup depending on the version of windows + +@REM -- Win98ME +if NOT "%OS%"=="Windows_NT" goto Win9xArg + +@REM -- 4NT shell +if "%@eval[2+2]" == "4" goto 4NTArgs + +@REM -- Regular WinNT shell +set CMD_LINE_ARGS=%* +goto endInit + +@REM The 4NT Shell from jp software +:4NTArgs +set CMD_LINE_ARGS=%$ +goto endInit + +:Win9xArg +@REM Slurp the command line arguments. This loop allows for an unlimited number +@REM of agruments (up to the command line limit, anyway). 
+set CMD_LINE_ARGS= +:Win9xApp +if %1a==a goto endInit +set CMD_LINE_ARGS=%CMD_LINE_ARGS% %1 +shift +goto Win9xApp + +@REM Reaching here means variables are defined and arguments have been captured +:endInit + +SET PROG_HOME=%~dp0.. +SET PSEP=; + +@REM Start Java program +:runm2 +SET CMDLINE=%JAVA_EXE% REPLACE_JVM_OPTS %JAVA_OPTS% -Dprog.home="%PROG_HOME%" -Dprog.version="REPLACE_VERSION" -Dprog.revision="REPLACE_REVISION" -cp %PROG_HOME%\REPLACE_JAR;%EXTRA_CLASSPATH% REPLACE_MAIN_CLASS %CMD_LINE_ARGS% +%CMDLINE% +if ERRORLEVEL 1 goto error +goto end + +:error +if "%OS%"=="Windows_NT" @endlocal +set ERROR_CODE=1 + +:end +@REM set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" goto endNT + +@REM For old DOS remove the set variables from ENV - we assume they were not set +@REM before we started - at least we don't leave any baggage around +set JAVA_EXE= +set CMD_LINE_ARGS= +set CMDLINE= +set PSEP= +goto postExec + +:endNT +@endlocal + +:postExec +exit /B %ERROR_CODE% diff --git a/canton-3x/community/app/src/pack/examples/01-simple-topology/README.md b/canton-3x/community/app/src/pack/examples/01-simple-topology/README.md new file mode 100644 index 0000000000..96ea907794 --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/01-simple-topology/README.md @@ -0,0 +1,16 @@ +# Simple Topology Example + +The simple topology example features a simple setup, starting two participants named `participant1` +and `participant2`, and a domain named `mydomain` in a single process. + +How to run the example is featured in the [getting started tutorial]( +https://docs.daml.com/canton/tutorials/getting_started.html#starting-canton). + +The second file contains a set of Canton console commands that are run in order to connect the participants together +and test the connection. 
+ +The simple topology example can be invoked using + +``` + ../../bin/canton -c simple-topology.conf --bootstrap simple-ping.canton +``` \ No newline at end of file diff --git a/canton-3x/community/app/src/pack/examples/01-simple-topology/simple-ping.canton b/canton-3x/community/app/src/pack/examples/01-simple-topology/simple-ping.canton new file mode 100644 index 0000000000..6dd6485e6e --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/01-simple-topology/simple-ping.canton @@ -0,0 +1,33 @@ +// start all local instances defined in the configuration file +nodes.local.start() + +// Connect participant1 to mydomain using the connect macro. +// The connect macro will inspect the domain configuration to find the correct URL and Port. +// The macro is convenient for local testing, but obviously doesn't work in a distributed setup. +participant1.domains.connect_local(mydomain) + +val mydomainPort = Option(System.getProperty("canton-examples.mydomain-port")).getOrElse("5018") + +// Connect participant2 to mydomain using just the target URL and a local name we use to refer to this particular +// connection. This is actually everything Canton requires and this second type of connect call can be used +// in order to connect to a remote Canton domain. +// +// The connect call is just a wrapper that invokes the `domains.register`, `domains.get_agreement` and `domains.accept_agreement` calls. +// +// The address can be either HTTP or HTTPS. From a security perspective, we do assume that we either trust TLS to +// initially introduce the domain. If we don't trust TLS for that, we can also optionally include a so called +// EssentialState that establishes the trust of the participant to the domain. +// Whether a domain will let a participant connect or not is at the discretion of the domain and can be configured +// there. 
While Canton establishes the connection, we perform a handshake, exchanging keys, authorizing the connection +// and verifying version compatibility. +participant2.domains.connect("mydomain", s"http://localhost:$mydomainPort") + +// The above connect operation is asynchronous. It is generally at the discretion of the domain +// to decide if a participant can join and when. Therefore, we need to asynchronously wait here +// until the participant observes its activation on the domain. As the domain is configured to be +// permissionless in this example, the approval will be granted immediately. +utils.retry_until_true { + participant2.domains.active("mydomain") +} + +participant2.health.ping(participant1) diff --git a/canton-3x/community/app/src/pack/examples/01-simple-topology/simple-topology.conf b/canton-3x/community/app/src/pack/examples/01-simple-topology/simple-topology.conf new file mode 100644 index 0000000000..dc6de08587 --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/01-simple-topology/simple-topology.conf @@ -0,0 +1,23 @@ +canton { + participants { + participant1 { + storage.type = memory + admin-api.port = 5012 + ledger-api.port = 5011 + } + participant2 { + storage.type = memory + admin-api.port = 5022 + ledger-api.port = 5021 + } + } + domains { + mydomain { + storage.type = memory + public-api.port = 5018 + admin-api.port = 5019 + } + } + // enable ledger_api commands for our getting started guide + features.enable-testing-commands = yes +} diff --git a/canton-3x/community/app/src/pack/examples/02-global-domain/README.md b/canton-3x/community/app/src/pack/examples/02-global-domain/README.md new file mode 100644 index 0000000000..37aa31ba60 --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/02-global-domain/README.md @@ -0,0 +1,33 @@ +# Connection to Canton.Global + +*** +WARNING: The global Canton domain is currently not running. This example does not work at the moment. 
+*** +TODO(#7564) Make this example work again once the global domain is up +*** + + +Participants require a domain to communicate with each other. Digital Asset is running a generally available +global Canton domain (Canton.Global). Any participant can decide to connect to the global domain and use it +for bilateral communication. + +The global domain connectivity example demonstrates how to connect a participant node +to the global Canton domain. Currently, the global domain is operated as a test-net. +Longer term, the global domain will serve as a global fall-back committer which can be +used if no closer committer is available. + +The global domain connectivity example contains two files, a configuration file and a +script which invokes the necessary registration call and subsequently tests the connection +by pinging the digital asset node. + +``` + ../../bin/canton -c global-domain-participant.conf --bootstrap global-domain-participant.canton +``` + +After invoking above script, you will be prompted the terms of service for using the global +domain. You will have to accept it once in order to be able to use it. + +Please note that right now, the global domain is a pure test-net and we are regularly resetting +the domain entirely, wiping all the content, as we are still developing the protocol. Therefore, +just use it for demonstration purposes. 
+ diff --git a/canton-3x/community/app/src/pack/examples/02-global-domain/global-domain-participant.canton b/canton-3x/community/app/src/pack/examples/02-global-domain/global-domain-participant.canton new file mode 100644 index 0000000000..7bb5c1fa32 --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/02-global-domain/global-domain-participant.canton @@ -0,0 +1,13 @@ +nodes.local.start() + +val domainUrl = sys.env.get("DOMAIN_URL").getOrElse("https://canton.global") + +val myself = participant1 + +myself.domains.connect("global", domainUrl) + +myself.health.ping(myself) + +val da = myself.parties.list(filterParty="digitalasset").head.participants.head.participant + +myself.health.ping(da) diff --git a/canton-3x/community/app/src/pack/examples/02-global-domain/global-domain-participant.conf b/canton-3x/community/app/src/pack/examples/02-global-domain/global-domain-participant.conf new file mode 100644 index 0000000000..e9afd10fef --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/02-global-domain/global-domain-participant.conf @@ -0,0 +1,16 @@ +canton { + participants { + participant1 { + admin-api { + port= 6012 + } + ledger-api { + port = 6011 + } + storage { + type = memory + } + parameters.admin-workflow.bong-test-max-level = 12 + } + } +} diff --git a/canton-3x/community/app/src/pack/examples/03-advanced-configuration/README.md b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/README.md new file mode 100644 index 0000000000..7efe9b1e35 --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/README.md @@ -0,0 +1,137 @@ +# Advanced Configuration Example + +This example directory contains a collection of configuration files that can be used to setup domains or +participants for various purposes. 
The directory contains a set of sub-folders: + + - storage: contains "storage mixins" such as [memory.conf](storage/memory.conf) or [postgres.conf](storage/postgres.conf) + - nodes: contains a set of node definitions for domains and participants + - api: contains "api mixins" that modify the API behaviour such as binding to a public address or including jwt authorization + - remote: contains a set of remote node definitions for the nodes in the nodes directory. + - parameters: contains "parameter mixins" that modify the node behaviour in various ways. + +## Persistence + +For every setup, you need to decide which persistence layer you want to use. Supported are [memory.conf](storage/memory.conf), +[postgres.conf](storage/postgres.conf) or Oracle (Enterprise). Please [consult the manual](https://docs.daml.com/canton/usermanual/installation.html#persistence-using-postgres) +for further instructions. The examples here will illustrate the usage using the in-memory configuration. + +There is a small helper script in [dbinit.py](storage/dbinit.py) which you can use to create the appropriate SQL commands +to create users and databases for a series of nodes. This is convenient if you are setting up a test-network. You can +run it using: + +``` + python3 examples/03-advanced-configuration/storage/dbinit.py \ + --type=postgres --user=canton --pwd= --participants=2 --domains=1 --drop +``` + +Please run the script with ``--help`` to get an overview of all commands. Generally, you would just pipe the output +to your SQL console. + +## Nodes + +The nodes directory contains a set of base configuration files that can be used together with the mix-ins. + +### Domain + +Start a domain with the following command: + +``` + ./bin/canton -c examples/03-advanced-configuration/storage/memory.conf,examples/03-advanced-configuration/nodes/domain1.conf +``` + +The domain can be started without any bootstrap script, as it self-initialises by default, waiting for incoming connections.
+ +If you pass in multiple configuration files, they will be combined. It doesn't matter if you separate the +configurations using `,` or if you pass them with several `-c` options. + +NOTE: If you unpacked the zip directory, then you might have to make the canton startup script executable + (`chmod u+x bin/canton`). + +### Participants + +The participant(s) can be started the same way, just by pointing to the participant configuration file. +However, before we can use the participant for any Daml processing, we need to connect it to a domain. You can +connect to the domain interactively, or use the [initialisation script](participant-init.canton). + +``` + ./bin/canton -c examples/03-advanced-configuration/storage/memory.conf \ + -c examples/03-advanced-configuration/nodes/participant1.conf,examples/03-advanced-configuration/nodes/participant2.conf \ + --bootstrap=examples/03-advanced-configuration/participant-init.canton +``` + +The initialisation script assumes that the domain can be reached via `localhost`, which needs to change if the domain +runs on a different server. + +A setup with more participant nodes can be created using the [participant](nodes/participant1.conf) as a template. +The same applies to the domain configuration. The instance names should be changed (`participant1` to something else), +as otherwise, distinguishing the nodes in a trial run will be difficult. + +## API + +By default, all the APIs only bind to localhost. If you want to expose them on the network, you should secure them using +TLS and JWT. You can use the mixins configuration in the ``api`` subdirectory for your convenience. + +## Parameters + +The parameters directory contains a set of mix-ins to modify the behaviour of your nodes. + +- [nonuck.conf](parameters/nonuck.conf) enables non-UCK mode such that you can use multiple domains per participant node (preview).
+ +## Test Your Setup + +Assuming that you have started both participants and a domain, you can verify that the system works by having +participant2 pinging participant1 (the other way around also works). A ping here is just a built-in Daml +contract which gets sent from one participant to another, and the other responds by exercising a choice. + +First, just make sure that the `participant2` is connected to the domain by testing whether the following command +returns `true` +``` +@ participant2.domains.active("mydomain") +``` + +In order to ping participant1, participant2 must know participant1's `ParticipantId`. You could obtain this from +participant1's instance of the Canton console using the command `participant1.id` and copy-pasting the resulting +`ParticipantId` to participant2's Canton console. Another option is to lookup participant1's ID directly using +participant2's console: +``` +@ val participant1Id = participant2.parties.list(filterParticipant="participant1").head.participants.head.participant +``` +Using the console for participant2, you can now get the two participants to ping each other: +``` +@ participant2.health.ping(participant1Id) +``` + +## Running as Background Process + +If you start Canton with the commands above, you will always be in interactive mode within the Canton console. +You can start Canton as well as a non-interactive process using +``` + ./bin/canton daemon -c examples/03-advanced-configuration/storage/memory.conf \ + -c examples/03-advanced-configuration/nodes/participant1.conf \ + --bootstrap examples/03-advanced-configuration/participant-init.canton +``` + +## Connect To Remote Nodes + +In many cases, the nodes will run in a background process, started as `daemon`, while the user would +still like the convenience of using the console. This can be achieved by defining remote domains and +participants in the configuration file. 
+ +A participant or domain configuration can be turned into a remote config using + +``` + ./bin/canton generate remote-config -c examples/03-advanced-configuration/storage/memory.conf,examples/03-advanced-configuration/nodes/participant1.conf +``` + +Then, if you start Canton using +``` + ./bin/canton -c remote-participant1.conf +``` +you will have a new instance `participant1`, which will expose most but not all commands +that a node exposes. As an example, run: +``` + participant1.health.status +``` + +Please note that depending on your setup, you might have to adjust the target ip address. + diff --git a/canton-3x/community/app/src/pack/examples/03-advanced-configuration/api/jwt/certificate.conf b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/api/jwt/certificate.conf new file mode 100644 index 0000000000..8348cdd554 --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/api/jwt/certificate.conf @@ -0,0 +1,13 @@ +_shared { + ledger-api { + auth-services = [{ + // type can be + // jwt-rs-256-crt + // jwt-es-256-crt + // jwt-es-512-crt + type = jwt-rs-256-crt + // we need a certificate file (abcd.cert) + certificate = ${JWT_CERTIFICATE_FILE} + }] + } +} diff --git a/canton-3x/community/app/src/pack/examples/03-advanced-configuration/api/jwt/jwks.conf b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/api/jwt/jwks.conf new file mode 100644 index 0000000000..bcb5818141 --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/api/jwt/jwks.conf @@ -0,0 +1,9 @@ +_shared { + ledger-api { + auth-services = [{ + type = jwt-rs-256-jwks + // we need a URL to a jwks key, e.g. 
https://path.to/jwks.key + url = ${JWT_URL} + }] + } +} diff --git a/canton-3x/community/app/src/pack/examples/03-advanced-configuration/api/jwt/leeway-parameters.conf b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/api/jwt/leeway-parameters.conf new file mode 100644 index 0000000000..796667aad3 --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/api/jwt/leeway-parameters.conf @@ -0,0 +1,8 @@ +_shared { + parameters.ledger-api-server-parameters.jwt-timestamp-leeway { + default = 5 + expires-at = 10 + issued-at = 15 + not-before = 20 + } +} diff --git a/canton-3x/community/app/src/pack/examples/03-advanced-configuration/api/jwt/unsafe-hmac256.conf b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/api/jwt/unsafe-hmac256.conf new file mode 100644 index 0000000000..6adb4df2ac --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/api/jwt/unsafe-hmac256.conf @@ -0,0 +1,8 @@ +_shared { + ledger-api { + auth-services = [{ + type = unsafe-jwt-hmac-256 + secret = "not-safe-for-production" + }] + } +} diff --git a/canton-3x/community/app/src/pack/examples/03-advanced-configuration/api/large-in-memory-fan-out.conf b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/api/large-in-memory-fan-out.conf new file mode 100644 index 0000000000..df27c97e2a --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/api/large-in-memory-fan-out.conf @@ -0,0 +1,7 @@ +_shared { + ledger-api { + index-service { + max-transactions-in-memory-fan-out-buffer-size = 10000 // default 1000 + } + } +} diff --git a/canton-3x/community/app/src/pack/examples/03-advanced-configuration/api/large-ledger-api-cache.conf b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/api/large-ledger-api-cache.conf new file mode 100644 index 0000000000..9c94f7b20e --- /dev/null +++ 
b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/api/large-ledger-api-cache.conf @@ -0,0 +1,8 @@ +_shared { + ledger-api { + index-service { + max-contract-state-cache-size = 100000 // default 1e4 + max-contract-key-state-cache-size = 100000 // default 1e4 + } + } +} diff --git a/canton-3x/community/app/src/pack/examples/03-advanced-configuration/api/public-admin.conf b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/api/public-admin.conf new file mode 100644 index 0000000000..b8c808ecac --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/api/public-admin.conf @@ -0,0 +1,7 @@ +_shared { + admin-api { + // by default, canton binds to 127.0.0.1, only enabling localhost connections + // you need to explicitly set the address to enable connections from other hosts + address = 0.0.0.0 + } +} \ No newline at end of file diff --git a/canton-3x/community/app/src/pack/examples/03-advanced-configuration/api/public.conf b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/api/public.conf new file mode 100644 index 0000000000..af07b1c907 --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/api/public.conf @@ -0,0 +1,11 @@ +_shared { + public-api { + // by default, canton binds to 127.0.0.1, only enabling localhost connections + // you need to explicitly set the address to enable connections from other hosts + address = 0.0.0.0 + } + ledger-api { + // same as for public-api + address = 0.0.0.0 + } +} \ No newline at end of file diff --git a/canton-3x/community/app/src/pack/examples/03-advanced-configuration/api/wildcard.conf b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/api/wildcard.conf new file mode 100644 index 0000000000..0caf7d3a94 --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/api/wildcard.conf @@ -0,0 +1,7 @@ +_shared { + ledger-api { + auth-services = [{ + type = wildcard + }] + 
} +} diff --git a/canton-3x/community/app/src/pack/examples/03-advanced-configuration/nodes/domain1.conf b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/nodes/domain1.conf new file mode 100644 index 0000000000..1a1d517ca1 --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/nodes/domain1.conf @@ -0,0 +1,18 @@ +canton { + domains { + domain1 { + storage = ${_shared.storage} + storage.config.properties.databaseName = "domain1" + init.domain-parameters.unique-contract-keys = ${?_.shared.unique-contract-keys} + public-api { + port = 10018 + // if defined, this include will override the address we bind to. default is 127.0.0.1 + address = ${?_shared.public-api.address} + } + admin-api { + port = 10019 + address = ${?_shared.admin-api.address} + } + } + } +} \ No newline at end of file diff --git a/canton-3x/community/app/src/pack/examples/03-advanced-configuration/nodes/participant1.conf b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/nodes/participant1.conf new file mode 100644 index 0000000000..cccf64c906 --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/nodes/participant1.conf @@ -0,0 +1,19 @@ +canton { + participants { + participant1 { + storage = ${_shared.storage} + storage.config.properties.databaseName = "participant1" + init.parameters.unique-contract-keys = ${?_.shared.unique-contract-keys} + admin-api { + port = 10012 + // if defined, this include will override the address we bind to. 
default is 127.0.0.1 + address = ${?_shared.admin-api.address} + } + ledger-api { + port = 10011 + address = ${?_shared.ledger-api.address} + auth-services = ${?_shared.ledger-api.auth-services} + } + } + } +} \ No newline at end of file diff --git a/canton-3x/community/app/src/pack/examples/03-advanced-configuration/nodes/participant2.conf b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/nodes/participant2.conf new file mode 100644 index 0000000000..6ea6e40d4a --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/nodes/participant2.conf @@ -0,0 +1,19 @@ +canton { + participants { + participant2 { + storage = ${_shared.storage} + storage.config.properties.databaseName = "participant2" + init.parameters.unique-contract-keys = ${?_.shared.unique-contract-keys} + admin-api { + port = 10022 + // if defined, this include will override the address we bind to. default is 127.0.0.1 + address = ${?_shared.admin-api.address} + } + ledger-api { + port = 10021 + address = ${?_shared.ledger-api.address} + auth-services = ${?_shared.ledger-api.auth-services} + } + } + } +} \ No newline at end of file diff --git a/canton-3x/community/app/src/pack/examples/03-advanced-configuration/nodes/participant3.conf b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/nodes/participant3.conf new file mode 100644 index 0000000000..fd8c0cf04b --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/nodes/participant3.conf @@ -0,0 +1,19 @@ +canton { + participants { + participant3 { + storage = ${_shared.storage} + storage.config.properties.databaseName = "participant3" + init.parameters.unique-contract-keys = ${?_.shared.unique-contract-keys} + admin-api { + port = 10032 + // if defined, this include will override the address we bind to. 
default is 127.0.0.1 + address = ${?_shared.admin-api.address} + } + ledger-api { + port = 10031 + address = ${?_shared.ledger-api.address} + auth-services = ${?_shared.ledger-api.auth-services} + } + } + } +} diff --git a/canton-3x/community/app/src/pack/examples/03-advanced-configuration/nodes/participant4.conf b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/nodes/participant4.conf new file mode 100644 index 0000000000..1d3d34a617 --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/nodes/participant4.conf @@ -0,0 +1,19 @@ +canton { + participants { + participant4 { + storage = ${_shared.storage} + storage.config.properties.databaseName = "participant4" + init.parameters.unique-contract-keys = ${?_.shared.unique-contract-keys} + admin-api { + port = 10042 + // if defined, this include will override the address we bind to. default is 127.0.0.1 + address = ${?_shared.admin-api.address} + } + ledger-api { + port = 10041 + address = ${?_shared.ledger-api.address} + auth-services = ${?_shared.ledger-api.auth-services} + } + } + } +} diff --git a/canton-3x/community/app/src/pack/examples/03-advanced-configuration/parameters/nonuck.conf b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/parameters/nonuck.conf new file mode 100644 index 0000000000..5e221d7023 --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/parameters/nonuck.conf @@ -0,0 +1,3 @@ +_shared { + unique-contract-keys = no +} \ No newline at end of file diff --git a/canton-3x/community/app/src/pack/examples/03-advanced-configuration/participant-init.canton b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/participant-init.canton new file mode 100644 index 0000000000..86c47327e1 --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/participant-init.canton @@ -0,0 +1,22 @@ + +val participant = participants.local.head + +// only run once 
+if(participant.domains.list_registered().isEmpty) { + + // connect all local participants to the domain passing a user chosen alias and the domain port as the argument + participants.local.foreach(_.domains.connect("mydomain", "http://localhost:10018")) + + // above connect operation is asynchronous. it is generally at the discretion of the domain + // to decide if a participant can join and when. therefore, we need to asynchronously wait here + // until the participant observes its activation on the domain + utils.retry_until_true { + participant.domains.active("mydomain") + } + // synchronize vetting to ensure the participant has the package needed for the ping + participant.packages.synchronize_vetting() + + // verify that the connection works + participant.health.ping(participant) + +} \ No newline at end of file diff --git a/canton-3x/community/app/src/pack/examples/03-advanced-configuration/remote/domain1.conf b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/remote/domain1.conf new file mode 100644 index 0000000000..7ebc36718e --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/remote/domain1.conf @@ -0,0 +1,14 @@ +canton { + remote-domains { + remoteDomain1 { + public-api { + address = 127.0.0.1 + port = 10018 + } + admin-api { + port = 10019 + address = 127.0.0.1 // default value if omitted + } + } + } +} \ No newline at end of file diff --git a/canton-3x/community/app/src/pack/examples/03-advanced-configuration/remote/participant1.conf b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/remote/participant1.conf new file mode 100644 index 0000000000..39dacaef6c --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/remote/participant1.conf @@ -0,0 +1,14 @@ +canton { + remote-participants { + remoteParticipant1 { + admin-api { + port = 10012 + address = 127.0.0.1 // is the default value if omitted + } + ledger-api { + port = 10011 + address = 
127.0.0.1 // is the default value if omitted + } + } + } +} \ No newline at end of file diff --git a/canton-3x/community/app/src/pack/examples/03-advanced-configuration/storage/dbinit.py b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/storage/dbinit.py new file mode 100644 index 0000000000..c980532f4e --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/storage/dbinit.py @@ -0,0 +1,51 @@ +#!/usr/bin/python3 +# +# Trivial helper script to create users / databases for Canton nodes +# + +import argparse +import sys + +def get_parser(): + parser = argparse.ArgumentParser(description = "Helper utility to setup Canton databases for a set of nodes") + parser.add_argument("--type", help="Type of database to be setup", choices=["postgres"], default="postgres") + parser.add_argument("--participants", type=int, help="Number of participant dbs to generate (will create dbs named participantX for 1 to N)", default=0) + parser.add_argument("--domains", type=int, help="Number of domain dbs to generate (will create dbs named domainX for 1 to N)", default=0) + parser.add_argument("--sequencers", type=int, help="Number of sequencer dbs to generate (will create dbs named sequencerX for 1 to N", default=0) + parser.add_argument("--mediators", type=int, help="Number of mediators dbs to generate (will create dbs named mediatorX for 1 to N", default=0) + parser.add_argument("--user", type=str, help="Database user name. 
If given, the script will also generate a SQL command to create the user", required=True) + parser.add_argument("--pwd", type=str, help="Database password") + parser.add_argument("--drop", help="Drop existing", action="store_true") + return parser.parse_args() + +def do_postgres(args): + print(""" +DO +$do$ +BEGIN + IF NOT EXISTS ( + SELECT FROM pg_catalog.pg_roles + WHERE rolname = '%s') THEN + CREATE ROLE \"%s\" LOGIN PASSWORD '%s'; + END IF; +END +$do$; +""" % (args.user, args.user, args.pwd)) + for num, prefix in [(args.domains, "domain"), (args.participants, "participant"), (args.mediators, "mediator"), (args.sequencers, "sequencer")]: + for ii in range(1, num + 1): + dbname = prefix + str(ii) + if args.drop: + print("DROP DATABASE IF EXISTS %s;" % (dbname)) + print("CREATE DATABASE %s;" % dbname) + print("GRANT ALL ON DATABASE %s to \"%s\";" % (dbname, args.user)) + +if __name__ == "__main__": + args = get_parser() + if args.type == "postgres": + do_postgres(args) + else: + raise Exception("Unknown database type %s" % (args.type)) + + + + diff --git a/canton-3x/community/app/src/pack/examples/03-advanced-configuration/storage/h2.conf b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/storage/h2.conf new file mode 100644 index 0000000000..94529a52a8 --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/storage/h2.conf @@ -0,0 +1,20 @@ +# File based H2 configuration mixin +# +# This file defines a shared configuration resources. You can mix it into your configuration by +# refer to the shared storage resource and add the database name. +# +# Check nodes/participant1.conf as an example +# +# Please note that using H2 is currently not advised and not supported. 
+# + +_shared { + storage { + type = "h2" + config = { + user = "canton" + password = "morethansafe" + driver = org.h2.Driver + } + } +} \ No newline at end of file diff --git a/canton-3x/community/app/src/pack/examples/03-advanced-configuration/storage/memory.conf b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/storage/memory.conf new file mode 100644 index 0000000000..e40e01c29f --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/storage/memory.conf @@ -0,0 +1,5 @@ +_shared { + storage { + type = "memory" + } +} \ No newline at end of file diff --git a/canton-3x/community/app/src/pack/examples/03-advanced-configuration/storage/postgres.conf b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/storage/postgres.conf new file mode 100644 index 0000000000..9a1094e42b --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/03-advanced-configuration/storage/postgres.conf @@ -0,0 +1,37 @@ +# Postgres persistence configuration mixin +# +# This file defines a shared configuration resources. You can mix it into your configuration by +# refer to the shared storage resource and add the database name. +# +# Example: +# participant1 { +# storage = ${_shared.storage} +# storage.config.properties.databaseName = "participant1" +# } +# +# The user and password credentials are set to "canton" and "supersafe". As this is not "supersafe", you might +# want to either change this configuration file or pass the settings in via environment variables. 
+# +_shared { + storage { + type = postgres + config { + dataSourceClass = "org.postgresql.ds.PGSimpleDataSource" + properties = { + serverName = "localhost" + # the next line will override above "serverName" in case the environment variable POSTGRES_HOST exists + serverName = ${?POSTGRES_HOST} + portNumber = "5432" + portNumber = ${?POSTGRES_PORT} + # the next line will fail configuration parsing if the POSTGRES_USER environment variable is not set + user = ${POSTGRES_USER} + password = ${POSTGRES_PASSWORD} + } + } + // If defined, will configure the number of database connections per node. + // Please ensure that your database is setup with sufficient connections. + // If not configured explicitly, every node will create one connection per core on the host machine. This is + // subject to change with future improvements. + parameters.max-connections = ${?POSTGRES_NUM_CONNECTIONS} + } +} diff --git a/canton-3x/community/app/src/pack/examples/04-create-daml-app/canton.conf b/canton-3x/community/app/src/pack/examples/04-create-daml-app/canton.conf new file mode 100644 index 0000000000..3d2fc5eaa0 --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/04-create-daml-app/canton.conf @@ -0,0 +1,45 @@ +canton { + features { + enable-testing-commands = yes + enable-preview-commands = yes + } + + domains { + mydomain { + storage { + type = memory + } + + public-api.port = 14008 + admin-api.port = 14009 + } + } + + participants { + + participant1 { + storage { + type = memory + } + admin-api { + port = 12012 + } + ledger-api { + port = 12011 + } + } + + participant2 { + storage { + type = memory + } + admin-api { + port = 12022 + } + ledger-api { + port = 12021 + } + } + } +} + diff --git a/canton-3x/community/app/src/pack/examples/04-create-daml-app/init.canton b/canton-3x/community/app/src/pack/examples/04-create-daml-app/init.canton new file mode 100644 index 0000000000..eca961018e --- /dev/null +++ 
b/canton-3x/community/app/src/pack/examples/04-create-daml-app/init.canton @@ -0,0 +1,46 @@ +import better.files._ + +val createDamlAppDir = File(sys.props.get("create-daml-app-dir").getOrElse("create-daml-app")) + +val createDamlAppDar = createDamlAppDir / ".daml" / "dist" / "create-daml-app-0.1.0.dar" + +// check that files exist where we expect them +createDamlAppDir.exists || sys.error(s"please setup the create-daml-app example in the current working directly under ${createDamlAppDir}") + +createDamlAppDar.exists || sys.error(s"please run daml build in the create-daml-app folder to build the dar file ${createDamlAppDar}") + +participant1.domains.connect_local(mydomain) +participant2.domains.connect_local(mydomain) +val alice = participant1.parties.enable("Alice") +val bob = participant2.parties.enable("Bob") +val public = participant1.parties.enable("Public") +participant1.topology.party_to_participant_mappings.authorize( + TopologyChangeOp.Add, + party = public, + participant = participant2.id, + side = RequestSide.From, + permission = ParticipantPermission.Observation, +) +participant2.topology.party_to_participant_mappings.authorize( + TopologyChangeOp.Add, + party = public, + participant = participant2.id, + side = RequestSide.To, + permission = ParticipantPermission.Observation, +) + +participant1.ledger_api.users.create( + id = "alice", + actAs = Set(alice.toLf), + primaryParty = Some(alice.toLf), + readAs = Set(public.toLf), +) +participant2.ledger_api.users.create( + id = "bob", + actAs = Set(bob.toLf), + primaryParty = Some(bob.toLf), + readAs = Set(public.toLf), +) + +// upload dar file to participants +participants.all.dars.upload(createDamlAppDar.pathAsString) diff --git a/canton-3x/community/app/src/pack/examples/05-composability/README.md b/canton-3x/community/app/src/pack/examples/05-composability/README.md new file mode 100644 index 0000000000..61ccc8cf63 --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/05-composability/README.md @@ 
-0,0 +1,20 @@ +# Composability Example + +The composability example features a workflow that spans across two domains. +It starts two domains and five participants in a single process. +The details are described in the [composability tutorial]( +https://docs.daml.com/canton/tutorials/composability.html). + +The composability examples can be invoked from the root directory of the Canton release using + +``` + ./bin/canton -c examples/05-composability/composability.conf --bootstrap examples/05-composability/composability1.canton + ./bin/canton -c examples/05-composability/composability.conf --bootstrap examples/05-composability/composability2.canton +``` + +It can be run from other directories if the path to the CantonExamples.dar file in the examples folder +is set as the system property canton-examples.dar-path: + +``` + ./bin/canton -Dcanton-examples.dar-path= -c examples/05-composability/composability.conf --bootstrap examples/05-composability/composability1.canton +``` \ No newline at end of file diff --git a/canton-3x/community/app/src/pack/examples/05-composability/composability-auto-transfer.canton b/canton-3x/community/app/src/pack/examples/05-composability/composability-auto-transfer.canton new file mode 100644 index 0000000000..b59153d879 --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/05-composability/composability-auto-transfer.canton @@ -0,0 +1,91 @@ +// Composability tutorial part 1 - auto transfers section + +// update parameters +iou.service.update_dynamic_domain_parameters( + _.update(transferExclusivityTimeout = Duration.Zero) +) // disable automatic transfer-in + +paint.service.update_dynamic_domain_parameters( + _.update(transferExclusivityTimeout = 2.seconds) +) + +// connect participants to the domain +participant1.domains.connect_local(iou) +participant2.domains.connect_local(iou) +participant3.domains.connect_local(iou) +participant2.domains.connect_local(paint) +participant3.domains.connect_local(paint) + +val paintAlias = 
paint.name +val iouAlias = iou.name + +// create the parties +val Bank = participant1.parties.enable("Bank") +val HouseOwner = participant2.parties.enable("House Owner") +val Painter = participant3.parties.enable("Painter") + +// Wait until the party enabling has taken effect and has been observed at the participants +val partyAssignment = Set(Bank -> participant1, HouseOwner -> participant2, Painter -> participant3) +participant2.parties.await_topology_observed(partyAssignment) +participant3.parties.await_topology_observed(partyAssignment) + +// upload the Daml model to all participants +val darPath = Option(System.getProperty("canton-examples.dar-path")).getOrElse("dars/CantonExamples.dar") +participants.all.dars.upload(darPath) + +import com.digitalasset.canton.BigDecimalImplicits._ +import com.digitalasset.canton.examples.java.iou.{Amount, Iou} +import com.digitalasset.canton.examples.java.paint.{OfferToPaintHouseByOwner, PaintHouse} +import com.digitalasset.canton.participant.ledger.api.client.JavaDecodeUtil.decodeAllCreated +import com.digitalasset.canton.protocol.ContractIdSyntax._ + +import scala.jdk.CollectionConverters._ + +// architecture-handbook-entry-begin: creation +// Bank creates IOU for the house owner +val createIouCmd = new Iou( + Bank.toProtoPrimitive, + HouseOwner.toProtoPrimitive, + new Amount(100.0.toBigDecimal, "USD"), + List.empty.asJava +).create.commands.asScala.toSeq +val Seq(iouContractUnshared) = decodeAllCreated(Iou.COMPANION)( + participant1.ledger_api.javaapi.commands.submit_flat(Seq(Bank), createIouCmd)) + +// Wait until the house owner sees the IOU in the active contract store +participant2.ledger_api.acs.await_active_contract(HouseOwner, iouContractUnshared.id.toLf) + +// The house owner adds the Painter as an observer on the IOU +val showIouCmd = iouContractUnshared.id.exerciseShare(Painter.toProtoPrimitive).commands.asScala.toSeq +val Seq(iouContract) = 
decodeAllCreated(Iou.COMPANION)(participant2.ledger_api.javaapi.commands.submit_flat(Seq(HouseOwner), showIouCmd)) + +// The house owner creates a paint offer using participant 2 and the Paint domain +val paintOfferCmd = new OfferToPaintHouseByOwner( + HouseOwner.toProtoPrimitive, + Painter.toProtoPrimitive, + Bank.toProtoPrimitive, + iouContract.id +).create.commands.asScala.toSeq +val Seq(paintOffer) = decodeAllCreated(OfferToPaintHouseByOwner.COMPANION)( + participant2.ledger_api.javaapi.commands.submit_flat(Seq(HouseOwner), paintOfferCmd, workflowId = paint.name)) +// architecture-handbook-entry-end: creation + +// architecture-handbook-entry-begin: accept +// Wait until the painter sees the paint offer in the active contract store +participant3.ledger_api.acs.await_active_contract(Painter, paintOffer.id.toLf) + +// Painter accepts the paint offer on the IOU domain +val acceptCmd = paintOffer.id.exerciseAcceptByPainter().commands.asScala.toSeq +val acceptTx = participant3.ledger_api.javaapi.commands.submit_flat(Seq(Painter), acceptCmd) +val Seq(painterIou) = decodeAllCreated(Iou.COMPANION)(acceptTx) +val Seq(paintHouse) = decodeAllCreated(PaintHouse.COMPANION)(acceptTx) +// architecture-handbook-entry-end: accept + +// architecture-handbook-entry-begin: cash +// Painter converts the Iou into cash +participant3.ledger_api.javaapi.commands.submit_flat( + Seq(Painter), + painterIou.id.exerciseCall().commands.asScala.toSeq, + iou.name +) +// architecture-handbook-entry-end: cash diff --git a/canton-3x/community/app/src/pack/examples/05-composability/composability.conf b/canton-3x/community/app/src/pack/examples/05-composability/composability.conf new file mode 100644 index 0000000000..d8eae9280b --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/05-composability/composability.conf @@ -0,0 +1,69 @@ +// architecture-handbook-entry-begin: topology-part1 +canton { + features { + enable-preview-commands = yes + enable-testing-commands = yes + } + monitoring { 
+ tracing.propagation = enabled + logging.api.message-payloads = true + } + domains { + iou { + public-api.port = 13018 + admin-api.port = 13019 + storage.type = memory + init.domain-parameters.unique-contract-keys = false + } + + paint { + public-api.port = 13028 + admin-api.port = 13029 + storage.type = memory + init.domain-parameters.unique-contract-keys = false + } + } + + participants { + participant1 { + ledger-api.port = 13011 + admin-api.port = 13012 + storage.type = memory + init.parameters.unique-contract-keys = false + } + + participant2 { + ledger-api.port = 13021 + admin-api.port = 13022 + storage.type = memory + init.parameters.unique-contract-keys = false + } + + participant3 { + ledger-api.port = 13031 + admin-api.port = 13032 + storage.type = memory + init.parameters.unique-contract-keys = false + } + } +} +// architecture-handbook-entry-end: topology-part1 +// architecture-handbook-entry-begin: topology-part2 +canton { + participants { + participant4 { + ledger-api.port = 13041 + admin-api.port = 13042 + storage.type = memory + init.parameters.unique-contract-keys = false + } + + participant5 { + ledger-api.port = 13051 + admin-api.port = 13052 + storage.type = memory + init.parameters.unique-contract-keys = false + } + } +} +// architecture-handbook-entry-end: topology-part2 diff --git a/canton-3x/community/app/src/pack/examples/05-composability/composability1.canton b/canton-3x/community/app/src/pack/examples/05-composability/composability1.canton new file mode 100644 index 0000000000..fe912aa2ca --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/05-composability/composability1.canton @@ -0,0 +1,127 @@ +// Composability tutorial part 1 + +// architecture-handbook-entry-begin: topology + +// update parameters +iou.service.update_dynamic_domain_parameters( + _.update(transferExclusivityTimeout = Duration.Zero) +) // disable automatic transfer-in + +paint.service.update_dynamic_domain_parameters( + _.update(transferExclusivityTimeout = 
2.seconds) +) + +// connect participants to the domain +participant1.domains.connect_local(iou) +participant2.domains.connect_local(iou) +participant3.domains.connect_local(iou) +participant2.domains.connect_local(paint) +participant3.domains.connect_local(paint) + +// the connect call will use the configured domain name as an alias. the configured +// name is the one used in the configuration file. +// in reality, all participants pick the alias names they want, which means that +// aliases are not unique, whereas a `DomainId` is. However, the +// alias is convenient, while the DomainId is a rather long string including a hash. +// therefore, for commands, we prefer to use a short alias instead. +val paintAlias = paint.name +val iouAlias = iou.name + +// create the parties +val Bank = participant1.parties.enable("Bank") +val HouseOwner = participant2.parties.enable("House Owner") +val Painter = participant3.parties.enable("Painter") + +// Wait until the party enabling has taken effect and has been observed at the participants +val partyAssignment = Set(Bank -> participant1, HouseOwner -> participant2, Painter -> participant3) +participant2.parties.await_topology_observed(partyAssignment) +participant3.parties.await_topology_observed(partyAssignment) + +// upload the Daml model to all participants +val darPath = Option(System.getProperty("canton-examples.dar-path")).getOrElse("dars/CantonExamples.dar") +participants.all.dars.upload(darPath) +// architecture-handbook-entry-end: topology + +// architecture-handbook-entry-begin: imports +import com.digitalasset.canton.BigDecimalImplicits._ +import com.digitalasset.canton.examples.java.iou.{Amount, Iou} +import com.digitalasset.canton.examples.java.paint.{OfferToPaintHouseByOwner, PaintHouse} +import com.digitalasset.canton.participant.ledger.api.client.JavaDecodeUtil.decodeAllCreated +import com.digitalasset.canton.protocol.ContractIdSyntax._ + +import scala.jdk.CollectionConverters._ +// 
architecture-handbook-entry-end: imports + +// architecture-handbook-entry-begin: iou +// Bank creates IOU for the house owner +val createIouCmd = new Iou( + Bank.toProtoPrimitive, + HouseOwner.toProtoPrimitive, + new Amount(100.0.toBigDecimal, "USD"), + List.empty.asJava +).create.commands.asScala.toSeq +val Seq(iouContractUnshared) = decodeAllCreated(Iou.COMPANION)( + participant1.ledger_api.javaapi.commands.submit_flat(Seq(Bank), createIouCmd)) + +// Wait until the house owner sees the IOU in the active contract store +participant2.ledger_api.acs.await_active_contract(HouseOwner, iouContractUnshared.id.toLf) + +// The house owner adds the Painter as an observer on the IOU +val shareIouCmd = iouContractUnshared.id.exerciseShare(Painter.toProtoPrimitive).commands.asScala.toSeq +val Seq(iouContract) = decodeAllCreated(Iou.COMPANION)(participant2.ledger_api.javaapi.commands.submit_flat(Seq(HouseOwner), shareIouCmd)) +// architecture-handbook-entry-end: iou + +// architecture-handbook-entry-begin: paint-offer +// The house owner creates a paint offer using participant 2 and the Paint domain +val paintOfferCmd = new OfferToPaintHouseByOwner( + HouseOwner.toProtoPrimitive, + Painter.toProtoPrimitive, + Bank.toProtoPrimitive, + iouContract.id +).create.commands.asScala.toSeq +val Seq(paintOffer) = decodeAllCreated(OfferToPaintHouseByOwner.COMPANION)( + participant2.ledger_api.javaapi.commands.submit_flat(Seq(HouseOwner), paintOfferCmd, workflowId = paint.name)) +// architecture-handbook-entry-end: paint-offer + +// architecture-handbook-entry-begin: transfer +// Wait until the painter sees the paint offer in the active contract store +participant3.ledger_api.acs.await_active_contract(Painter, paintOffer.id.toLf) + +// Painter transfers the paint offer to the IOU domain +participant3.transfer.execute( + Painter, // Initiator of the transfer + paintOffer.id.toLf, // Contract to be transferred + paintAlias, // Source domain + iouAlias // Target domain +) +// 
architecture-handbook-entry-end: transfer + +// architecture-handbook-entry-begin: accept +// Painter accepts the paint offer on the IOU domain +val acceptCmd = paintOffer.id.exerciseAcceptByPainter().commands.asScala.toSeq +val acceptTx = participant3.ledger_api.javaapi.commands.submit_flat(Seq(Painter), acceptCmd) +val Seq(painterIou) = decodeAllCreated(Iou.COMPANION)(acceptTx) +val Seq(paintHouse) = decodeAllCreated(PaintHouse.COMPANION)(acceptTx) +// architecture-handbook-entry-end: accept + +// architecture-handbook-entry-begin: transfer-back +// Wait until the house owner sees the PaintHouse agreement +participant2.ledger_api.acs.await_active_contract(HouseOwner, paintHouse.id.toLf) + +// The house owner moves the PaintHouse agreement back to the Paint domain +participant2.transfer.execute( + HouseOwner, + paintHouse.id.toLf, + iouAlias, + paintAlias +) +// architecture-handbook-entry-end: transfer-back + +// architecture-handbook-entry-begin: call +// Painter converts the Iou into cash +participant3.ledger_api.javaapi.commands.submit_flat( + Seq(Painter), + painterIou.id.exerciseCall().commands.asScala.toSeq, + iou.name +) +// architecture-handbook-entry-end: call diff --git a/canton-3x/community/app/src/pack/examples/05-composability/composability2.canton b/canton-3x/community/app/src/pack/examples/05-composability/composability2.canton new file mode 100644 index 0000000000..214e6eb53a --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/05-composability/composability2.canton @@ -0,0 +1,151 @@ +// Composability tutorial part 2 + +// architecture-handbook-entry-begin: topology + +// update parameters +iou.service.update_dynamic_domain_parameters( + _.update(transferExclusivityTimeout = Duration.Zero) +) // disable automatic transfer-in + +paint.service.update_dynamic_domain_parameters( + _.update(transferExclusivityTimeout = 2.seconds) +) + +// connect participants to the domain +participant1.domains.connect_local(iou) 
+participant2.domains.connect_local(iou) +participant3.domains.connect_local(iou) +participant2.domains.connect_local(paint) +participant3.domains.connect_local(paint) +participant4.domains.connect_local(iou) +participant5.domains.connect_local(iou) + +val iouAlias = iou.name +val paintAlias = paint.name + +// create the parties +val Bank = participant1.parties.enable("Bank") +val HouseOwner = participant2.parties.enable("House Owner") +val Painter = participant3.parties.enable("Painter", waitForDomain = DomainChoice.All) + +// enable the house owner on participant 5 and the painter on participant 4 +// as explained in the identity management documentation at +// https://docs.daml.com/canton/usermanual/identity_management.html#party-on-two-nodes +import com.digitalasset.canton.console.ParticipantReference +def authorizePartyParticipant(partyId: PartyId, createdAt: ParticipantReference, to: ParticipantReference): Unit = { + val createdAtP = createdAt.id + val toP = to.id + createdAt.topology.party_to_participant_mappings.authorize(TopologyChangeOp.Add, partyId, toP, RequestSide.From) + to.topology.party_to_participant_mappings.authorize(TopologyChangeOp.Add, partyId, toP, RequestSide.To) +} +authorizePartyParticipant(HouseOwner, participant2, participant5) +authorizePartyParticipant(Painter, participant3, participant4) + +// Wait until the party enabling has taken effect and has been observed at the participants +val partyAssignment = Set(HouseOwner -> participant2, HouseOwner -> participant5, Painter -> participant3, Painter -> participant4) +participant2.parties.await_topology_observed(partyAssignment) +participant3.parties.await_topology_observed(partyAssignment) + +// upload the Daml model to all participants +val darPath = Option(System.getProperty("canton-examples.dar-path")).getOrElse("dars/CantonExamples.dar") +participants.all.dars.upload(darPath) +// architecture-handbook-entry-end: topology + +// architecture-handbook-entry-begin: setup +import 
com.digitalasset.canton.BigDecimalImplicits._ +import com.digitalasset.canton.examples.java.iou.{Amount, Iou} +import com.digitalasset.canton.examples.java.paint.{OfferToPaintHouseByOwner, PaintHouse} +import com.digitalasset.canton.participant.ledger.api.client.JavaDecodeUtil.decodeAllCreated +import com.digitalasset.canton.protocol.ContractIdSyntax._ + +import scala.jdk.CollectionConverters._ + +val createIouCmd = new Iou( + Bank.toProtoPrimitive, + HouseOwner.toProtoPrimitive, + new Amount(100.0.toBigDecimal, "USD"), + List.empty.asJava +).create.commands.asScala.toSeq +val Seq(iouContractUnshared) = decodeAllCreated(Iou.COMPANION)( + participant1.ledger_api.javaapi.commands.submit_flat(Seq(Bank), createIouCmd)) + +// Wait until the house owner sees the IOU in the active contract store +participant2.ledger_api.acs.await_active_contract(HouseOwner, iouContractUnshared.id.toLf) + +// The house owner adds the Painter as an observer on the IOU +val shareIouCmd = iouContractUnshared.id.exerciseShare(Painter.toProtoPrimitive).commands.asScala.toSeq +val Seq(iouContract) = decodeAllCreated(Iou.COMPANION)(participant2.ledger_api.javaapi.commands.submit_flat(Seq(HouseOwner), shareIouCmd)) +// architecture-handbook-entry-end: setup + +// architecture-handbook-entry-begin: paint-offer +// The house owner creates a paint offer using participant 2 and the Paint domain +val paintOfferCmd = new OfferToPaintHouseByOwner( + HouseOwner.toProtoPrimitive, + Painter.toProtoPrimitive, + Bank.toProtoPrimitive, + iouContract.id +).create.commands.asScala.toSeq +val Seq(paintOffer) = decodeAllCreated(OfferToPaintHouseByOwner.COMPANION)( + participant2.ledger_api.javaapi.commands.submit_flat(Seq(HouseOwner), paintOfferCmd, workflowId = paint.name)) +// architecture-handbook-entry-end: paint-offer + +// architecture-handbook-entry-begin: transfer-out +// Wait until the painter sees the paint offer in the active contract store +participant3.ledger_api.acs.await_active_contract(Painter, 
paintOffer.id.toLf) + +// Painter transfers the paint offer to the IOU domain +val paintOfferTransferId = participant3.transfer.out( + Painter, // Initiator of the transfer + paintOffer.id.toLf, // Contract to be transferred + paintAlias, // Source domain + iouAlias // Target domain +) +// architecture-handbook-entry-end: transfer-out + +// architecture-handbook-entry-begin: transfer-in +participant3.transfer.in(Painter, paintOfferTransferId, iouAlias) +// architecture-handbook-entry-end: transfer-in + +// architecture-handbook-entry-begin: accept +// Wait until the Painter sees the IOU contract on participant 3. +participant3.ledger_api.acs.await_active_contract(Painter, iouContract.id.toLf) + +// Painter accepts the paint offer on the Iou domain +val acceptCmd = paintOffer.id.exerciseAcceptByPainter().commands.asScala.toSeq +val acceptTx = participant3.ledger_api.javaapi.commands.submit_flat(Seq(Painter), acceptCmd) +val Seq(painterIou) = decodeAllCreated(Iou.COMPANION)(acceptTx) +val Seq(paintHouse) = decodeAllCreated(PaintHouse.COMPANION)(acceptTx) +// architecture-handbook-entry-end: accept + +// architecture-handbook-entry-begin: automatic-transfer-in +// Wait until the house owner sees the PaintHouse agreement +participant2.ledger_api.acs.await_active_contract(HouseOwner, paintHouse.id.toLf) + +val paintHouseId = paintHouse.id +// The house owner moves the PaintHouse agreement back to the Paint domain +participant2.transfer.out( + HouseOwner, + paintHouseId.toLf, + iouAlias, + paintAlias +) +// After the exclusivity period, which is set to 2 seconds, +// the contract is automatically transferred into the target domain +utils.retry_until_true(10.seconds) { + // in the absence of other activity, force the participants to update their view of the latest domain time + participant2.testing.fetch_domain_times() + participant3.testing.fetch_domain_times() + + participant3.testing.acs_search(paint.name, filterId=paintHouseId.contractId).nonEmpty && + 
participant2.testing.acs_search(paint.name, filterId=paintHouseId.contractId).nonEmpty +} +// architecture-handbook-entry-end: automatic-transfer-in + +// architecture-handbook-entry-begin: call +// Painter converts the Iou into cash +participant4.ledger_api.javaapi.commands.submit_flat( + Seq(Painter), + painterIou.id.exerciseCall().commands.asScala.toSeq, + iou.name +) +// architecture-handbook-entry-end: call diff --git a/canton-3x/community/app/src/pack/examples/06-messaging/README.md b/canton-3x/community/app/src/pack/examples/06-messaging/README.md new file mode 100644 index 0000000000..0edacffd82 --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/06-messaging/README.md @@ -0,0 +1,141 @@ +# Messaging via the global domain + +*** +WARNING: The global Canton domain is currently not running. This example does not work at the moment. +You need to start your own Canton domain and set the environment variable canton-examples.domain-url +to the URL of your domain. +*** +TODO(#7564) Make this example work again once the global domain is up +*** + +Participants require a domain to communicate with each other. Digital +Asset is running a generally available global Canton domain +(Canton.Global). Any participant can decide to connect to the global +domain and use it for bilateral communication. + +The messaging example provides a simple messaging application via the +global domain. + +The example is structured as follows: + +``` + . 
+ |-- message Daml model for messages + | |- .daml/dist/message-0.0.1.dar Compiled DAR file + | |- daml/Message.daml Daml source code for messages + | |- daml.yaml Daml configuration file + | |- frontend-config.js Configuration file for Daml Navigator + | + |-- contact Daml model for contacts + | |- daml/Contact.daml Incomplete Daml source code for contacts + | |- daml/Contact.solution Example solution for the Daml exercise below + | |- daml.yaml Daml configuration file + | |- frontend-config.js Configuration file for Daml Navigator + | + |-- canton.conf Configuration file for one participant + |-- init.canton Initialization script for Canton +``` + +The files in `message` must not be changed because it defines the +format of messages to be exchanged. So `message-0.0.1.dar` must be +the same on all participants that want to exchange messages. + + +Run the application by performing the following steps: + +1. Compile the contact model by issuing the command `daml build` in + the `contact` folder. This should generate the file + `contact/.daml/dist/contact-0.0.1.dar`. + +2. Start Canton from the `06-messaging` folder with the following command + + ``` + ../../bin/canton -c canton.conf --bootstrap init.canton + ``` + + If you have never connected to the global domain before, you will + be shown the terms of service for using the global domain. You will + have to accept it once in order to be able to use it. + + Next, you will be asked for your username in the messaging + application. Canton usernames may contain only letters, numbers, + `-` and `_` and may not be longer than 189. Canton will suffix your + username to make it globally unique. Your suffixed user name will + be output on the screen. + + You can set the username in the Java system property + `canton-examples.username` as a command-line argument: + + ``` + ../../bin/canton -c canton.conf --bootstrap init.canton -Dcanton-examples.username=Alice + ``` + +3. Start Daml Navigator. 
+ + After step 2, Canton outputs the command that you need to run to + start Daml Navigator. Run the command in a separate terminal from + the `contact` folder. Typically, the command looks as follows: + + ``` + daml navigator server localhost 7011 -t wallclock --port 7015 -c ui-backend-participant1.conf + ``` + + This will start the frontend on port 7015. + +4. Open a browser and point it to `http://localhost:7015`. + Login with your chosen username. + +5. Find someone else whom you want to send a message. You can search + for usernames with the following command in the Canton console: + + ``` + findUser("Alice") + ``` + + This will list all suffixed usernames that contain the string + `Alice`. Note that these users need not be currently online. + + Click on the `Message:Message` template in the `Templates` view of + Navigator to create a new message. Put your suffixed username as + `sender` and the recipient's suffixed username as `receiver`. + + Click `Submit` to send the message. A `Message:Message` contract + should soon be shown in the `Contracts` table as well as under `Sent`. + + The receiver can use the `Reply` choice to send a message back. + + Stop Canton and Navigator after that. + + Note: Canton is configured to run with a file-based database. + Your username suffix and the messages will be persisted + on your computer in the file `participant1.mv.db`. + Delete this file if you want to start afresh. + +6. Extend the `Contact` Daml model. As is, you must specify suffixed + username of yourself and your contact whenever you send a new + message. The `Contact` template in `contact/daml/Contact.daml` + can store these usernames, but it does not have any choices yet. + + Add a non-consuming choice `Send` to the `Contact` template that + takes a message as parameter. It shall create a `Message` with + `myself` as sender, `other` as recipient, and the given message. 
+ + Write a script to test the message sending via a `Contact` contract + and run the script in Daml studio. + + Compile the extended `Contact` Daml model by running `daml build` + in the `contact` folder. + +7. Restart Canton and Navigator as described in Step 5. + You will be shown a reminder of your suffixed user name + instead of being asked for one. + + Create a `Contact` contract for your counterparty. + Use the `Send` choice on the `Contact` to send a message. + + Since you have modified the `Contact` template, there will be now + several `Contact` templates in the `Templates` tab; one for each + version. Your existing `Contact` contracts refer to the old + version and therefore do not offer the `Send` choice. You would + have to explicitly upgrade the contracts; this process is explained + in the Daml documentation at https://docs.daml.com/upgrade/index.html. diff --git a/canton-3x/community/app/src/pack/examples/06-messaging/canton.conf b/canton-3x/community/app/src/pack/examples/06-messaging/canton.conf new file mode 100644 index 0000000000..f388aa5051 --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/06-messaging/canton.conf @@ -0,0 +1,22 @@ +canton { + participants { + participant1 { + admin-api { + port= 7012 + } + ledger-api { + port = 7011 + } + storage { + type = "h2" + config = { + connectionPool = disabled + url = "jdbc:h2:file:./participant1;MODE=PostgreSQL;LOCK_TIMEOUT=10000;DB_CLOSE_DELAY=-1" + user = "participant1" + password = "morethansafe" + driver = org.h2.Driver + } + } + } + } +} diff --git a/canton-3x/community/app/src/pack/examples/06-messaging/contact/.gitignore b/canton-3x/community/app/src/pack/examples/06-messaging/contact/.gitignore new file mode 100644 index 0000000000..6250c7b91b --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/06-messaging/contact/.gitignore @@ -0,0 +1,2 @@ +/.daml +ui-backend-participant1.conf diff --git a/canton-3x/community/app/src/pack/examples/06-messaging/contact/daml.yaml 
b/canton-3x/community/app/src/pack/examples/06-messaging/contact/daml.yaml new file mode 100644 index 0000000000..d36875cf6b --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/06-messaging/contact/daml.yaml @@ -0,0 +1,12 @@ +sdk-version: 2.9.0-snapshot.20231128.12429.0.vcd189081 +sandbox-options: +- --wall-clock-time +name: contact +data-dependencies: +- ../message/.daml/dist/message-0.0.1.dar +source: daml +version: 0.0.1 +dependencies: +- daml-prim +- daml-stdlib +- daml-script diff --git a/canton-3x/community/app/src/pack/examples/06-messaging/contact/daml/Contact.daml b/canton-3x/community/app/src/pack/examples/06-messaging/contact/daml/Contact.daml new file mode 100644 index 0000000000..6c5741d3d8 --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/06-messaging/contact/daml/Contact.daml @@ -0,0 +1,13 @@ +-- Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +-- SPDX-License-Identifier: Apache-2.0 + +module Contact where + +import Message + +template Contact + with + myself : Party + other : Party + where + signatory myself diff --git a/canton-3x/community/app/src/pack/examples/06-messaging/contact/daml/Contact.solution b/canton-3x/community/app/src/pack/examples/06-messaging/contact/daml/Contact.solution new file mode 100644 index 0000000000..ed0e0390ff --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/06-messaging/contact/daml/Contact.solution @@ -0,0 +1,35 @@ +module Contact where + +import Daml.Script +import Message + +template Contact + with + myself : Party + other : Party + where + signatory myself + + nonconsuming choice Send: () + with + message: Text + controller myself + do + create Message with + sender = myself + receiver = other + message = message + pure () + +contactTest = script do + alice <- allocateParty "alice" + bob <- allocateParty "bob" + + contact <- submit alice do + createCmd Contact with myself = alice; other = bob + + submit alice do + exerciseCmd contact 
Send with message = "Hi Bob!" + + submit alice do + exerciseCmd contact Send with message = "How are you doing?" diff --git a/canton-3x/community/app/src/pack/examples/06-messaging/contact/frontend-config.js b/canton-3x/community/app/src/pack/examples/06-messaging/contact/frontend-config.js new file mode 100644 index 0000000000..b1220ab35a --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/06-messaging/contact/frontend-config.js @@ -0,0 +1,184 @@ +// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +import { DamlLfValue } from '@da/ui-core'; + +export const version = { + schema: 'navigator-config', + major: 2, + minor: 0, +}; + +export const customViews = (userId, party, role) => ({ + sent: { + type: "table-view", + title: "Sent", + source: { + type: "contracts", + filter: [ + { + field: "argument.sender", + value: party, + }, + { + field: "template.id", + value: "Message:Message", + } + ], + search: "", + sort: [ + { + field: "id", + direction: "ASCENDING" + } + ] + }, + columns: [ + { + key: "id", + title: "Contract ID", + createCell: ({rowData}) => ({ + type: "text", + value: rowData.id + }), + sortable: true, + width: 80, + weight: 0, + alignment: "left" + }, + { + key: "argument.receiver", + title: "To", + createCell: ({rowData}) => ({ + type: "text", + value: DamlLfValue.toJSON(rowData.argument).receiver + }), + sortable: true, + width: 80, + weight: 0, + alignment: "left" + }, + { + key: "argument.message", + title: "Message", + createCell: ({rowData}) => ({ + type: "text", + value: DamlLfValue.toJSON(rowData.argument).message + }), + sortable: true, + width: 80, + weight: 0, + alignment: "left" + } + ] + }, + received: { + type: "table-view", + title: "Received", + source: { + type: "contracts", + filter: [ + { + field: "argument.receiver", + value: party, + }, + { + field: "template.id", + value: "Message:Message", + } + ], + search: "", + sort: [ + { + 
field: "id", + direction: "ASCENDING" + } + ] + }, + columns: [ + { + key: "id", + title: "Contract ID", + createCell: ({rowData}) => ({ + type: "text", + value: rowData.id + }), + sortable: true, + width: 80, + weight: 0, + alignment: "left" + }, + { + key: "argument.sender", + title: "From", + createCell: ({rowData}) => ({ + type: "text", + value: DamlLfValue.toJSON(rowData.argument).sender + }), + sortable: true, + width: 80, + weight: 0, + alignment: "left" + }, + { + key: "argument.message", + title: "Message", + createCell: ({rowData}) => ({ + type: "text", + value: DamlLfValue.toJSON(rowData.argument).message + }), + sortable: true, + width: 80, + weight: 0, + alignment: "left" + } + ] + }, + contacts: { + type: "table-view", + title: "Contacts", + source: { + type: "contracts", + filter: [ + { + field: "template.id", + value: "Contact:Contact", + } + ], + search: "", + sort: [ + { + field: "id", + direction: "ASCENDING" + } + ] + }, + columns: [ + { + key: "id", + title: "Contract ID", + createCell: ({rowData}) => ({ + type: "text", + value: rowData.id + }), + sortable: true, + width: 80, + weight: 0, + alignment: "left" + }, + { + key: "argument.other", + title: "Contact", + createCell: ({rowData}) => ({ + type: "text", + value: DamlLfValue.toJSON(rowData.argument).other + }), + sortable: true, + width: 80, + weight: 0, + alignment: "left" + } + ] + } + +}) diff --git a/canton-3x/community/app/src/pack/examples/06-messaging/init.canton b/canton-3x/community/app/src/pack/examples/06-messaging/init.canton new file mode 100644 index 0000000000..1320bbc4ca --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/06-messaging/init.canton @@ -0,0 +1,58 @@ +nodes.local.start() + +val domainUrl = sys.props.get("canton-examples.domain-url").getOrElse("https://canton.global") + +val myself = participant1 + +if (myself.domains.list_registered().length == 0) { + myself.domains.connect("global", domainUrl) +} + +utils.retry_until_true(timeout = 60.seconds) { + 
myself.domains.active("global") +} + +myself.health.ping(myself) // make sure that the connection works + +// upload the dars + +import better.files._ + +val baseDir = sys.props.get("canton-examples.base-dir").getOrElse(".") +val messageDar: File = baseDir / "message" / ".daml" / "dist" / "message-0.0.1.dar" +val contactDar: File = baseDir / "contact" / ".daml" / "dist" / "contact-0.0.1.dar" +assert(messageDar.exists(), s"Message dar $messageDar isn't built") +assert(contactDar.exists(), s"Contact dar $contactDar isn't build") +myself.dars.upload(messageDar.pathAsString) +myself.dars.upload(contactDar.pathAsString) + +// if no parties have been onboarded, ask for the party name +val hostedParties = myself.parties.hosted() +if (hostedParties.length <= 1) { + val username = sys.props.get("canton-examples.username").getOrElse { + scala.io.StdIn.readLine("Enter the name under which you want to be found: ") + } + val user = myself.parties.enable(username, waitForDomain = DomainChoice.All) + println(s"Your suffixed user name is: ${user.toLf}\n") +} else { + val users = hostedParties.map(_.party.toLf).mkString("\n ") + println(s"Local user names:\n $users") +} + +val messageConf: File = baseDir / "message" / "ui-backend-participant1.conf" +utils.generate_navigator_conf(myself, Some(messageConf.toString)) +val contactConf: File = baseDir / "contact" / "ui-backend-participant1.conf" +utils.generate_navigator_conf(myself, Some(contactConf.toString)) + +println(s"Start Daml Navigator now with the following command from the messaging or contact folder:") +val ledgerApiPort = myself.config.clientLedgerApi.port +println(s"\n daml navigator server localhost $ledgerApiPort -t wallclock --port 7015 -c ui-backend-participant1.conf\n") + +def findUser(name: String): Unit = { + val users = myself.parties.list(filterParty = name).map(_.party.toLf) + println(users.mkString("\n")) +} + +println(s"You can search for other users with the following query in this Canton console:") 
+println("\n findUser(\"Alice\")\n") +println("If you want to send them a message, copy their user name into the receiver field.\n") diff --git a/canton-3x/community/app/src/pack/examples/06-messaging/message/.gitignore b/canton-3x/community/app/src/pack/examples/06-messaging/message/.gitignore new file mode 100644 index 0000000000..6250c7b91b --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/06-messaging/message/.gitignore @@ -0,0 +1,2 @@ +/.daml +ui-backend-participant1.conf diff --git a/canton-3x/community/app/src/pack/examples/06-messaging/message/daml.yaml b/canton-3x/community/app/src/pack/examples/06-messaging/message/daml.yaml new file mode 100644 index 0000000000..65fd0b8192 --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/06-messaging/message/daml.yaml @@ -0,0 +1,9 @@ +sdk-version: 2.9.0-snapshot.20231128.12429.0.vcd189081 +sandbox-options: +- --wall-clock-time +name: message +source: daml +version: 0.0.1 +dependencies: +- daml-prim +- daml-stdlib diff --git a/canton-3x/community/app/src/pack/examples/06-messaging/message/daml/Message.daml b/canton-3x/community/app/src/pack/examples/06-messaging/message/daml/Message.daml new file mode 100644 index 0000000000..bc8403eeea --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/06-messaging/message/daml/Message.daml @@ -0,0 +1,58 @@ +-- Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+-- SPDX-License-Identifier: Apache-2.0 + +module Message where + +import Daml.Script + +template Message + with + sender : Party + receiver : Party + message : Text + where + signatory sender + observer receiver + + choice Retract: () + controller sender + do + pure () + + + nonconsuming choice Reply: () + with + reply: Text + controller receiver + do + create Message with + sender = receiver + receiver = sender + message = reply + pure () + + +messaging = script do + alice <- allocateParty "Alice" + bob <- allocateParty "Bob" + + submit alice do + createCmd Message with + sender = alice + receiver = bob + message = "Hi Bob!" + + submit bob do + createCmd Message with + sender = bob + receiver = alice + message = "Hi Alice! How are you doing?" + + anotherMsg <- submit alice do + createCmd Message with + sender = alice + receiver = bob + message = "Another message" + + submit alice do + exerciseCmd anotherMsg Retract diff --git a/canton-3x/community/app/src/pack/examples/06-messaging/message/frontend-config.js b/canton-3x/community/app/src/pack/examples/06-messaging/message/frontend-config.js new file mode 100644 index 0000000000..76626857a3 --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/06-messaging/message/frontend-config.js @@ -0,0 +1,137 @@ +// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +import { DamlLfValue } from '@da/ui-core'; + +export const version = { + schema: 'navigator-config', + major: 2, + minor: 0, +}; + +export const customViews = (userId, party, role) => ({ + sent: { + type: "table-view", + title: "Sent", + source: { + type: "contracts", + filter: [ + { + field: "argument.sender", + value: party, + }, + { + field: "template.id", + value: "Message:Message", + } + ], + search: "", + sort: [ + { + field: "id", + direction: "ASCENDING" + } + ] + }, + columns: [ + { + key: "id", + title: "Contract ID", + createCell: ({rowData}) => ({ + type: "text", + value: rowData.id + }), + sortable: true, + width: 80, + weight: 0, + alignment: "left" + }, + { + key: "argument.receiver", + title: "To", + createCell: ({rowData}) => ({ + type: "text", + value: DamlLfValue.toJSON(rowData.argument).receiver + }), + sortable: true, + width: 80, + weight: 0, + alignment: "left" + }, + { + key: "argument.message", + title: "Message", + createCell: ({rowData}) => ({ + type: "text", + value: DamlLfValue.toJSON(rowData.argument).message + }), + sortable: true, + width: 80, + weight: 0, + alignment: "left" + } + ] + }, + received: { + type: "table-view", + title: "Received", + source: { + type: "contracts", + filter: [ + { + field: "argument.receiver", + value: party, + }, + { + field: "template.id", + value: "Message:Message", + } + ], + search: "", + sort: [ + { + field: "id", + direction: "ASCENDING" + } + ] + }, + columns: [ + { + key: "id", + title: "Contract ID", + createCell: ({rowData}) => ({ + type: "text", + value: rowData.id + }), + sortable: true, + width: 80, + weight: 0, + alignment: "left" + }, + { + key: "argument.sender", + title: "From", + createCell: ({rowData}) => ({ + type: "text", + value: DamlLfValue.toJSON(rowData.argument).sender + }), + sortable: true, + width: 80, + weight: 0, + alignment: "left" + }, + { + key: "argument.message", + title: "Message", + createCell: ({rowData}) => ({ + type: 
"text", + value: DamlLfValue.toJSON(rowData.argument).message + }), + sortable: true, + width: 80, + weight: 0, + alignment: "left" + } + ] + } +}) diff --git a/canton-3x/community/app/src/pack/examples/07-repair/README.md b/canton-3x/community/app/src/pack/examples/07-repair/README.md new file mode 100644 index 0000000000..6519a6885d --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/07-repair/README.md @@ -0,0 +1,38 @@ +# Repair Examples + +The repair example features multiple topologies for repair-based tutorials, each with different sets of participants +and domains depending on the demonstrated capability. + +## 1. [Recovering from a broken Domain](https://docs.daml.com/canton/usermanual/repairing.html#recovering-from-a-lost-domain) + +depends on files: +- Participant configurations: participant1.conf and participant2.conf +- Domain configurations: domain-repair-lost.conf and domain-repair-new.conf +- enable-preview-commands.conf to enable "preview" and "repair" commands +- Initialization script: domain-repair-init.canton that populates the participants and "lostDomain" with Iou contracts + +To set up this scenario, run + +``` + ../../bin/canton -Dcanton-examples.dar-path=../../dars/CantonExamples.dar \ + -c participant1.conf,participant2.conf,domain-repair-lost.conf,domain-repair-new.conf \ + -c ../03-advanced-configuration/storage/h2.conf,enable-preview-commands.conf \ + --bootstrap domain-repair-init.canton +``` + +## 2. 
[Importing contracts to Canton](https://docs.daml.com/canton/usermanual/repairing.html#importing-existing-contracts) + +depends on files: +- Participant configurations: participant1.conf, participant2.conf, participant3.conf for the "import ledger", and participant4.conf for the "export ledger" +- Domain configurations: domain-export-ledger.conf and domain-import-ledger.conf for the export and import ledgers respectively +- enable-preview-commands.conf to enable "preview" and "repair" commands +- Initialization script: import-ledger-init.canton that populates the export ledger with Paint agreement and Iou contracts + +To set up this scenario, run + +``` + ../../bin/canton -Dcanton-examples.dar-path=../../dars/CantonExamples.dar \ + -c participant1.conf,participant2.conf,participant3.conf,participant4.conf,domain-export-ledger.conf,domain-import-ledger.conf \ + -c ../03-advanced-configuration/storage/h2.conf,enable-preview-commands.conf \ + --bootstrap import-ledger-init.canton +``` diff --git a/canton-3x/community/app/src/pack/examples/07-repair/domain-export-ledger.conf b/canton-3x/community/app/src/pack/examples/07-repair/domain-export-ledger.conf new file mode 100644 index 0000000000..5ee6a7dacb --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/07-repair/domain-export-ledger.conf @@ -0,0 +1,15 @@ +canton { + domains { + exportLedgerDomain { + init.domain-parameters.unique-contract-keys = false + storage = ${_shared.storage} + storage.config.properties.databaseName = "domain1" + public-api { + port = 8038 + } + admin-api { + port = 8039 + } + } + } +} diff --git a/canton-3x/community/app/src/pack/examples/07-repair/domain-import-ledger.conf b/canton-3x/community/app/src/pack/examples/07-repair/domain-import-ledger.conf new file mode 100644 index 0000000000..73b9ac524b --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/07-repair/domain-import-ledger.conf @@ -0,0 +1,15 @@ +canton { + domains { + importLedgerDomain { + 
init.domain-parameters.unique-contract-keys = false + storage = ${_shared.storage} + storage.config.properties.databaseName = "domain2" + public-api { + port = 8048 + } + admin-api { + port = 8049 + } + } + } +} diff --git a/canton-3x/community/app/src/pack/examples/07-repair/domain-repair-init.canton b/canton-3x/community/app/src/pack/examples/07-repair/domain-repair-init.canton new file mode 100644 index 0000000000..e2f1410bcd --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/07-repair/domain-repair-init.canton @@ -0,0 +1,32 @@ +// upload the Daml model to all participants +val darPath = Option(System.getProperty("canton-examples.dar-path")).getOrElse("dars/CantonExamples.dar") +participants.all.dars.upload(darPath) + +val Alice = participant1.parties.enable("Alice") +val Bob = participant2.parties.enable("Bob", waitForDomain = DomainChoice.All) + +// setup parties and connect participants to domain +Seq(participant1, participant2).foreach(_.domains.connect_local(lostDomain)) + +utils.retry_until_true { + participant1.domains.active("lostDomain") && participant2.domains.active("lostDomain") +} + +import com.digitalasset.canton.BigDecimalImplicits._ +import com.digitalasset.canton.console.ParticipantReference +import com.digitalasset.canton.examples.java.iou +import com.digitalasset.canton.protocol.ContractIdSyntax._ + +import scala.jdk.CollectionConverters._ + +def createIou(participant: ParticipantReference, payer: PartyId, owner: PartyId, value: Int) = + participant.ledger_api.javaapi.commands.submit_flat(Seq(payer), new iou.Iou(payer.toProtoPrimitive, + owner.toProtoPrimitive, + new iou.Amount(value.toLong.toBigDecimal, "USD"), + List.empty.asJava + ).create.commands.asScala.toSeq) + +Seq(100, 120, 140).foreach { value => + createIou(participant1, Alice, Bob, value) + createIou(participant2, Bob, Alice, value + 10) +} diff --git a/canton-3x/community/app/src/pack/examples/07-repair/domain-repair-lost.conf 
b/canton-3x/community/app/src/pack/examples/07-repair/domain-repair-lost.conf new file mode 100644 index 0000000000..caf955170b --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/07-repair/domain-repair-lost.conf @@ -0,0 +1,15 @@ +canton { + domains { + lostDomain { + init.domain-parameters.unique-contract-keys = false + storage = ${_shared.storage} + storage.config.properties.databaseName = "domain1" + public-api { + port = 8018 + } + admin-api { + port = 8019 + } + } + } +} diff --git a/canton-3x/community/app/src/pack/examples/07-repair/domain-repair-new.conf b/canton-3x/community/app/src/pack/examples/07-repair/domain-repair-new.conf new file mode 100644 index 0000000000..d4a1c3d6c2 --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/07-repair/domain-repair-new.conf @@ -0,0 +1,15 @@ +canton { + domains { + newDomain { + init.domain-parameters.unique-contract-keys = false + storage = ${_shared.storage} + storage.config.properties.databaseName = "domain2" + public-api { + port = 8028 + } + admin-api { + port = 8029 + } + } + } +} diff --git a/canton-3x/community/app/src/pack/examples/07-repair/enable-preview-commands.conf b/canton-3x/community/app/src/pack/examples/07-repair/enable-preview-commands.conf new file mode 100644 index 0000000000..0279d10f4f --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/07-repair/enable-preview-commands.conf @@ -0,0 +1,2 @@ +canton.features.enable-repair-commands = true +canton.features.enable-preview-commands = true diff --git a/canton-3x/community/app/src/pack/examples/07-repair/import-ledger-init.canton b/canton-3x/community/app/src/pack/examples/07-repair/import-ledger-init.canton new file mode 100644 index 0000000000..6a5da26c97 --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/07-repair/import-ledger-init.canton @@ -0,0 +1,80 @@ +import com.digitalasset.canton.BigDecimalImplicits._ +import com.digitalasset.canton.console.ParticipantReference +import 
com.digitalasset.canton.examples.java.{iou, paint} +import com.digitalasset.canton.topology.SafeSimpleString +import com.digitalasset.canton.protocol.ContractIdSyntax._ + +import scala.jdk.CollectionConverters._ + +// Use participant4 for the "export ledger", so that the "import ledger" looks more natural using participants 1 through 3 +val exportLedger = participant4 + +// upload the Daml model to export ledger +val darPath = Option(System.getProperty("canton-examples.dar-path")).getOrElse("dars/CantonExamples.dar") +exportLedger.dars.upload(darPath) +exportLedger.domains.connect_local(exportLedgerDomain) + +// allocate parties +val houseOwners = Seq("Alice", "Bob", "Carol", "Dylan") +val painters = Seq("Kahlo", "OKeeffe", "Renoir", "vanGogh") +val banks = Seq("Citi", "PostFinance") + +(houseOwners ++ painters ++ banks).foreach(party => exportLedger.ledger_api.parties.allocate(party, party)) + +// create a bunch of OfferToPaintHouseByPainter and Iou contracts +def mayIPaintYourHouse(painter: PartyId, + houseOwner: PartyId, + bank: PartyId, + participant: ParticipantReference) = { + participant.ledger_api.javaapi.commands.submit_flat( + Seq(painter), + new paint + .OfferToPaintHouseByPainter(houseOwner.toProtoPrimitive, painter.toProtoPrimitive, bank.toProtoPrimitive, new iou.Amount(100.toBigDecimal, "USD")) + .create + .commands.asScala.toSeq) +} + +def createIou(payer: PartyId, owner: PartyId, participant: ParticipantReference) = { + participant.ledger_api.javaapi.commands.submit_flat( + Seq(payer), + new iou.Iou( + payer.toProtoPrimitive, + owner.toProtoPrimitive, + new iou.Amount(100.toBigDecimal, "USD"), + List.empty.asJava, + ).create.commands.asScala.toSeq, + ) + +} + +def toPartyId(name: String) = + exportLedger.parties.list(filterParty = name + SafeSimpleString.delimiter).map(_.party).head + +val houseOwnersOnLedger = houseOwners.map(toPartyId) +val paintersOnLedger = painters.map(toPartyId) +val banksOnLedger = banks.map(toPartyId) + +for { + houseOwner 
<- houseOwnersOnLedger + painter <- paintersOnLedger + bank <- banksOnLedger +} yield mayIPaintYourHouse(painter, houseOwner, bank, exportLedger) + +houseOwnersOnLedger.zip(banksOnLedger ++ banksOnLedger).foreach { + case (houseOwner, bank) => createIou(bank, houseOwner, exportLedger) +} + +// imports that make the scripts inlined in the tutorial less cluttered +import cats.syntax.either._ +import com.daml.ledger.api.v1.admin.party_management_service.PartyDetails +import com.daml.ledger.api.v1.event.CreatedEvent +import com.digitalasset.canton.LfPartyId +import com.digitalasset.canton.admin.api.client.commands.{GrpcAdminCommand, LedgerApiCommands} +import com.digitalasset.canton.admin.api.client.commands.LedgerApiTypeWrappers.WrappedCreatedEvent +import com.digitalasset.canton.console.ConsoleEnvironment +import com.digitalasset.canton.participant.ledger.api.client.ValueRemapper +import com.digitalasset.canton.protocol.SerializableContractWithWitnesses +import io.grpc.stub.AbstractStub + +// define clue for easier reuse of inlined test code in documentation guide +def clue[A](_string: String)(code: => A) = code diff --git a/canton-3x/community/app/src/pack/examples/07-repair/participant1.conf b/canton-3x/community/app/src/pack/examples/07-repair/participant1.conf new file mode 100644 index 0000000000..8cbf8e10de --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/07-repair/participant1.conf @@ -0,0 +1,18 @@ +canton { + features.enable-testing-commands = true // to enable testing.pcs_search for visibility into participant contracts + participants { + participant1 { + init.parameters { + unique-contract-keys = false + } + storage = ${_shared.storage} + storage.config.properties.databaseName = "participant1" + admin-api { + port = 8012 + } + ledger-api { + port = 8011 + } + } + } +} diff --git a/canton-3x/community/app/src/pack/examples/07-repair/participant2.conf b/canton-3x/community/app/src/pack/examples/07-repair/participant2.conf new file mode 100644 
index 0000000000..163d015bf9 --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/07-repair/participant2.conf @@ -0,0 +1,18 @@ +canton { + features.enable-testing-commands = true // to enable testing.pcs_search for visibility into participant contracts + participants { + participant2 { + init.parameters { + unique-contract-keys = false + } + storage = ${_shared.storage} + storage.config.properties.databaseName = "participant2" + admin-api { + port = 8022 + } + ledger-api { + port = 8021 + } + } + } +} diff --git a/canton-3x/community/app/src/pack/examples/07-repair/participant3.conf b/canton-3x/community/app/src/pack/examples/07-repair/participant3.conf new file mode 100644 index 0000000000..bbe1299fde --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/07-repair/participant3.conf @@ -0,0 +1,18 @@ +canton { + features.enable-testing-commands = true // to enable testing.pcs_search for visibility into participant contracts + participants { + participant3 { + init.parameters { + unique-contract-keys = false + } + storage = ${_shared.storage} + storage.config.properties.databaseName = "participant3" + admin-api { + port = 8032 + } + ledger-api { + port = 8031 + } + } + } +} diff --git a/canton-3x/community/app/src/pack/examples/07-repair/participant4.conf b/canton-3x/community/app/src/pack/examples/07-repair/participant4.conf new file mode 100644 index 0000000000..5e4369246a --- /dev/null +++ b/canton-3x/community/app/src/pack/examples/07-repair/participant4.conf @@ -0,0 +1,18 @@ +canton { + features.enable-testing-commands = true // to enable testing.pcs_search for visibility into participant contracts + participants { + participant4 { + init.parameters { + unique-contract-keys = false + } + storage = ${_shared.storage} + storage.config.properties.databaseName = "participant4" + admin-api { + port = 8042 + } + ledger-api { + port = 8041 + } + } + } +} diff --git a/canton-3x/community/app/src/pack/lib/canton.ico 
b/canton-3x/community/app/src/pack/lib/canton.ico new file mode 100644 index 0000000000000000000000000000000000000000..0b2925c3b89a27fa9ae04745d6d1890e179f3870 GIT binary patch literal 55358 zcmeI533wD$w#P4^Ao>&?e9oxzj5?ps?Y;3E-#D(kEZ~k1b_7&FhQUz>8BvriJ6l*E zkOcxHfizo?eUmNlfB*p!RuK>oo&hl-KxgeG_5SDHu1=*$(pza>`ZQkl2z@S0h@PoT=MoOaAj2-`vr4=vdxLCcxCK8=5NOjF5f!w2PF^n z>jwBZY@VqEHgBr!K<1TBuNY5JY3B6W+`v;SNZz#vM6TR zHH}?elfWeD1iQX|?4L2-EGA#3J_Ze)_ll3uC9-+D!70+RFReNGOcwY|G>`Z-9`?#; zR=g^f6|IbBEQ8I1&xAR- zWpCQA22=6!SaxCFml*%Yvcg5fZoR&7TzmL3J1VwKu6EV`uHN^Zk8}QT%6@RcC${|= zt!VyW$(zSxMU5F!p=mg)*qX?$u8wEb`;%z=f4p?_gk;#VYp(j;-S;))BW#`(^N01A zH-wdKpVE|cseDH~=Hzo59i}U5;L9LxRqUXwHOU09Ej1O4cU)9=^;NjQ^KC>2FW3^(!ASf2hFx z!33N4taNrWkGV?ui!nYK$L7I*VV5x0P%QJ8?5Nvd%X(n^-|uN(dvm@2`8ekf=b=l( zta<7GiZKd$g!lv2n)H5@(p;H*nRAQb%ZzyjwroDe|8j5o+MCY=jE}H+SZB)G4j<_) zYi{Z-f&m5W{3==MHHGU*>hhGPl5&vHQ}`-hMVXe4IA# zJJ`H|wteY$lFcJNR;)EqY%kEqV6zgo?xo@-Uo;_~srvgRd_6EwjDCSFyO{0B%UB+L2gd*Yh-DttqJMpDLkE0h zpXm~GDGqan2kaDcJpi8xtg3ST7aPC0WE6Du4CxBjS;S{v!1zyj8CAaat5%=a4WAmH ziSkssTJq9Fp30s%e0ZPCocjh!rW1%h@Ba7BhxjS&6)i?Nex7CIr4rB5YZar~MP`>kY?8*t|;U(h}&>b5i9+ zv1WU8DRPR${Go8s2*fgxmysRS61J=le3^n^_p`Tu-=KVCpXn#)&`9`9zqL_rKjsdU zr$X@u#;d#xZ~Ig0&l#GJ&?U<4-w93~EPLnNYKKn?orwdN^IG>+FxqPb zd>r$Kg_Ng~N^7=1E%;J`Un5m)7ta6$qhGc9n~jH$$&4{6)xJOD-;Pb}FP)nCk#sqG zlmy=soTl(+HMHupK(^Be_|TdX?A!bfn9FhdMtqJX_Lq)N`cOKT_L+p|YHsooV}WdI z0#oLj#QY#H{kIr{obve2yjdlfIig zKsulPxrApu3}N$(!^cz%otc6)p+~9@Vf_xdCANZ0mL$w+9c!`=u)3_%_rZ4 z^@XeIq4Lw z&mq630QnybNol5$<^vkl*64oAcWAk*!N9@DU70(p$_3f&ivzTR1{kE(w zUf%NIKGWn6r61=Gkr4CNg}u`c9}~|H!Tu!=ihWE(en?&Y@9Lb_d}LjM&HMg;{w7^q z_%(c{2|?8*9q^I!t6nZyFo5S*No5kVG%)`AosY~3(h1VgeT|x#fRD@pXqGc zUnbc1abB#sJ z6*kYY52ClFW_X9b%}f?XP%1tF;@@yi2eBviv9Vl z#N_<9uw}v58RZW{mwaSSC@JRZZk~$Ud>s2*VlDFy%+dHW1GJa99uK|qshy|NaLm;; zAJY}svT5*T9<4r1c^R3^o;{ydci_|X2!TQFySt#Wq? 
z&&#M-H<;NCu)ngA`0#tBJeAqVQ>i27S|^`;(>biid=6b{O?x5I^H2QQjgy(dPzR6y zrs4RIKJl0u%dON91Mfo`XF^(>>`H-CS#QKBP;>k>^TzDkU2dY5Y%+j%7bkO?w?4 z5PKErrn9}c5u@`tKB>QSWI{jdwqZR=(|bREE+MSxh+)sLd*ebGi*3VL+<$w~DLVRy z!TD70jqX#PJ)pfgwM$#;lsBKK01rE42c&&$D7%&TBtYT~%E^d)NM{Pheo(dkv(DG& z^k`R^6d5ihcZ!gvzV&1|ct{SpiZTiEcr4hUWZ=cdyrLuJaN|~t#D};XP3&*`-w{1a zv)+HBG`Ul_9RfC-Pql+jwS$k$={wZ_2DyuKBAW+^ecFxA(ZMA~;R80+`{UoMSn*K@ z^Q^8B7NIjTn+Ew1Cktd1~v9@@>E{_bn# z;8Ik3DyEud>Dc-!`Xo4!7N?GamDiIqd_Z zZs5~`bcq;|=<(1aAJUopF&|W}`LxsZv~KeFuVhiz>xlZsADM}|ECrhk$RA}riV4%7 zF6oI6aXFIM&zAF55A*y!ub0&x|LbC+Jm)bVnb#RG`X{6{M9lMG?-F0rhc@YmPxZdo zJ{8Lbz5`z-!rHJcQ*x1Cd(DS9*&v%CuVP$kMx4fAF0KckDcF2SXAX_)YuPxYi+OI3 zS1alt|78yKl|J#2b?FM&Oo2Sg=i=Zb+rD6FQ?U3PM~oA`jD6?GcS}uu-y~ngR`w2VLppOH{{4!TfA4sGR@b&wp4&1t8>(x{hd5P2 zmO}nA^f56mjShp|3oxfZ^5Nq@VwqdM>}JmB)sQb!SO2>@r#T;4mwp1HQIOw37{y&l z#(-5IjsWtp?}_eRw&X+dWxN^xgT{wAQOsx;q+?tdYfhZv5yy)QKrGMSd`M>~mbq?l z=j&4Nv(@X(j3 ztN&e{3kV;X)k!e=7<0Tfes7f~yNqRe)BmnM6L9^J`N-t~uz45Kk{HS2D zCz0ma-L&#$T(PL@{h;$9PPBfz0P?2b)_@%lK=1Klw7=u+kMD;zW5W+aPZuHgpT;6b4ov z&L2GCLpoD1u5ZF%C||hqzE)#&)mrGFKDDd-zfOVxHt^es0-4~!ZWpJMPyBtEHK+p&~R;ZK228pLc|PK|<(Y~Lb~6VB~hYNs#;CM>mM zM01ppklhdi+B(DYISrkunIAaTl4f-XWhs&2EIBell%2xw2cOZ9LWtwp8Y7g3<>Q=R z%Q=zU4^rhc%;@$8OKblk*QLoHhEn+gWErH=7@Q2rhx@1CBK%X*85ipiSo>sLVDpkY zh5ZZI^oJbPKO;l&sfq7k4h_4;rM?)P`h3#cuQ24Oaa~HHGBW%R;4>9+MZcU3!AFjd zzBI9&h>wmy=X;w+m(nqerFIbWhZ&tCnt{!0kj;>r>kBo{A5Z?) 
z%*ZfCI>hye+r04B;PW};EJR212i@vV?e#e@iN~5k^ozcdsr~txU^JWNd0>>%h1h&%xuQ3qF)zl?C~eytfSHX2%DJi<&FqLTqB>%UqYH zcWK)MY}!CpK`epi6m&k6pV$k+^AqX0JCa}@GrPgwksdkbOCCPfdQW zV|)Q4P3C4=vxPq^#wEe(aqvloTnh}Rfb*gJ__dHw*e{+R|K=lD`wY{%SG8@A&mSD) zQ%YpStzgq0vIFAhm}@}!@aIdw=5DSl4u3}5p7dX9sqef4JtAEaF$l1^4>BBb0ph25 zUO@ObpUl*(P8@PV_)xvV&W;}*HR3kGoMF1 z3_gjFB5gQn&WE0_&xSmIxV5ll(_e)xa}oddwV#6h`IL@r+3dI9;5H9qR8z2tfNX$R zedDAlpCjPX7sB_apfSePx=gV9Rm;C$+Ombx-Vv+2A^(7!_MVX@e4P8c@HI@bWtf{8 zb^Q008FIRuYk{e>F4!sjX~-Oi7kMf^@u9qo&5&1Nzxe)02`|8xY2>!d9ez`IycP2Y zM{GPfvMuGQbb{nUJeWUt&F3t*(Eeh-f$%vqFTv;LQM(7oUxH5y+sFMT%2SDkxS6No zF`qJU5c~5v*2)^woVj+-YFC@xF_ew#DdrDTyZo7K-iwfLAXShC^vNSWw7+F{$Ze!6 zH@SGukL+ENBc6xXZse)B#pe=u#6upG|N4%q@x(%n-9Hk9q6kXt` zUqugyg^<+hkxn79pYd953l1ZQ(bP3z9QA-=BPoc%gr&?iq~^+QXDOKU9Qm` z;&n}P_$=x*&7ncmYnsCyqMiehj~4O^N7+PG_jgtO_tf{(N_7MDeUwL(vf}6u#VOuj zih78osNf~iQ@bb@lb?_SAom^iFv{!k{R=A8$}m zSASo1fA9ts*YgIIb!t#qrv{aEYEW6H25Z$rRdx060S(r=9u3y2D@=npAF6skL{(S+ zp1J-jn~YLc@H$jgSL>5n-_-i3`ncv*`g=KwR_T7!_DXHf)b>u%N4!5A)|^@u_Ohly zVNYus}i_cq1A_bM#&CuV@g> zIqKpoqw1*huPp!G=``J;tPfPD21DqDXb;14?eUZvq_^ewafiG)XFW$W=d7#8bH#Z7 E|D$GXzyJUM literal 0 HcmV?d00001 diff --git a/canton-3x/community/app/src/test/resources/advancedConfDef.env b/canton-3x/community/app/src/test/resources/advancedConfDef.env new file mode 100644 index 0000000000..f01f314075 --- /dev/null +++ b/canton-3x/community/app/src/test/resources/advancedConfDef.env @@ -0,0 +1,4 @@ +POSTGRES_PASSWORD=supersafe +POSTGRES_USER=canton +JWT_URL="https://bla.fasel/jwks.key" +JWT_CERTIFICATE_FILE="community/app/src/test/resources/dummy.crt" \ No newline at end of file diff --git a/canton-3x/community/app/src/test/resources/config-snippets/disable-ammonite-cache.conf b/canton-3x/community/app/src/test/resources/config-snippets/disable-ammonite-cache.conf new file mode 100644 index 0000000000..abfefe67fb --- /dev/null +++ b/canton-3x/community/app/src/test/resources/config-snippets/disable-ammonite-cache.conf @@ -0,0 +1 @@ 
+canton.parameters.console.cache-dir = null diff --git a/canton-3x/community/app/src/test/resources/deprecated-configs/backwards-compatible.conf b/canton-3x/community/app/src/test/resources/deprecated-configs/backwards-compatible.conf new file mode 100644 index 0000000000..5f17614044 --- /dev/null +++ b/canton-3x/community/app/src/test/resources/deprecated-configs/backwards-compatible.conf @@ -0,0 +1,36 @@ +canton { + monitoring { + health { + server { + address = 0.0.0.0 + port = 8000 + } + + check.type = is-active + check.participant = "my_node" + } + } + + participants { + participant1 { + storage.type = "memory" + storage.max-connections = 10 + storage.ledger-api-jdbc-url = "yes" + init.generate-legal-identity-certificate = true + init.startup-fail-fast = false + admin-api { + port = 10012 + } + ledger-api { + port = 10011 + max-deduplication-duration = "10m" + } + parameters.unique-contract-keys = false + } + } + + domains { + domain1.domain-parameters.unique-contract-keys = false + domain2.domain-parameters.unique-contract-keys = true + } +} diff --git a/canton-3x/community/app/src/test/resources/deprecated-configs/new-config-fields-take-precedence.conf b/canton-3x/community/app/src/test/resources/deprecated-configs/new-config-fields-take-precedence.conf new file mode 100644 index 0000000000..1e7e2e52ec --- /dev/null +++ b/canton-3x/community/app/src/test/resources/deprecated-configs/new-config-fields-take-precedence.conf @@ -0,0 +1,52 @@ +canton { + monitoring { + health { + server { + address = 0.0.0.0 + port = 8000 + } + + check.type = is-active + check.node = "my_node" + check.participant = "not_my_node" # Should be ignored because deprecated + } + } + + participants { + participant1 { + init { + identity.generate-legal-identity-certificate = true + parameters.unique-contract-keys = false + ledger-api.max-deduplication-duration = "10m" + startup-fail-fast = true # Should be ignored because deprecated + generate-legal-identity-certificate = false # Should 
be ignored because deprecated + } + storage.type = "memory" + storage.fail-fast-on-startup = true # Should be ignored because deprecated + storage.parameters.fail-fast-on-startup = false + storage.max-connections = 5 # Should be ignored because deprecated + storage.parameters.max-connections = 10 + storage.ledger-api-jdbc-url = "no" # Should be ignored because deprecated + storage.parameters.ledger-api-jdbc-url = "yes" + admin-api { + port = 10012 + } + ledger-api { + port = 10011 + max-deduplication-duration = "20m" # Should be ignored because deprecated + } + parameters.unique-contract-keys = true # Should be ignored because deprecated + } + } + + domains { + domain1.init.domain-parameters.unique-contract-keys = false + domain2.init.domain-parameters.unique-contract-keys = true + } + + ### Should be ignored, these values are deprecated and the ones above should be used instead + domains { + domain1.domain-parameters.unique-contract-keys = true + domain2.domain-parameters.unique-contract-keys = false + } +} diff --git a/canton-3x/community/app/src/test/resources/documentation-snippets/auth-token-config.conf b/canton-3x/community/app/src/test/resources/documentation-snippets/auth-token-config.conf new file mode 100644 index 0000000000..bf6cace71e --- /dev/null +++ b/canton-3x/community/app/src/test/resources/documentation-snippets/auth-token-config.conf @@ -0,0 +1,4 @@ +canton.domains.mydomain.public-api { + token-expiration-time = 60m + nonce-expiration-time = 1m +} \ No newline at end of file diff --git a/canton-3x/community/app/src/test/resources/documentation-snippets/caching-configs.conf b/canton-3x/community/app/src/test/resources/documentation-snippets/caching-configs.conf new file mode 100644 index 0000000000..c8826d7a47 --- /dev/null +++ b/canton-3x/community/app/src/test/resources/documentation-snippets/caching-configs.conf @@ -0,0 +1,23 @@ +canton.participants.participant1 { + // tune caching configs of the ledger api server + ledger-api { + 
index-service { + max-contract-state-cache-size = 1000 // default 1e4 + max-contract-key-state-cache-size = 1000 // default 1e4 + + // The in-memory fan-out will serve the transaction streams from memory as they are finalized, rather than + // using the database. Therefore, you should choose this buffer to be large enough such that the likeliness of + // applications having to stream transactions from the database is low. Generally, having a 10s buffer is + // sensible. Therefore, if you expect e.g. a throughput of 20 tx/s, then setting this number to 200 is sensible. + // The default setting assumes 100 tx/s. + max-transactions-in-memory-fan-out-buffer-size = 200 // default 1000 + } + } + // tune the synchronisation protocols contract store cache + caching { + contract-store { + maximum-size = 1000 // default 1e6 + expire-after-access = 120s // default 10 minutes + } + } +} diff --git a/canton-3x/community/app/src/test/resources/documentation-snippets/command-service-max-commands-in-flight.conf b/canton-3x/community/app/src/test/resources/documentation-snippets/command-service-max-commands-in-flight.conf new file mode 100644 index 0000000000..afe8d0676a --- /dev/null +++ b/canton-3x/community/app/src/test/resources/documentation-snippets/command-service-max-commands-in-flight.conf @@ -0,0 +1 @@ +canton.participants.participant1.ledger-api.command-service.max-commands-in-flight = 256 // default value diff --git a/canton-3x/community/app/src/test/resources/documentation-snippets/console-timeouts.conf b/canton-3x/community/app/src/test/resources/documentation-snippets/console-timeouts.conf new file mode 100644 index 0000000000..25e4023de8 --- /dev/null +++ b/canton-3x/community/app/src/test/resources/documentation-snippets/console-timeouts.conf @@ -0,0 +1,6 @@ +canton.parameters.timeouts.console = { + bounded = 2.minutes + unbounded = Inf // infinity + ledger-command = 2.minutes + ping = 30.seconds +} \ No newline at end of file diff --git 
a/canton-3x/community/app/src/test/resources/documentation-snippets/dev-version-support.conf b/canton-3x/community/app/src/test/resources/documentation-snippets/dev-version-support.conf new file mode 100644 index 0000000000..88b28c924f --- /dev/null +++ b/canton-3x/community/app/src/test/resources/documentation-snippets/dev-version-support.conf @@ -0,0 +1,20 @@ +canton.parameters { + # turn on non-standard configuration support + non-standard-config = yes + + # turn on support of development version support for domain nodes + dev-version-support = yes +} + +canton.domains.mydomain.init.domain-parameters { + # set the domain protocol version to `dev` (or to any other unstable protocol version) + # requires you to explicitly enable non-standard-config. not to be used for production. + protocol-version = dev +} + +canton.participants.participant1.parameters = { + # enable dev version on the participant (this will allow the participant to connect to a domain with dev protocol version) + # and it will turn on support for unsafe daml lf dev versions + # not to be used in production and requires you to define non-standard-config = yes + dev-version-support = yes +} diff --git a/canton-3x/community/app/src/test/resources/documentation-snippets/ledger-api-target-audience.conf b/canton-3x/community/app/src/test/resources/documentation-snippets/ledger-api-target-audience.conf new file mode 100644 index 0000000000..cd447817b5 --- /dev/null +++ b/canton-3x/community/app/src/test/resources/documentation-snippets/ledger-api-target-audience.conf @@ -0,0 +1,13 @@ +canton { + participants { + participant { + ledger-api { + auth-services = [{ + type = jwt-rs-256-jwks + url = "https://target.audience.url/jwks.json" + target-audience = "https://rewrite.target.audience.url" + }] + } + } + } +} diff --git a/canton-3x/community/app/src/test/resources/documentation-snippets/ledger-api-target-scope.conf 
b/canton-3x/community/app/src/test/resources/documentation-snippets/ledger-api-target-scope.conf new file mode 100644 index 0000000000..84b8e8785d --- /dev/null +++ b/canton-3x/community/app/src/test/resources/documentation-snippets/ledger-api-target-scope.conf @@ -0,0 +1,13 @@ +canton { + participants { + participant { + ledger-api { + auth-services = [{ + type = jwt-rs-256-jwks + url = "https://some.url/jwks.json" + target-scope = "custom/Scope-5:with_special_characters" + }] + } + } + } +} diff --git a/canton-3x/community/app/src/test/resources/documentation-snippets/logging-event-details.conf b/canton-3x/community/app/src/test/resources/documentation-snippets/logging-event-details.conf new file mode 100644 index 0000000000..e7de128913 --- /dev/null +++ b/canton-3x/community/app/src/test/resources/documentation-snippets/logging-event-details.conf @@ -0,0 +1,10 @@ +canton.monitoring.logging { + event-details = true + api { + message-payloads = true + max-method-length = 1000 + max-message-lines = 10000 + max-string-length = 10000 + max-metadata-size = 10000 + } +} diff --git a/canton-3x/community/app/src/test/resources/documentation-snippets/migrate-and-start.conf b/canton-3x/community/app/src/test/resources/documentation-snippets/migrate-and-start.conf new file mode 100644 index 0000000000..e6101d4ed1 --- /dev/null +++ b/canton-3x/community/app/src/test/resources/documentation-snippets/migrate-and-start.conf @@ -0,0 +1 @@ +canton.participants.participant1.storage.parameters.migrate-and-start = yes diff --git a/canton-3x/community/app/src/test/resources/documentation-snippets/no-fail-fast.conf b/canton-3x/community/app/src/test/resources/documentation-snippets/no-fail-fast.conf new file mode 100644 index 0000000000..ef350732d5 --- /dev/null +++ b/canton-3x/community/app/src/test/resources/documentation-snippets/no-fail-fast.conf @@ -0,0 +1 @@ +canton.participants.participant1.storage.parameters.fail-fast-on-startup = "no" \ No newline at end of file diff --git 
a/canton-3x/community/app/src/test/resources/documentation-snippets/non-standard-config.conf b/canton-3x/community/app/src/test/resources/documentation-snippets/non-standard-config.conf new file mode 100644 index 0000000000..630afc0af9 --- /dev/null +++ b/canton-3x/community/app/src/test/resources/documentation-snippets/non-standard-config.conf @@ -0,0 +1 @@ +canton.parameters.non-standard-config = yes \ No newline at end of file diff --git a/canton-3x/community/app/src/test/resources/documentation-snippets/non-uck-mode.conf b/canton-3x/community/app/src/test/resources/documentation-snippets/non-uck-mode.conf new file mode 100644 index 0000000000..768cbac89a --- /dev/null +++ b/canton-3x/community/app/src/test/resources/documentation-snippets/non-uck-mode.conf @@ -0,0 +1,14 @@ +canton { + domains { + alpha { + // subsequent changes have no effect and the mode of a node can never be changed + init.domain-parameters.unique-contract-keys = false + } + } + participants { + participant1 { + // subsequent changes have no effect and the mode of a node can never be changed + init.parameters.unique-contract-keys = false + } + } +} \ No newline at end of file diff --git a/canton-3x/community/app/src/test/resources/documentation-snippets/postgres-ssl.conf b/canton-3x/community/app/src/test/resources/documentation-snippets/postgres-ssl.conf new file mode 100644 index 0000000000..2196e5e396 --- /dev/null +++ b/canton-3x/community/app/src/test/resources/documentation-snippets/postgres-ssl.conf @@ -0,0 +1,27 @@ +_shared { + storage { + type = postgres + config { + dataSourceClass = "org.postgresql.ds.PGSimpleDataSource" + properties = { + serverName = "localhost" + serverName = ${?POSTGRES_HOST} + portNumber = "5432" + portNumber = ${?POSTGRES_PORT} + user = ${POSTGRES_USER} + password = ${POSTGRES_PASSWORD} + # The following settings can be used to configure an SSL connection to the Postgres DB + ssl = true + # Will verify that the server certificate is trusted + sslmode= 
"verify-ca" # Other options and their meaning can be found https://jdbc.postgresql.org/documentation/head/ssl-client.html + + # Optionally set with path to root certificate. Not necessary if the server certificate can be verified using the JRE root certificates + # sslrootcert = "path/to/root.cert" + + # For mTLS: + # sslcert= "path/to/client-cert.pem" + # sslkey= "path/to/client-key.p12" + } + } + } +} diff --git a/canton-3x/community/app/src/test/resources/documentation-snippets/startup-parallelism.conf b/canton-3x/community/app/src/test/resources/documentation-snippets/startup-parallelism.conf new file mode 100644 index 0000000000..86ca74d3d6 --- /dev/null +++ b/canton-3x/community/app/src/test/resources/documentation-snippets/startup-parallelism.conf @@ -0,0 +1 @@ +canton.parameters.startup-parallelism = 8 diff --git a/canton-3x/community/app/src/test/resources/documentation-snippets/storage-queue-size.conf b/canton-3x/community/app/src/test/resources/documentation-snippets/storage-queue-size.conf new file mode 100644 index 0000000000..44c5d752f9 --- /dev/null +++ b/canton-3x/community/app/src/test/resources/documentation-snippets/storage-queue-size.conf @@ -0,0 +1 @@ +canton.participants.participant1.storage.config.queueSize = 10000 \ No newline at end of file diff --git a/canton-3x/community/app/src/test/resources/dummy.crt b/canton-3x/community/app/src/test/resources/dummy.crt new file mode 100644 index 0000000000..56845f343c --- /dev/null +++ b/canton-3x/community/app/src/test/resources/dummy.crt @@ -0,0 +1,32 @@ +-----BEGIN CERTIFICATE----- +MIIFkDCCA3igAwIBAgIUFrBeuJu2h9bAHiiBgkoFwPJpKf8wDQYJKoZIhvcNAQEL +BQAwTDEQMA4GA1UECgwHVEVTVElORzEQMA4GA1UECwwHUk9PVCBDQTEmMCQGCSqG +SIb3DQEJARYXY2FudG9uQGRpZ2l0YWxhc3NldC5jb20wHhcNMjIwMTExMjM0NTQ0 +WhcNMjQwMTExMjM0NTQ0WjBfMRAwDgYDVQQKDAdURVNUSU5HMQ8wDQYDVQQLDAZE +T01BSU4xEjAQBgNVBAMMCWxvY2FsaG9zdDEmMCQGCSqGSIb3DQEJARYXY2FudG9u +QGRpZ2l0YWxhc3NldC5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC 
+AQDt6rnSm1ujbCAV3t2Rx9ZLI4wY11QrAYS9B8K5jdHeb6Bz2Ke4hRyytM15tWte +zLZAMN2/HgBqTNomjD6SfG4Kh2cF/8dwLGzPjhoFRaPLjpZhmBpN8GeoPOyTyEKd +BZPhE/jZ1yx8FSr3Ht27O2zsaweZPILBde6xC+TaBohwStc5MCWErCG0Fdvvnj1T +pP72uBI4XWAnoGnj69kZUIEgo1va2yMAmoFqiTfyKGGVnYOhGRSCRUbyRMM27apS +0O/3qZtQrQAnM7nipUD461cWUyTD9n6o9QDCMqLVmxjJSTfIZCv34t7movG8Dm1v +H/amr/cHmdVbk+wxg+h4YCQAhtEYcUDoKeZhB46uEVvfeFhhy2WSjjecAxFnj3/g +3BUMYjWQER0MrNpQ/w6/76VrLwicOwfvoHDv+0iKzaKUoRD/Iu2wKTO3VCozexe5 +ClEvJIeAxTWfNVskbQ4XNx7eu5PEWHCYROcffjCUNyXi23U96dC+72paJFUzwekP +IXF6+xH8jqFa2aQojmhKYl+A7XCAeUNvRjD3Pb0TZtALoZ6WwjiqA4xs3sdUwz8a +IyrjD2RjX+6UZkp/5WOOj4CZ/6KJejMJ/1u/JlHRhAewhA/uONuiYoFJEeJGnqVk +0CgDnneieV7WxkGhSYze0RXYi4xEbvXDkaiKwfyx2sVMLwIDAQABo1cwVTAfBgNV +HSMEGDAWgBSmoj0mj9DG42Ab1up+l3rh4fwgLzAJBgNVHRMEAjAAMAsGA1UdDwQE +AwIE8DAaBgNVHREEEzARgglsb2NhbGhvc3SHBH8AAAEwDQYJKoZIhvcNAQELBQAD +ggIBAEOzJsb1E3MpQlPwQ6GsztiYxD/ur+qE4Iz2F3foteU0o4SrmnaKtttOt7rZ +w8eDJM3AoZKShkTCkuThwI5HCyjLovKJvGkLjntkmnwoceP/CLW0OE231Z+YozYg +kNJ+tNcnB6x/ehMZ5Hn//m1KvyoR3yw1h9P3IpSXe4TEZzKAvWFAwcfKrYzXi8Ps +K+G/qCoWdUdKm91fON58VmzFYrqFkhvQ0xTn63s2+RNExNfPjlM1xNXuF3z5OiEA +ae6bsNBpNqoLrOkBE3rrKR8UqkGQ+UdfCkOtBLwlWGQ/yglxwzbYsHaNasaZunkc ++4L4SvpMvqwiNze2pSZeQdU6F+ibNFTxtoxA6huj2fVZA4ugzpItrOm3Qc/oZbvX +iWC9znLfrReZugNF40eCRx1bZke7LmBAtIyu6+IFuMPTIm3FX4BnxXxn/dTvO5oT +W6Cjedf7VdSgi8Z+o2iUKouUANbgcTx2seH5zT3zQKnTHkAfpgf0Jpxxub5Rg3n8 +t+edF1Mdb2BihaL0Gde/dsSNJv495WkG98RFsNQlYQmY16sb79i8WdfKhdexXIJD ++9s3cMMRsYaHScPP4B53xlk5UNFw5ci8LhRfLmj54qGg9DlrK1R+de+zQJrGZzsx +EEiOhvhl0au19FIbmSjt7hlxD3RbJgKQisCoe/E4GCM+mjlx +-----END CERTIFICATE----- diff --git a/canton-3x/community/app/src/test/resources/examples b/canton-3x/community/app/src/test/resources/examples new file mode 120000 index 0000000000..2551851a3f --- /dev/null +++ b/canton-3x/community/app/src/test/resources/examples @@ -0,0 +1 @@ +../../pack/examples/ \ No newline at end of file diff --git a/canton-3x/community/app/src/test/resources/invalid-configs/bort.conf 
b/canton-3x/community/app/src/test/resources/invalid-configs/bort.conf new file mode 100644 index 0000000000..a3d6d720a0 --- /dev/null +++ b/canton-3x/community/app/src/test/resources/invalid-configs/bort.conf @@ -0,0 +1,7 @@ +canton { + domains { + mydomain { + public-api.bort = 5018 + } + } +} \ No newline at end of file diff --git a/canton-3x/community/app/src/test/resources/invalid-configs/duplicate-storage.conf b/canton-3x/community/app/src/test/resources/invalid-configs/duplicate-storage.conf new file mode 100644 index 0000000000..bfb1fc5aaa --- /dev/null +++ b/canton-3x/community/app/src/test/resources/invalid-configs/duplicate-storage.conf @@ -0,0 +1,17 @@ +_shared { + storage { + type = postgres + config { + properties = { + serverName = "localhost" + portNumber = "5432" + databaseName = "canton" + user = "canton" + password = "supersafe" + } + } + } +} + +canton.participants.participant1.storage = ${_shared.storage} +canton.participants.participant2.storage = ${_shared.storage} diff --git a/canton-3x/community/app/src/test/resources/invalid-configs/include-missing-file.conf b/canton-3x/community/app/src/test/resources/invalid-configs/include-missing-file.conf new file mode 100644 index 0000000000..09efc13ba1 --- /dev/null +++ b/canton-3x/community/app/src/test/resources/invalid-configs/include-missing-file.conf @@ -0,0 +1 @@ +include "this-file-does-not-exist.conf" diff --git a/canton-3x/community/app/src/test/resources/invalid-configs/invalid-node-names.conf b/canton-3x/community/app/src/test/resources/invalid-configs/invalid-node-names.conf new file mode 100644 index 0000000000..89397c9418 --- /dev/null +++ b/canton-3x/community/app/src/test/resources/invalid-configs/invalid-node-names.conf @@ -0,0 +1,9 @@ +canton { + domains { + "my domain" {} + + "my`domain" {} + + "mydomain012345678901234567890123456789" {} + } +} \ No newline at end of file diff --git a/canton-3x/community/app/src/test/resources/invalid-configs/missing-bracket.conf 
b/canton-3x/community/app/src/test/resources/invalid-configs/missing-bracket.conf new file mode 100644 index 0000000000..4d850f5ff8 --- /dev/null +++ b/canton-3x/community/app/src/test/resources/invalid-configs/missing-bracket.conf @@ -0,0 +1 @@ +canton { diff --git a/canton-3x/community/app/src/test/resources/invalid-configs/negative-port.conf b/canton-3x/community/app/src/test/resources/invalid-configs/negative-port.conf new file mode 100644 index 0000000000..0e15724fe8 --- /dev/null +++ b/canton-3x/community/app/src/test/resources/invalid-configs/negative-port.conf @@ -0,0 +1,9 @@ +canton { + participants { + participant1 { + admin-api { + port = -5012 + } + } + } +} diff --git a/canton-3x/community/app/src/test/resources/invalid-configs/no-manual-start.conf b/canton-3x/community/app/src/test/resources/invalid-configs/no-manual-start.conf new file mode 100644 index 0000000000..d96e75eb63 --- /dev/null +++ b/canton-3x/community/app/src/test/resources/invalid-configs/no-manual-start.conf @@ -0,0 +1 @@ +canton.parameters.manual-start = false \ No newline at end of file diff --git a/canton-3x/community/app/src/test/resources/invalid-configs/require-missing-file.conf b/canton-3x/community/app/src/test/resources/invalid-configs/require-missing-file.conf new file mode 100644 index 0000000000..579e49491e --- /dev/null +++ b/canton-3x/community/app/src/test/resources/invalid-configs/require-missing-file.conf @@ -0,0 +1,3 @@ +canton { + include required("this-file-does-not-exist.conf") +} \ No newline at end of file diff --git a/canton-3x/community/app/src/test/resources/invalid-configs/storage-url-with-password.conf b/canton-3x/community/app/src/test/resources/invalid-configs/storage-url-with-password.conf new file mode 100644 index 0000000000..fcec4a5f21 --- /dev/null +++ b/canton-3x/community/app/src/test/resources/invalid-configs/storage-url-with-password.conf @@ -0,0 +1,13 @@ +_shared { + storage { + type = postgres + config { + properties = { + url = 
"jdbc:postgresql://localhost:5432/canton?user=canton&password=supersafe" + } + } + } +} + +canton.participants.participant1.storage = ${_shared.storage} +canton.participants.participant2.storage = ${_shared.storage} diff --git a/canton-3x/community/app/src/test/resources/invalid-configs/undefined-env-var.conf b/canton-3x/community/app/src/test/resources/invalid-configs/undefined-env-var.conf new file mode 100644 index 0000000000..1e510d9faa --- /dev/null +++ b/canton-3x/community/app/src/test/resources/invalid-configs/undefined-env-var.conf @@ -0,0 +1 @@ +value = ${UNDEFINED_ENV_VARIABLE} diff --git a/canton-3x/community/app/src/test/resources/invalid-configs/unknown-key-in-nested-config.conf b/canton-3x/community/app/src/test/resources/invalid-configs/unknown-key-in-nested-config.conf new file mode 100644 index 0000000000..78a30a4489 --- /dev/null +++ b/canton-3x/community/app/src/test/resources/invalid-configs/unknown-key-in-nested-config.conf @@ -0,0 +1,6 @@ +canton { + # monitoring is a valid configuration key + monitoring { + this-is-not-a-key = "oh no it isn't!!" 
+ } +} \ No newline at end of file diff --git a/canton-3x/community/app/src/test/resources/scripts/bootstrap-with-error-dynamic.canton b/canton-3x/community/app/src/test/resources/scripts/bootstrap-with-error-dynamic.canton new file mode 100644 index 0000000000..d471b5c24d --- /dev/null +++ b/canton-3x/community/app/src/test/resources/scripts/bootstrap-with-error-dynamic.canton @@ -0,0 +1,20 @@ +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.logging.TracedLogger +import org.slf4j.LoggerFactory +import ch.qos.logback.classic.Logger + +val logger = TracedLogger(LoggerFactory.getLogger("com.digitalasset.canton").asInstanceOf[Logger]) + +TraceContext.withNewTraceContext { implicit traceContext => + logger.debug("some logging debug event") + logger.error("some logging error") + logger.debug("some more logging debug event") + logger.error("some more logging error") + + logging.set_level(level = "DEBUG") + logger.debug("final logging debug event") + logger.error("final logging error") +} + +nodes.local.stop() +sys.exit(0) diff --git a/canton-3x/community/app/src/test/resources/scripts/bootstrap-with-error.canton b/canton-3x/community/app/src/test/resources/scripts/bootstrap-with-error.canton new file mode 100644 index 0000000000..d383c0bdde --- /dev/null +++ b/canton-3x/community/app/src/test/resources/scripts/bootstrap-with-error.canton @@ -0,0 +1,16 @@ +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.logging.TracedLogger +import org.slf4j.LoggerFactory +import ch.qos.logback.classic.Logger + +val logger = TracedLogger(LoggerFactory.getLogger("com.digitalasset.canton").asInstanceOf[Logger]) + +TraceContext.withNewTraceContext { implicit traceContext => + logger.debug("some logging debug event") + logger.error("some logging error") + logger.debug("some more logging debug event") + logger.error("some more logging error") +} + +nodes.local.stop() +sys.exit(0) diff --git 
a/canton-3x/community/app/src/test/resources/scripts/bootstrap.canton b/canton-3x/community/app/src/test/resources/scripts/bootstrap.canton new file mode 100644 index 0000000000..6c51c8b3fd --- /dev/null +++ b/canton-3x/community/app/src/test/resources/scripts/bootstrap.canton @@ -0,0 +1,4 @@ +println("The last emperor is always the worst.") +// TODO(i7023) remove stop again +nodes.local.stop() +sys.exit(0) diff --git a/canton-3x/community/app/src/test/resources/scripts/participant1.canton b/canton-3x/community/app/src/test/resources/scripts/participant1.canton new file mode 100644 index 0000000000..fc7017bae2 --- /dev/null +++ b/canton-3x/community/app/src/test/resources/scripts/participant1.canton @@ -0,0 +1,7 @@ +// This file is named participant1.canton specifically to be used in a test checking that the file name +// of the bootstrap script does not shadow script variables. Do not change this file name. +println("The last emperor is always the worst.") +println(participant1.health.status) +// TODO(i7023) remove stop again +nodes.local.stop() +sys.exit(0) diff --git a/canton-3x/community/app/src/test/resources/scripts/run.canton b/canton-3x/community/app/src/test/resources/scripts/run.canton new file mode 100644 index 0000000000..4347cf26d4 --- /dev/null +++ b/canton-3x/community/app/src/test/resources/scripts/run.canton @@ -0,0 +1,3 @@ +println("The last emperor is always the worst.") +// TODO(i7023) remove stop again +nodes.local.stop() diff --git a/canton-3x/community/app/src/test/resources/scripts/startup.canton b/canton-3x/community/app/src/test/resources/scripts/startup.canton new file mode 100644 index 0000000000..1a7183b98d --- /dev/null +++ b/canton-3x/community/app/src/test/resources/scripts/startup.canton @@ -0,0 +1,5 @@ +println(s"connected: ${participants.local.map(_.domains.list_connected().nonEmpty)}") +println("The last emperor is always the worst.") +// TODO(i7023) remove stop again +nodes.local.stop() +sys.exit(0) diff --git 
a/canton-3x/community/app/src/test/scala/com/digitalasset/canton/ConfigStubs.scala b/canton-3x/community/app/src/test/scala/com/digitalasset/canton/ConfigStubs.scala new file mode 100644 index 0000000000..a6e8b3d19e --- /dev/null +++ b/canton-3x/community/app/src/test/scala/com/digitalasset/canton/ConfigStubs.scala @@ -0,0 +1,45 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton + +import cats.syntax.option.* +import com.digitalasset.canton.config.RequireTypes.Port +import com.digitalasset.canton.config.{ + CommunityAdminServerConfig, + CommunityCryptoConfig, + CommunityStorageConfig, +} +import com.digitalasset.canton.domain.config.{CommunityDomainConfig, DomainInitConfig} +import com.digitalasset.canton.participant.config.{ + CommunityParticipantConfig, + ParticipantInitConfig, +} + +/** Utilities for creating config objects for tests + */ +@SuppressWarnings(Array("org.wartremover.warts.Null")) +object ConfigStubs { + def participant: CommunityParticipantConfig = + CommunityParticipantConfig( + ParticipantInitConfig(), + CommunityCryptoConfig(), + null, + None, + adminApi, + CommunityStorageConfig.Memory(), + ) + + def domain: CommunityDomainConfig = + CommunityDomainConfig( + DomainInitConfig(), + false, + null, + null, + CommunityStorageConfig.Memory(), + CommunityCryptoConfig(), + ) + + def adminApi: CommunityAdminServerConfig = + CommunityAdminServerConfig(internalPort = Port.tryCreate(42).some) +} diff --git a/canton-3x/community/app/src/test/scala/com/digitalasset/canton/admin/api/client/GrpcCtlRunnerTest.scala b/canton-3x/community/app/src/test/scala/com/digitalasset/canton/admin/api/client/GrpcCtlRunnerTest.scala new file mode 100644 index 0000000000..31030b2e14 --- /dev/null +++ b/canton-3x/community/app/src/test/scala/com/digitalasset/canton/admin/api/client/GrpcCtlRunnerTest.scala @@ -0,0 +1,55 @@ +// Copyright (c) 2023 Digital 
Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.admin.api.client + +import cats.implicits.* +import com.digitalasset.canton.BaseTest +import com.digitalasset.canton.admin.api.client.commands.GrpcAdminCommand +import io.grpc.stub.AbstractStub +import io.grpc.{CallOptions, Channel, ManagedChannel} +import org.scalatest.wordspec.AsyncWordSpec + +import scala.concurrent.Future +import scala.concurrent.duration.* + +class GrpcCtlRunnerTest extends AsyncWordSpec with BaseTest { + + "Runner" when { + "running a successful command" should { + val (channel, command) = defaultMocks() + + "run successfully" in { + new GrpcCtlRunner(1000, 1000, loggerFactory).run( + "participant1", + command, + channel, + None, + 1000.milliseconds, + ) map { result => + result shouldBe "result" + } + } + } + } + + class TestAbstractStub(channel: Channel) extends AbstractStub[TestAbstractStub](channel) { + override def build(channel: Channel, callOptions: CallOptions): TestAbstractStub = this + } + + private def defaultMocks(): (ManagedChannel, GrpcAdminCommand[String, String, String]) = { + val channel = mock[ManagedChannel] + val service = new TestAbstractStub(channel) + val command = new GrpcAdminCommand[String, String, String] { + override type Svc = TestAbstractStub + override def createService(channel: ManagedChannel): Svc = service + override def createRequest(): Either[String, String] = Right("request") + override def submitRequest(service: Svc, request: String): Future[String] = + if (service == service && request == "request") Future.successful("response") + else Future.failed(new Exception("Invalid")) + override def handleResponse(response: String): Either[String, String] = Right("result") + } + + (channel, command) + } +} diff --git a/canton-3x/community/app/src/test/scala/com/digitalasset/canton/admin/api/client/data/MeteringTest.scala 
b/canton-3x/community/app/src/test/scala/com/digitalasset/canton/admin/api/client/data/MeteringTest.scala new file mode 100644 index 0000000000..1e701d1828 --- /dev/null +++ b/canton-3x/community/app/src/test/scala/com/digitalasset/canton/admin/api/client/data/MeteringTest.scala @@ -0,0 +1,33 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.admin.api.client.data + +import io.circe.parser.* +import org.scalatest.matchers.should.Matchers +import org.scalatest.wordspec.AnyWordSpec + +class MeteringTest extends AnyWordSpec with Matchers { + + private val sample = + """ + |{ + | "s": "abc", + | "b": true, + | "n": null, + | "d": 2.3, + | "a": [1,2,3], + | "o": { "x": 1, "y": 2 } + |} + |""".stripMargin + + "StructEncoderDecoder" should { + "serialize/deserialize without loss" in { + val expected = parse(sample) + val struct = expected.flatMap(j => StructEncoderDecoder(j.hcursor)) + val actual = struct.map(s => StructEncoderDecoder(s)) + actual shouldBe expected + } + } + +} diff --git a/canton-3x/community/app/src/test/scala/com/digitalasset/canton/cli/CliTest.scala b/canton-3x/community/app/src/test/scala/com/digitalasset/canton/cli/CliTest.scala new file mode 100644 index 0000000000..98ac6d7a4d --- /dev/null +++ b/canton-3x/community/app/src/test/scala/com/digitalasset/canton/cli/CliTest.scala @@ -0,0 +1,125 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.cli + +import ch.qos.logback.classic.Level +import com.digitalasset.canton.BaseTest +import org.scalatest.wordspec.AnyWordSpec + +import java.io.{ByteArrayOutputStream, File} + +class CliTest extends AnyWordSpec with BaseTest { + "parse" can { + "when no config files are provided" should { + "fail horribly" in { + val (result, _) = parse("") + + result shouldBe None + } + } + + "when a single config file is provided" should { + "work fine" in { + val (maybeCli: Option[Cli], _) = parse("--config some-file.conf") + + maybeCli.value.configFiles.map(_.getName) should contain only "some-file.conf" + } + } + + "when a bunch of config files are provided" should { + "parse them all in order" in { + val (maybeCli: Option[Cli], _) = parse("--config first.conf --config second.conf") + + maybeCli.value.configFiles.map(_.getName) shouldBe Seq("first.conf", "second.conf") + } + } + + "when config key values are provided" should { + "parse them all" in { + val (maybeCli: Option[Cli], _) = + parse("-C canton.a.b=foo -C canton.c.d=bar,canton.e.f=baba -C canton.a.b=ignored") + maybeCli.value.configMap shouldBe Map( + "canton.a.b" -> "foo", + "canton.c.d" -> "bar", + "canton.e.f" -> "baba", + ) + } + } + + "when setting flags" should { + "parse them all successfully" in { + val (maybeCli: Option[Cli], _) = + parse("--config first.conf -v --no-tty --auto-connect-local") + val cli = maybeCli.value + cli.levelCanton should contain(Level.DEBUG) + cli.noTty && cli.autoConnectLocal shouldBe true + } + "parse logging flags successfully" in { + val (maybeCli: Option[Cli], _) = parse( + "--auto-connect-local -C canton.parameters.manual-start=yes --log-truncate --log-level-root=DEBUG --log-level-canton=DEBUG --log-level-stdout=ERROR --log-file-appender=rolling --log-file-name=log/wurst.log --log-file-rolling-history=20 --log-file-rolling-pattern=YYYY-mm-dd-HH --log-last-errors=false" + ) + val cli = maybeCli.value + 
cli.logFileAppender shouldBe LogFileAppender.Rolling + cli.levelRoot shouldBe Some(Level.DEBUG) + cli.levelCanton shouldBe Some(Level.DEBUG) + cli.levelStdout shouldBe Level.ERROR + cli.logTruncate shouldBe true + cli.logFileName should contain("log/wurst.log") + cli.logFileHistory should contain(20) + cli.logFileRollingPattern should contain("YYYY-mm-dd-HH") + cli.logLastErrors shouldBe false + cli.autoConnectLocal shouldBe true + } + + "ensure that command flags allow overriding profile definitions" in { + val (maybeCli: Option[Cli], _) = + parse( + "--config first.conf --log-file-rolling-history=20 --log-profile=container --log-level-stdout=TRACE" + ) + val cli = maybeCli.value + cli.logFileHistory should contain(10) // should be overridden by log profile + cli.levelStdout shouldBe Level.TRACE + } + + } + + "daemon" should { + "capture bootstrap script if provided" in { + val (maybeCli: Option[Cli], _) = + parse("daemon --bootstrap my/script.canton --config whatever.conf") + + val cli = maybeCli.value + val script = cli.bootstrapScriptPath + + script.value.toString shouldBe "my/script.canton" + } + } + + "run" should { + "fail if a script is not provided" in { + val (result, _) = parse("run") + + result shouldBe None + } + "set the command if a script is available" in { + val (maybeCli: Option[Cli], _) = parse("run some-file.sc --config some.conf") + + maybeCli.value.command match { + case Some(Command.RunScript(file: File)) => + file.getPath shouldBe "some-file.sc" + case _ => fail() + } + } + } + } + + private def parse(text: String): (Option[Cli], String) = { + val args: Array[String] = if (!text.isEmpty) text.split(" ") else Array() + val errOutput = new ByteArrayOutputStream() + + val result = Console.withErr(errOutput) { Cli.parse(args) } + + (result, errOutput.toString) + } +} diff --git a/canton-3x/community/app/src/test/scala/com/digitalasset/canton/config/CantonCommunityConfigTest.scala 
b/canton-3x/community/app/src/test/scala/com/digitalasset/canton/config/CantonCommunityConfigTest.scala new file mode 100644 index 0000000000..11a70260e8 --- /dev/null +++ b/canton-3x/community/app/src/test/scala/com/digitalasset/canton/config/CantonCommunityConfigTest.scala @@ -0,0 +1,368 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.config + +import better.files.{File, *} +import com.digitalasset.canton.BaseTest +import com.digitalasset.canton.config.ConfigErrors.{ + CannotParseFilesError, + CannotReadFilesError, + CantonConfigError, + GenericConfigError, + NoConfigFiles, + SubstitutionError, +} +import com.digitalasset.canton.logging.SuppressingLogger.LogEntryOptionality +import com.digitalasset.canton.logging.{ErrorLoggingContext, LogEntry, SuppressionRule} +import com.digitalasset.canton.version.HandshakeErrors.DeprecatedProtocolVersion +import com.typesafe.config.{Config, ConfigFactory, ConfigValueFactory} +import org.scalatest.wordspec.AnyWordSpec + +class CantonCommunityConfigTest extends AnyWordSpec with BaseTest { + + import scala.jdk.CollectionConverters.* + val simpleConf = "examples/01-simple-topology/simple-topology.conf" + "the example simple topology configuration" should { + lazy val config = + loadFile(simpleConf).valueOrFail("failed to load simple-topology.conf") + + "contain a couple of participants" in { + config.participants should have size 2 + } + + "contain a single domain" in { + config.domains should have size 1 + } + + "produce a port definition message" in { + config.portDescription shouldBe "mydomain:admin-api=5019,public-api=5018;participant1:admin-api=5012,ledger-api=5011;participant2:admin-api=5022,ledger-api=5021" + } + + } + + "deprecated configs" should { + val expectedWarnings = LogEntry.assertLogSeq( + Seq( + ( + _.message should (include("Config field") and include("is deprecated")), + "deprecated 
field not logged", + ), + ( + _.message should (include("Config path") and include("is deprecated")), + "deprecated path not logged", + ), + ), + Seq.empty, + ) _ + + def deprecatedConfigChecks(config: CantonCommunityConfig) = { + import scala.concurrent.duration.* + + config.monitoring.health.foreach { health => + health.check match { + case CheckConfig.IsActive(node) => node shouldBe Some("my_node") + case _ => + } + } + + val (_, participantConfig) = config.participants.headOption.value + participantConfig.init.ledgerApi.maxDeduplicationDuration.duration.toSeconds shouldBe 10.minutes.toSeconds + participantConfig.init.parameters.uniqueContractKeys shouldBe false + participantConfig.init.identity.map(_.generateLegalIdentityCertificate) shouldBe Some(true) + participantConfig.storage.parameters.failFastOnStartup shouldBe false + participantConfig.storage.parameters.maxConnections shouldBe Some(10) + participantConfig.storage.parameters.ledgerApiJdbcUrl shouldBe Some("yes") + + def domain(name: String) = config.domains + .find(_._1.unwrap == name) + .value + ._2 + + val domain1Parameters = domain("domain1").init.domainParameters + val domain2parameters = domain("domain2").init.domainParameters + + domain1Parameters.uniqueContractKeys shouldBe false + domain2parameters.uniqueContractKeys shouldBe true + } + + // In this test case, both deprecated and new fields are set with opposite values, we make sure the new fields + // are used + "load with new fields set" in { + loggerFactory.assertLogsSeq(SuppressionRule.Level(org.slf4j.event.Level.INFO))( + { + val parsed = loadFile("deprecated-configs/new-config-fields-take-precedence.conf").value + deprecatedConfigChecks(parsed) + }, + expectedWarnings, + ) + } + + // In this test case, only the deprecated fields are set, we make sure they get used as fallbacks + "be backwards compatible" in { + loggerFactory.assertLogsSeq(SuppressionRule.Level(org.slf4j.event.Level.INFO))( + { + val parsed = 
loadFile("deprecated-configs/backwards-compatible.conf").value + deprecatedConfigChecks(parsed) + }, + expectedWarnings, + ) + } + + "disable autoInit to false" in { + val config = + ConfigFactory + .parseFile((baseDir.toString / "deprecated-configs/backwards-compatible.conf").toJava) + .withValue( + "canton.participants.participant1.init.auto-init", + ConfigValueFactory.fromAnyRef(false), + ) + loggerFactory.assertLogsSeq(SuppressionRule.Level(org.slf4j.event.Level.INFO))( + { + val parsed = CantonCommunityConfig.load(config).value + parsed.participants.headOption.value._2.init.autoInit shouldBe false + }, + expectedWarnings, + ) + } + } + + "the invalid node names configuration" should { + "return an error" in { + loggerFactory.assertLogs( + { + val result = loadFile("invalid-configs/invalid-node-names.conf") + inside(result.left.value) { case GenericConfigError.Error(cause) => + cause should include( + "Node name is too long. Max length: 30. Length: 38. Name: \"mydomain0123456789012345678901...\"" + ) + cause should include( + "Node name contains invalid characters (allowed: [a-zA-Z0-9_-]): \"my`domain\"" + ) + cause should include( + "Node name contains invalid characters (allowed: [a-zA-Z0-9_-]): \"my domain\"" + ) + } + }, + entry => { + entry.shouldBeCantonErrorCode(GenericConfigError.code) + val cause = entry.errorMessage + cause should include( + "Node name is too long. Max length: 30. Length: 38. Name: \"mydomain0123456789012345678901...\"" + ) + // The other causes get truncated away, unfortunately. 
+ // See https://github.com/digital-asset/daml/issues/12785 + }, + ) + } + } + + // test that fails because we misspelled 'port' as 'bort' + "the bort configuration" should { + "return an error mentioning the bort issue" in { + val result = loggerFactory.assertLogs( + loadFiles(Seq(simpleConf, "invalid-configs/bort.conf")), + _.errorMessage should (include("bort.conf") and include("Unknown key")), + ) + result.left.value shouldBe a[GenericConfigError.Error] + } + } + + // test that fails because of missing '{' in .conf-file + "the missing-bracket configuration" should { + "return a CannotParseFilesError during loading when combined with simple config" in { + val result = + loggerFactory.assertLogs( + loadFiles(Seq(simpleConf, "invalid-configs/missing-bracket.conf")), + _.mdc("err-context") should (include("missing-bracket.conf") and include( + "expecting a close parentheses ')' here, not: end of file" + )), + ) + result.left.value shouldBe a[CannotParseFilesError.Error] + } + + "return a CannotParseFilesError during loading when not combined with simple config" in { + val result = + loggerFactory.assertLogs( + loadFiles(Seq(simpleConf, "invalid-configs/missing-bracket.conf")), + _.mdc("err-context") should (include("missing-bracket.conf") and include( + "expecting a close parentheses ')' here, not: end of file" + )), + ) + result.left.value shouldBe a[CannotParseFilesError.Error] + } + } + + "the negative-port configuration" should { + + "return a sensible error message during loading" in { + val result = + loggerFactory.assertLogs( + loadFiles(Seq(simpleConf, "invalid-configs/negative-port.conf")), + _.errorMessage should (include("negative-port.conf") and include("Unable to create Port")), + ) + result.left.value shouldBe a[GenericConfigError.Error] + } + } + + // test that fails because of using env variable substitution with a non-existent env variable + "the undefined-env-var configuration" should { + + "return an error during loading" in { + // defined like 
this because instantiating the error will automatically lead to another error message being logged + val code = loggerFactory.assertLogs( + SubstitutionError.Error(Seq()).code.id, + _.message should include(""), + ) + val result = loggerFactory.assertLogs( + loadFile("invalid-configs/undefined-env-var.conf"), + logEntry => { + logEntry.mdc("err-context") should (include("UNDEFINED_ENV_VARIABLE") and include( + "undefined-env-var.conf" + )) + logEntry.errorMessage should include(code) + }, + ) + result.left.value shouldBe a[SubstitutionError.Error] + } + } + + // confs with missing files for includes + // no error despite missing include + "the include-missing-file configuration" should { + lazy val config = + loadFiles(Seq(simpleConf, "invalid-configs/include-missing-file.conf")) + .valueOrFail("failed to load include-missing-file.conf") + + "contain a couple of participants2" in { + config.participants should have size 2 + } + } + + // tests that fails because of a `include required` of a missing file + "the require-missing-file configuration" should { + "throw a meaningful error message during loading" in { + // sadly, we don't have enough information at the time the error is thrown to also include + // `require-missing-file.conf` in the error message + val result = loggerFactory.assertLogs( + loadFiles(Seq("invalid-configs/require-missing-file.conf", simpleConf)), + _.mdc("err-context") should (include("this-file-does-not-exist.conf") and include( + "resource not found" + )), + ) + result.left.value shouldBe a[CannotParseFilesError.Error] + } + } + + "configuration file with unknown keys" should { + "should return an error" in { + val result = + loggerFactory.assertLogs( + loadFile("invalid-configs/unknown-key-in-nested-config.conf"), + _.errorMessage should include("canton.monitoring.this-is-not-a-key"), + ) + result.left.value shouldBe a[GenericConfigError.Error] + } + } + + "load with multiple config files" should { + lazy val config1: Config = 
ConfigFactory.parseMap( + Map( + "item1" -> "config1", + "item2" -> "config1", + ).asJava + ) + lazy val config2: Config = ConfigFactory.parseMap( + Map( + "item2" -> "config2", + "item3" -> "config2", + ).asJava + ) + lazy val combinedConfig = CantonConfig.mergeConfigs(config1, Seq(config2)) + "prefer the right hand config where multiple keys are defined" in { + combinedConfig.getString("item1") shouldBe "config1" + // this is defined in both, but as config2 was provided last it should provide the value + combinedConfig.getString("item2") shouldBe "config2" + // this is missing from config1 + combinedConfig.getString("item3") shouldBe "config2" + } + + "load with no config files" should { + "return None" in { + val result = + loggerFactory.assertLogs( + loadFiles(Seq()), + _.errorMessage should include("No config files"), + ) + + result.left.value shouldBe a[NoConfigFiles.Error] + + } + } + + "load with files that cannot be read" should { + "will log errors for all files that can't be read" in { + val result = loggerFactory.assertLogs( + loadFiles(Seq("file-1", "file-2")), + _.mdc("err-context") should (include("file-1") and include("file-2")), + ) + result.left.value shouldBe a[CannotReadFilesError.Error] + } + } + + "config validation on duplicate storage" should { + "return a ValidationError during loading" in { + val result = loggerFactory.assertLogs( + loadFiles(Seq(simpleConf, "invalid-configs/duplicate-storage.conf")), + _.errorMessage should (include("Failed to validate the configuration") + and include("participant1") and include("participant2")), + ) + result.left.value shouldBe a[ConfigErrors.ValidationError.Error] + } + "not log the password when url or jdbcUrl is set" in { + val result = loggerFactory.assertLogs( + loadFiles(Seq(simpleConf, "invalid-configs/storage-url-with-password.conf")), + _.errorMessage should (include("Failed to validate the configuration") + and include("participant1") and include("participant2") + and not include "password=" 
and not include "supersafe"), + ) + result.left.value shouldBe a[ConfigErrors.ValidationError.Error] + } + } + } + + "parsing our config example snippets" should { + "succeed on all examples" in { + val inputDir = baseDir / "documentation-snippets" + inputDir + .list(_.extension.contains(".conf")) + .foreach(file => + loggerFactory.assertLogsUnorderedOptional( + loadFiles(Seq(simpleConf, "documentation-snippets/" + file.name)) + .valueOrFail( + "failed to load " + file.name + ), + LogEntryOptionality.Optional -> (entry => + entry.shouldBeCantonErrorCode(DeprecatedProtocolVersion) + ), + ) + ) + } + } + + private def loadFile(resourcePath: String): Either[CantonConfigError, CantonCommunityConfig] = { + loadFiles(Seq(resourcePath)) + } + + val elc: ErrorLoggingContext = ErrorLoggingContext(logger, loggerFactory.properties, traceContext) + + private def loadFiles( + resourcePaths: Seq[String] + ): Either[CantonConfigError, CantonCommunityConfig] = { + val files = resourcePaths.map(r => (baseDir.toString / r).toJava) + CantonCommunityConfig.parseAndLoad(files) + } + + lazy val baseDir: File = "community" / "app" / "src" / "test" / "resources" + +} diff --git a/canton-3x/community/app/src/test/scala/com/digitalasset/canton/console/AmmoniteCacheLockTest.scala b/canton-3x/community/app/src/test/scala/com/digitalasset/canton/console/AmmoniteCacheLockTest.scala new file mode 100644 index 0000000000..455581aab5 --- /dev/null +++ b/canton-3x/community/app/src/test/scala/com/digitalasset/canton/console/AmmoniteCacheLockTest.scala @@ -0,0 +1,53 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.console + +import better.files.File +import com.digitalasset.canton.BaseTestWordSpec +import os.Path + +class AmmoniteCacheLockTest extends BaseTestWordSpec { + + private lazy val testDir = Path(File.newTemporaryDirectory().deleteOnExit().path) + + "concurrent access to ammonite cache" should { + "work in the happy case" in { + val lock = AmmoniteCacheLock.create(logger, testDir, isRepl = false) + + lock.lockFile.value.exists() shouldBe true + + lock.release() + + lock.lockFile.value.exists() shouldBe false + + } + + "reuses previous cache" in { + + val lock = AmmoniteCacheLock.create(logger, testDir, isRepl = false) + lock.release() + + val lock2 = AmmoniteCacheLock.create(logger, testDir, isRepl = false) + + lock.lockFile should not be empty + lock.lockFile shouldBe lock2.lockFile + + } + + "prevent concurrent access" in { + val lock = AmmoniteCacheLock.create(logger, testDir, isRepl = false) + val lock2 = AmmoniteCacheLock.create(logger, testDir, isRepl = false) + + lock2.lockFile.value.exists() shouldBe true + lock.lockFile.value.exists() shouldBe true + lock.lockFile.value should not be lock2.lockFile.value + + lock2.release() + lock.release() + + } + + } + +} diff --git a/canton-3x/community/app/src/test/scala/com/digitalasset/canton/console/ConsoleCommandResultTest.scala b/canton-3x/community/app/src/test/scala/com/digitalasset/canton/console/ConsoleCommandResultTest.scala new file mode 100644 index 0000000000..988f4a869c --- /dev/null +++ b/canton-3x/community/app/src/test/scala/com/digitalasset/canton/console/ConsoleCommandResultTest.scala @@ -0,0 +1,51 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.console + +import com.digitalasset.canton.BaseTest +import com.digitalasset.canton.console.CommandErrors.GenericCommandError +import org.scalatest.wordspec.AnyWordSpec + +class ConsoleCommandResultTest extends AnyWordSpec with BaseTest { + "ConsoleCommandResult" should { + "forAll" should { + "return successfully if action runs on all instances" in { + val instance1 = mock[LocalInstanceReferenceCommon] + val instance2 = mock[LocalInstanceReferenceCommon] + + when(instance1.startCommand()).thenReturn(CommandSuccessful()) + when(instance2.startCommand()).thenReturn(CommandSuccessful()) + + ConsoleCommandResult.forAll(Seq(instance1, instance2)) { + _.startCommand() + } should matchPattern { case CommandSuccessful(_) => + } + } + "continue after failure" in { + val instance1 = mockInstance("instance-1") + val instance2 = mockInstance("instance-2") + val instance3 = mockInstance("instance-3") + + when(instance1.startCommand()).thenReturn(GenericCommandError("BOOM")) + when(instance2.startCommand()).thenReturn(CommandSuccessful()) + when(instance3.startCommand()).thenReturn(GenericCommandError("BANG")) + + ConsoleCommandResult.forAll(Seq(instance1, instance2, instance3)) { + _.startCommand() + } shouldEqual GenericCommandError( + "Command failed on 2 out of 3 instances: (failure on instance-1): BOOM, (failure on instance-3): BANG" + ) + + // verify start was still called on instance2 + verify(instance2).startCommand() + } + } + } + + def mockInstance(name: String): LocalInstanceReferenceCommon = { + val ref = mock[LocalInstanceReferenceCommon] + when(ref.name).thenReturn(name) + ref + } +} diff --git a/canton-3x/community/app/src/test/scala/com/digitalasset/canton/console/ConsoleMacrosTest.scala b/canton-3x/community/app/src/test/scala/com/digitalasset/canton/console/ConsoleMacrosTest.scala new file mode 100644 index 0000000000..ab0c34e432 --- /dev/null +++ 
b/canton-3x/community/app/src/test/scala/com/digitalasset/canton/console/ConsoleMacrosTest.scala @@ -0,0 +1,58 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.console + +import com.digitalasset.canton.BaseTest +import com.digitalasset.canton.config.NonNegativeDuration +import org.scalatest.wordspec.AnyWordSpec + +import java.time.Instant +import java.util.concurrent.atomic.AtomicInteger +import scala.concurrent.duration.* + +final case class TestCase(id: Int, name: String) + +class ConsoleMacrosTest extends AnyWordSpec with BaseTest { + "ConsoleMacrosTest" should { + val expected = Set("id:Int", "name:String") + "object_args" in { + val tmp = TestCase(1, "one") + val res = ConsoleMacros.utils.object_args(tmp) + assertResult(2, tmp)(res.size) + assertResult(expected)(res.toSet) + } + + "type_args" in { + val res = ConsoleMacros.utils.type_args[TestCase] + assertResult(2, res)(res.size) + assertResult(expected)(res.toSet) + } + + "retry_until_true should back off" in { + val started = Instant.now() + val counter = new AtomicInteger(0) + try { + ConsoleMacros.utils + .retry_until_true(timeout = NonNegativeDuration.tryFromDuration(1.second)) { + counter.incrementAndGet() + false + } + fail("should have bounced") + } catch { + case _: IllegalStateException => + } + val ended = Instant.now() + (ended.toEpochMilli - started.toEpochMilli) should be > 900L + + // Upper bound derivation: + // - There is 1 invocation up front. + // - There is 1 invocation after sleeping for 2^0 .. 2^9 milliseconds. + // - Total time slept is 2^0 + ... + 2^9 = 2^10 - 1 = 1023 milliseconds. So the time out must have elapsed. + // - Total number of invocations is 1 + 10 = 11.
+ // - Sometimes, it might be 12, as the deadline stuff is working on the nanosecond level + counter.get() should be <= 12 + } + + } +} diff --git a/canton-3x/community/app/src/test/scala/com/digitalasset/canton/console/ConsoleTest.scala b/canton-3x/community/app/src/test/scala/com/digitalasset/canton/console/ConsoleTest.scala new file mode 100644 index 0000000000..ef62f723e6 --- /dev/null +++ b/canton-3x/community/app/src/test/scala/com/digitalasset/canton/console/ConsoleTest.scala @@ -0,0 +1,309 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.console + +import ammonite.runtime.Storage.InMemory +import ammonite.util.Colors +import com.digitalasset.canton.admin.api.client.commands.{ + GrpcAdminCommand, + ParticipantAdminCommands, +} +import com.digitalasset.canton.config.CantonRequireTypes.InstanceName +import com.digitalasset.canton.config.{CantonCommunityConfig, ClientConfig, TestingConfigInternal} +import com.digitalasset.canton.console.CommandErrors.GenericCommandError +import com.digitalasset.canton.console.HeadlessConsole.{ + CompileError, + HeadlessConsoleError, + RuntimeError, +} +import com.digitalasset.canton.domain.DomainNodeBootstrap +import com.digitalasset.canton.environment.{ + CantonNode, + CantonNodeBootstrap, + CommunityConsoleEnvironment, + CommunityEnvironment, + DomainNodes, + Nodes, + ParticipantNodes, +} +import com.digitalasset.canton.metrics.OnDemandMetricsReader.NoOpOnDemandMetricsReader$ +import com.digitalasset.canton.participant.{ParticipantNode, ParticipantNodeBootstrap} +import com.digitalasset.canton.telemetry.ConfiguredOpenTelemetry +import com.digitalasset.canton.{BaseTest, ConfigStubs} +import io.grpc.stub.AbstractStub +import io.opentelemetry.sdk.OpenTelemetrySdk +import io.opentelemetry.sdk.trace.SdkTracerProvider +import org.mockito.ArgumentCaptor +import org.mockito.ArgumentMatchers.{anyString, eq 
as isEq} +import org.scalatest.Assertion +import org.scalatest.wordspec.AnyWordSpec + +import java.io.ByteArrayOutputStream +import java.nio.file.Paths + +class ConsoleTest extends AnyWordSpec with BaseTest { + + lazy val DefaultConfig: CantonCommunityConfig = CantonCommunityConfig( + domains = Map( + InstanceName.tryCreate("d1") -> ConfigStubs.domain, + InstanceName.tryCreate("d2") -> ConfigStubs.domain, + InstanceName.tryCreate("d-3") -> ConfigStubs.domain, + ), + participants = Map( + InstanceName.tryCreate("p1") -> ConfigStubs.participant + .copy(adminApi = ConfigStubs.adminApi), // for testing admin api + InstanceName.tryCreate("p2") -> ConfigStubs.participant, + InstanceName.tryCreate("new") -> ConfigStubs.participant, + InstanceName.tryCreate("p-4") -> ConfigStubs.participant, + ), + ) + + lazy val NameClashConfig: CantonCommunityConfig = CantonCommunityConfig( + participants = Map( + // Reserved keyword + InstanceName.tryCreate("participants") -> ConfigStubs.participant, + // Name collision + InstanceName.tryCreate("d1") -> ConfigStubs.participant, + ), + domains = Map( + InstanceName.tryCreate("d1") -> ConfigStubs.domain + ), + ) + + abstract class TestEnvironment(val config: CantonCommunityConfig = DefaultConfig) { + val environment: CommunityEnvironment = mock[CommunityEnvironment] + val participants: ParticipantNodes[ + ParticipantNodeBootstrap, + ParticipantNode, + config.ParticipantConfigType, + ] = + mock[ + ParticipantNodes[ParticipantNodeBootstrap, ParticipantNode, config.ParticipantConfigType] + ] + val domains: DomainNodes[config.DomainConfigType] = + mock[DomainNodes[config.DomainConfigType]] + val participant: ParticipantNodeBootstrap = mock[ParticipantNodeBootstrap] + val domain: DomainNodeBootstrap = mock[DomainNodeBootstrap] + + when(environment.config).thenReturn(config) + when(environment.testingConfig).thenReturn( + TestingConfigInternal(initializeGlobalOpenTelemetry = false) + ) + when(environment.participants).thenReturn(participants) + 
when(environment.domains).thenReturn(domains) + when(environment.simClock).thenReturn(None) + when(environment.loggerFactory).thenReturn(loggerFactory) + when(environment.configuredOpenTelemetry).thenReturn( + ConfiguredOpenTelemetry( + OpenTelemetrySdk.builder().build(), + SdkTracerProvider.builder(), + NoOpOnDemandMetricsReader$, + ) + ) + type NodeGroup = Seq[(String, Nodes[CantonNode, CantonNodeBootstrap[CantonNode]])] + when(environment.startNodes(any[NodeGroup])(anyTraceContext)).thenReturn(Right(())) + + when(participants.startAndWait(anyString())(anyTraceContext)).thenReturn(Right(())) + when(participants.stopAndWait(anyString())(anyTraceContext)).thenReturn(Right(())) + when(participants.isRunning(anyString())).thenReturn(true) + + when(domains.startAndWait(anyString())(anyTraceContext)).thenReturn(Right(())) + + val adminCommandRunner: ConsoleGrpcAdminCommandRunner = mock[ConsoleGrpcAdminCommandRunner] + val testConsoleOutput: TestConsoleOutput = new TestConsoleOutput(loggerFactory) + + // Setup default admin command response + when( + adminCommandRunner + .runCommand( + anyString(), + any[GrpcAdminCommand[_, _, Nothing]], + any[ClientConfig], + isEq(None), + ) + ) + .thenReturn(GenericCommandError("Mocked error")) + + val consoleEnvironment = + new CommunityConsoleEnvironment( + environment, + consoleOutput = testConsoleOutput, + createAdminCommandRunner = _ => adminCommandRunner, + ) + + def runOrFail(commands: String*): Unit = { + val (result, stderr) = run(commands: _*) + + // fail if unexpected content was printed to stderr (this likely indicates an error of some form which wasn't bubbled up through the interpreter) + assertExpectedStdErrorOutput(stderr) + + // fail if the run was unsuccessful + result shouldBe Right(()) + } + + def run(commands: String*): (Either[HeadlessConsoleError, Unit], String) = { + // put a newline at the end to ensure it's run + val input = commands.mkString(s";${System.lineSeparator}") + System.lineSeparator + + // capture 
output + val errorStream = new ByteArrayOutputStream() + + // run headless but direct stderr to a captured stream + val result = HeadlessConsole.run( + consoleEnvironment, + input, + path = None, + _.copy( + errorStream = errorStream, + colors = + Colors.BlackWhite, // as pretty as colors are, it really messes up the regular expressions we run for verification + storageBackend = + InMemory(), // due to an odd jenkins/docker-in-jenkins thing the `user.home` env var isn't set that blocks up ammonite's default Main() ctor for storage + wd = os.Path(Paths.get(".").toAbsolutePath), + ), + logger = logger, + ) + + (result, errorStream.toString) + } + + def setupAdminCommandResponse[Svc <: AbstractStub[Svc], Result]( + id: String, + result: Either[String, Result], + ): Unit = + when( + adminCommandRunner.runCommand( + isEq((id)), + any[GrpcAdminCommand[_, _, Result]], + any[ClientConfig], + isEq(None), + ) + ) + .thenReturn(result.toResult) + + private val expectedErrorLinePatterns = Seq("Compiling .*", "Bye!") + private def isExpectedStdErrorOutput(stderr: String): Boolean = + stderr + .split(System.lineSeparator) + .filterNot(_.isEmpty) + .forall(line => expectedErrorLinePatterns.exists(line.matches)) + + def assertExpectedStdErrorOutput(stderr: String): Assertion = + assert( + isExpectedStdErrorOutput(stderr), + s"stderr from REPL included unexpected output:${System.lineSeparator}$stderr", + ) + } + + "Console" can { + "start a participant" in new TestEnvironment { + runOrFail("p1 start") + verify(participants).startAndWait("p1") + } + "start a participant with scala keyword as name" in new TestEnvironment { + runOrFail("`new` start") + verify(participants).startAndWait("new") + } + "start a participant with underscore in name" in new TestEnvironment { + runOrFail("`p-4` start") + verify(participants).startAndWait("p-4") + } + "stop a participant" in new TestEnvironment { + runOrFail( + "p1 start", + "p1 stop", + ) + + verify(participants).startAndWait("p1") + 
verify(participants).stopAndWait("p1") + } + + def verifyStart(env: TestEnvironment, names: Seq[String]): Assertion = { + import env.* + val argCapture: ArgumentCaptor[NodeGroup] = + ArgumentCaptor.forClass(classOf[NodeGroup]) + verify(environment).startNodes(argCapture.capture())(anyTraceContext) + argCapture.getValue.map(_._1) shouldBe names + } + + "start all participants" in new TestEnvironment { + runOrFail("participants.local start") + verifyStart(this, Seq("p1", "p2", "new", "p-4")) + } + "start all domains" in new TestEnvironment { + runOrFail("domains.local start") + verifyStart(this, Seq("d1", "d2", "d-3")) + } + "start all" in new TestEnvironment { + runOrFail("nodes.local.start()") + verifyStart(this, Seq("p1", "p2", "new", "p-4", "d1", "d2", "d-3")) + } + + "return a compile error if the code fails to compile" in new TestEnvironment { + inside(run("This really shouldn't compile")) { case (Left(CompileError(message)), _) => + message shouldEqual + """(synthetic)/ammonite/canton/interpreter/canton$minusscript.sc:1:1 expected end-of-input + |This really shouldn't compile + |^""".stripMargin + } + } + "return a runtime error if the code does not run successfully" in new TestEnvironment { + val (result, _) = run("""sys.error("whoopsie")""") + + inside(result) { case Left(RuntimeError(message, cause)) => + cause.getMessage shouldBe "whoopsie" + cause.getClass shouldBe classOf[RuntimeException] + message shouldEqual "" + } + } + + "participants.all.dars.upload should attempt to invoke UploadDar on all participants" in new TestEnvironment { + setupAdminCommandResponse("p1", Right(Seq())) + setupAdminCommandResponse("p2", Right(Seq())) + setupAdminCommandResponse("new", Right(Seq())) + setupAdminCommandResponse("p-4", Right(Seq())) + + runOrFail(s"""participants.all.dars.upload("$CantonExamplesPath", false)""") + + def verifyUploadDar(p: String): ConsoleCommandResult[String] = + verify(adminCommandRunner).runCommand( + isEq(p), + 
any[ParticipantAdminCommands.Package.UploadDar], + any[ClientConfig], + isEq(None), + ) + + verifyUploadDar("p1") + verifyUploadDar("p2") + verifyUploadDar("new") + verifyUploadDar("p-4") + } + + "participants.local help shows help from both InstanceExtensions and ParticipantExtensions" in new TestEnvironment { + testConsoleOutput.assertConsoleOutput( + { + runOrFail("participants.local help") + }, + { helpText => + helpText should include("start") // from instance extensions + helpText should include("stop") + helpText should include("dars") + helpText should include("domains") + }, + ) + } + } + + "Console" must { + "fail on name clashes in config" in new TestEnvironment(NameClashConfig) { + inside(run("1+1")) { case (Left(RuntimeError(message, ex)), _) => + message shouldEqual "Unable to create the console bindings" + ex.getMessage should startWith( + """Node names must be unique and must differ from reserved keywords. Please revisit node names in your config file. + |Offending names: (`d1` (2 occurrences), `participants` (2 occurrences))""".stripMargin + ) + } + } + } +} diff --git a/canton-3x/community/app/src/test/scala/com/digitalasset/canton/console/HelpTest.scala b/canton-3x/community/app/src/test/scala/com/digitalasset/canton/console/HelpTest.scala new file mode 100644 index 0000000000..d67aef24c2 --- /dev/null +++ b/canton-3x/community/app/src/test/scala/com/digitalasset/canton/console/HelpTest.scala @@ -0,0 +1,177 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.console + +import com.digitalasset.canton.BaseTest +import com.digitalasset.canton.console.Help.forInstance +import org.scalatest.funsuite.AnyFunSuite + +class HelpTest extends AnyFunSuite with BaseTest { + + object Example { + + class MoreNested {} + + class Nested { + @Help.Summary("A nested method") + @Help.Topic(Seq("Nested")) + def nestedMethod(name: String): Int = 44 + } + + @Help.Summary("Full Method") + @Help.Description("Full Method description") + @Help.Topic(Seq("Example", "Full Method")) + def fullMethod(name: String): Int = 42 + + @Help.Summary("No Params and Unit", FeatureFlag.Testing) + def noParametersAndUnit(): Unit = () + + @Help.Summary("Should not print unit") + def unitReturning(name: String): Unit = () + + @Help.Summary("Multiple parameters") + def multipleParameters(first: String, second: Int): Unit = () + + @Help.Summary("Some grouped methods") + @Help.Group("Key Vault Api") + def nested: Nested = new Nested() + + } + + trait TestHelpful { + @Help.Summary("Usage") + def help(): String = forInstance(this) + } + + object HelpfulExample extends TestHelpful { + @Help.Summary("Is a thing") + def thing(): Unit = () + } + + object ImplicitsExample extends TestHelpful { + @Help.Summary("Implicit should not be visible") + def thing(implicit someImplicit: Int): Unit = () + + @Help.Summary("Even if in the second argument set") + def another(name: String)(implicit someImplicit: Int): Unit = () + } + + trait Trait1 { + @Help.Summary("Trait1") + @Help.Topic(Seq("Top-level Commands")) + def trait1(): Unit = {} + } + + trait Trait2 { + @Help.Summary("Trait2") + @Help.Topic(Seq("Top-level Commands")) + def trait2(): Unit = {} + } + + class MultipleTraits extends Helpful with Trait1 with Trait2 {} + + test("Producing help for Example") { + forInstance(Example) should be( + """ + |Top-level Commands + |------------------ + |multipleParameters - Multiple parameters + |noParametersAndUnit 
- No Params and Unit + |unitReturning - Should not print unit + | + |Example: Full Method + |-------------------- + |fullMethod - Full Method + | + |Command Groups + |-------------- + |nested - Some grouped methods + """.stripMargin.trim + ) + } + + test("Omit the testing scope on the help") { + forInstance(Example, scope = Set(FeatureFlag.Stable)) should not include ("noParametersAndUnit") + } + + test("Omit the testing scope on suggestions") { + Help.forMethod( + Example, + "no", + scope = Set(FeatureFlag.Stable), + ) should not include "noParametersAndUnit" + } + + test("Not find the method which is out of scope") { + Help.forMethod( + Example, + "noParametersAndUnit", + scope = Set(FeatureFlag.Stable), + ) shouldBe "Error: method noParametersAndUnit not found; check your spelling" + } + + test("Units don't get displayed") { + Help + .forMethod(Example, "unitReturning") + .trim shouldBe "unitReturning(name: String)\nShould not print unit" + Help + .forMethod(Example, "noParametersAndUnit") + .trim shouldBe "noParametersAndUnit\nNo Params and Unit" + } + + test("Having a trait generating helpful messages") { + HelpfulExample.help() should be(""" + |Top-level Commands + |------------------ + |help - Usage + |thing - Is a thing + """.stripMargin.trim) + } + + test("Implicits don't get displayed") { + Help.forMethod(ImplicitsExample, "thing").trim shouldBe "thing\nImplicit should not be visible" + Help + .forMethod(ImplicitsExample, "another") + .trim shouldBe "another(name: String)\nEven if in the second argument set" + } + + test("Help should be sourced from all traits") { + forInstance(new MultipleTraits()) should be( + """ + |Top-level Commands + |------------------ + |help - Help for specific commands (use help() or help("method") for more information) + |trait1 - Trait1 + |trait2 - Trait2 + """.stripMargin.trim + ) + } + + test("Description and types get displayed for detailed method help") { + Help.forMethod(Example, "fullMethod") shouldBe + 
Seq("fullMethod(name: String): Int", "Full Method description").mkString(System.lineSeparator) + } + + test("Help suggestions get displayed for top level items") { + Help.forMethod(Example, "unit") should be( + """Error: method unit not found; are you looking for one of the following? + | unitReturning""".stripMargin + ) + } + + test("Help suggestions get displayed for nested items") { + Help.forMethod(Example, "nestedMet") should be( + """Error: method nestedMet not found; are you looking for one of the following? + | nested.nestedMethod""".stripMargin + ) + } + + test("Description and types get displayed for nested method help") { + Help.forMethod(Example, "nested.nestedMethod") should be( + """nested.nestedMethod(name: String): Int + |A nested method""".stripMargin + ) + } + +} diff --git a/canton-3x/community/app/src/test/scala/com/digitalasset/canton/environment/CommunityEnvironmentTest.scala b/canton-3x/community/app/src/test/scala/com/digitalasset/canton/environment/CommunityEnvironmentTest.scala new file mode 100644 index 0000000000..6c24e25f25 --- /dev/null +++ b/canton-3x/community/app/src/test/scala/com/digitalasset/canton/environment/CommunityEnvironmentTest.scala @@ -0,0 +1,248 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.environment + +import cats.data.EitherT +import com.digitalasset.canton.config.CantonRequireTypes.InstanceName +import com.digitalasset.canton.config.RequireTypes.Port +import com.digitalasset.canton.config.{CantonCommunityConfig, TestingConfigInternal} +import com.digitalasset.canton.domain.DomainNodeBootstrap +import com.digitalasset.canton.domain.config.{ + CommunityDomainConfig, + CommunityPublicServerConfig, + DomainConfig, +} +import com.digitalasset.canton.integration.CommunityConfigTransforms +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.participant.config.* +import com.digitalasset.canton.participant.domain.DomainConnectionConfig +import com.digitalasset.canton.participant.sync.SyncServiceError +import com.digitalasset.canton.participant.{ParticipantNode, ParticipantNodeBootstrap} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.EitherTUtil +import com.digitalasset.canton.{BaseTest, ConfigStubs, HasExecutionContext} +import monocle.macros.syntax.lens.* +import org.mockito.ArgumentMatchers +import org.mockito.ArgumentMatchers.anyString +import org.scalatest.wordspec.AnyWordSpec + +import scala.concurrent.{ExecutionContext, Future} + +class CommunityEnvironmentTest extends AnyWordSpec with BaseTest with HasExecutionContext { + // we don't care about any values of this config, so just mock + lazy val domain1Config: CommunityDomainConfig = ConfigStubs.domain + lazy val domain2Config: CommunityDomainConfig = ConfigStubs.domain + lazy val participant1Config: CommunityParticipantConfig = ConfigStubs.participant + lazy val participant2Config: CommunityParticipantConfig = ConfigStubs.participant + + lazy val sampleConfig: CantonCommunityConfig = CantonCommunityConfig( + domains = Map( + InstanceName.tryCreate("d1") -> domain1Config, + InstanceName.tryCreate("d2") -> domain2Config, + ), + participants = 
Map( + InstanceName.tryCreate("p1") -> participant1Config, + InstanceName.tryCreate("p2") -> participant2Config, + ), + ) + + trait CallResult[A] { + def get: A + } + + trait TestEnvironment { + def config: CantonCommunityConfig = sampleConfig + + private val createParticipantMock = + mock[(String, LocalParticipantConfig) => ParticipantNodeBootstrap] + private val createDomainMock = mock[(String, DomainConfig) => DomainNodeBootstrap] + + def mockDomain: DomainNodeBootstrap = { + val domain = mock[DomainNodeBootstrap] + when(domain.start()).thenReturn(EitherT.pure[Future, String](())) + when(domain.name).thenReturn(InstanceName.tryCreate("mockD")) + domain + } + + def mockParticipantAndNode: (ParticipantNodeBootstrap, ParticipantNode) = { + val bootstrap = mock[ParticipantNodeBootstrap] + val node = mock[ParticipantNode] + when(bootstrap.name).thenReturn(InstanceName.tryCreate("mockP")) + when(bootstrap.start()).thenReturn(EitherT.pure[Future, String](())) + when(bootstrap.getNode).thenReturn(Some(node)) + when(node.reconnectDomainsIgnoreFailures()(any[TraceContext], any[ExecutionContext])) + .thenReturn(EitherT.pure[FutureUnlessShutdown, SyncServiceError](())) + when(node.config).thenReturn(participant1Config) + (bootstrap, node) + } + def mockParticipant: ParticipantNodeBootstrap = mockParticipantAndNode._1 + + val environment = new CommunityEnvironment( + config, + TestingConfigInternal(initializeGlobalOpenTelemetry = false), + loggerFactory, + ) { + override def createParticipant( + name: String, + participantConfig: CommunityParticipantConfig, + ): ParticipantNodeBootstrap = + createParticipantMock(name, participantConfig) + override def createDomain( + name: String, + domainConfig: CommunityDomainConfig, + ): DomainNodeBootstrap = + createDomainMock(name, domainConfig) + } + + protected def setupParticipantFactory(create: => ParticipantNodeBootstrap): Unit = + setupParticipantFactoryInternal(anyString(), create) + + protected def setupParticipantFactory(id: 
String, create: => ParticipantNodeBootstrap): Unit = + setupParticipantFactoryInternal(ArgumentMatchers.eq(id), create) + + private def setupParticipantFactoryInternal( + idMatcher: => String, + create: => ParticipantNodeBootstrap, + ): Unit = + when(createParticipantMock(idMatcher, any[LocalParticipantConfig])).thenAnswer(create) + + protected def setupDomainFactory(id: String, create: => DomainNodeBootstrap): Unit = + when(createDomainMock(eqTo(id), any[DomainConfig])).thenAnswer(create) + } + + "Environment" when { + "starting with startAndReconnect" should { + "succeed normally" in new TestEnvironment { + + val pp = mockParticipant + Seq("p1", "p2").foreach(setupParticipantFactory(_, pp)) + Seq("d1", "d2").foreach(setupDomainFactory(_, mockDomain)) + + environment.startAndReconnect(false) shouldBe Right(()) + verify(pp.getNode.valueOrFail("node should be set"), times(2)) + .reconnectDomainsIgnoreFailures()(any[TraceContext], any[ExecutionContext]) + + } + + "auto-connect if requested" in new TestEnvironment { + + override def config: CantonCommunityConfig = + (CommunityConfigTransforms.updateAllDomainConfigs { case (_, config) => + config + .focus(_.publicApi) + .replace(CommunityPublicServerConfig(internalPort = Some(Port.tryCreate(42)))) + })(sampleConfig) + + val (pp, pn) = mockParticipantAndNode + val d1 = mockDomain + val d2 = mockDomain + + when(pp.isActive).thenReturn(true) + when(d1.isActive).thenReturn(true) + when(d2.isActive).thenReturn(false) + + when(d1.config).thenReturn( + config.domainsByString.get("d1").valueOrFail("where is my config?") + ) + when( + pn.autoConnectLocalDomain(any[DomainConnectionConfig])( + any[TraceContext], + any[ExecutionContext], + ) + ).thenReturn(EitherTUtil.unitUS) + + Seq("p1", "p2").foreach(setupParticipantFactory(_, pp)) + setupDomainFactory("d1", d1) + setupDomainFactory("d2", d2) + + clue("auto-start") { + environment.startAndReconnect(true) shouldBe Right(()) + } + + verify(pn, 
times(2)).autoConnectLocalDomain(any[DomainConnectionConfig])( + any[TraceContext], + any[ExecutionContext], + ) + + } + + "write ports file if desired" in new TestEnvironment { + + override def config: CantonCommunityConfig = { + val tmp = sampleConfig.focus(_.parameters.portsFile).replace(Some("my-ports.txt")) + (CommunityConfigTransforms.updateAllParticipantConfigs { case (_, config) => + config + .focus(_.ledgerApi) + .replace(LedgerApiServerConfig(internalPort = Some(Port.tryCreate(42)))) + })(tmp) + } + + val f = new java.io.File("my-ports.txt") + f.deleteOnExit() + + val pp = mockParticipant + when(pp.config).thenReturn( + config.participantsByString.get("p1").valueOrFail("config should be there") + ) + Seq("p1", "p2").foreach(setupParticipantFactory(_, pp)) + Seq("d1", "d2").foreach(setupDomainFactory(_, mockDomain)) + + clue("write ports file") { + environment.startAndReconnect(false) shouldBe Right(()) + } + assert(f.exists()) + + } + + "not start if manual start is desired" in new TestEnvironment { + override def config: CantonCommunityConfig = + sampleConfig.focus(_.parameters.manualStart).replace(true) + + // These would throw on start, as all methods return null. 
+ val myDomain: DomainNodeBootstrap = mock[DomainNodeBootstrap] + val myParticipant: ParticipantNodeBootstrap = mock[ParticipantNodeBootstrap] + + Seq("p1", "p2").foreach(setupParticipantFactory(_, myParticipant)) + Seq("d1", "d2").foreach(setupDomainFactory(_, myDomain)) + + environment.startAndReconnect(false) shouldBe Right(()) + } + + "report exceptions" in new TestEnvironment { + val exception = new RuntimeException("wurstsalat") + + Seq("p1", "p2").foreach(setupParticipantFactory(_, throw exception)) + Seq("d1", "d2").foreach(setupDomainFactory(_, throw exception)) + + assertThrows[RuntimeException](environment.startAndReconnect(false)) + + } + } + "starting with startAll" should { + "report exceptions" in new TestEnvironment { + val exception = new RuntimeException("nope") + + // p1, d1 and d2 will successfully come up + val d1: DomainNodeBootstrap = mockDomain + val d2: DomainNodeBootstrap = mockDomain + val p1: ParticipantNodeBootstrap = mockParticipant + setupParticipantFactory("p1", p1) + setupDomainFactory("d1", d1) + setupDomainFactory("d2", d2) + + // p2 will fail to come up + setupParticipantFactory("p2", throw exception) + the[RuntimeException] thrownBy environment.startAll() shouldBe exception + // start all will kick off stuff in the background but the "parTraverseWithLimit" + // will terminate eagerly. 
so we actually have to wait until the processes finished + // in the background + eventually() { + environment.domains.running.toSet shouldBe Set(d1, d2) + environment.participants.running should contain.only(p1) + } + } + } + } + +} diff --git a/canton-3x/community/app/src/test/scala/com/digitalasset/canton/environment/NodesTest.scala b/canton-3x/community/app/src/test/scala/com/digitalasset/canton/environment/NodesTest.scala new file mode 100644 index 0000000000..43eda8459f --- /dev/null +++ b/canton-3x/community/app/src/test/scala/com/digitalasset/canton/environment/NodesTest.scala @@ -0,0 +1,185 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.environment + +import cats.data.EitherT +import com.digitalasset.canton.* +import com.digitalasset.canton.concurrent.ExecutionContextIdlenessExecutorService +import com.digitalasset.canton.config.CantonRequireTypes.InstanceName +import com.digitalasset.canton.config.* +import com.digitalasset.canton.crypto.Crypto +import com.digitalasset.canton.lifecycle.ShutdownFailedException +import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.resource.CommunityDbMigrationsFactory +import com.digitalasset.canton.sequencing.client.SequencerClientConfig +import com.digitalasset.canton.time.{Clock, SimClock} +import com.digitalasset.canton.topology.NodeId +import org.scalatest.wordspec.AnyWordSpec + +import java.util.concurrent.atomic.AtomicReference +import scala.concurrent.duration.DurationInt +import scala.concurrent.{Await, Future, Promise} + +class NodesTest extends AnyWordSpec with BaseTest with HasExecutionContext { + val clock = new SimClock(loggerFactory = loggerFactory) + trait TestNode extends CantonNode + case class TestNodeConfig() + extends LocalNodeConfig + with ConfigDefaults[DefaultPorts, TestNodeConfig] { + override val init: InitConfig = InitConfig() + 
override val adminApi: CommunityAdminServerConfig = CommunityAdminServerConfig() + override val storage: CommunityStorageConfig = CommunityStorageConfig.Memory() + override val crypto: CommunityCryptoConfig = CommunityCryptoConfig() + override val sequencerClient: SequencerClientConfig = SequencerClientConfig() + override val caching: CachingConfigs = CachingConfigs() + override val nodeTypeName: String = "test-node" + override def clientAdminApi = adminApi.clientConfig + override def withDefaults(ports: DefaultPorts): TestNodeConfig = this + override val monitoring: NodeMonitoringConfig = NodeMonitoringConfig() + override val topologyX: TopologyXConfig = TopologyXConfig.NotUsed + override def parameters: LocalNodeParametersConfig = new LocalNodeParametersConfig { + override def batching: BatchingConfig = BatchingConfig() + } + } + + class TestNodeBootstrap extends CantonNodeBootstrap[TestNode] { + override def name: InstanceName = ??? + override def clock: Clock = ??? + override def crypto: Option[Crypto] = ??? + override def getId: Option[NodeId] = ??? + override def isInitialized: Boolean = ??? + override def start(): EitherT[Future, String, Unit] = EitherT.pure[Future, String](()) + override def getNode: Option[TestNode] = ??? + override def onClosed(): Unit = () + override protected def loggerFactory: NamedLoggerFactory = ??? 
+ override protected def timeouts: ProcessingTimeout = DefaultProcessingTimeouts.testing + override def isActive: Boolean = true + } + + class TestNodeFactory { + private class CreateResult(result: => TestNodeBootstrap) { + def get = result + } + private val createResult = new AtomicReference[CreateResult]( + new CreateResult(new TestNodeBootstrap) + ) + def setupCreate(result: => TestNodeBootstrap): Unit = + createResult.set(new CreateResult(result)) + + def create(name: String, config: TestNodeConfig): TestNodeBootstrap = createResult.get.get + } + + class TestNodes(factory: TestNodeFactory, configs: Map[String, TestNodeConfig]) + extends ManagedNodes[TestNode, TestNodeConfig, CantonNodeParameters, TestNodeBootstrap]( + factory.create, + new CommunityDbMigrationsFactory(loggerFactory), + timeouts, + configs, + _ => MockedNodeParameters.cantonNodeParameters(), + startUpGroup = 0, + NodesTest.this.loggerFactory, + ) { + protected val executionContext: ExecutionContextIdlenessExecutorService = + NodesTest.this.executorService + } + + trait Fixture { + val configs = Map( + "n1" -> TestNodeConfig() + ) + val nodeFactory = new TestNodeFactory + val nodes = new TestNodes(nodeFactory, configs) + } + + class StartStopFixture(startupResult: Either[String, Unit]) extends Fixture { + val startPromise = Promise[Either[String, Unit]]() + val startReached = Promise[Unit]() + val node = new TestNodeBootstrap { + override def start(): EitherT[Future, String, Unit] = { + startReached.success(()) + EitherT(startPromise.future) + } + } + nodeFactory.setupCreate(node) + val start = nodes.start("n1") + startReached.future.futureValue // wait until start happened + val stop = nodes.stop("n1") + // push start result + startPromise.success(startupResult) + // node should be properly closed and stop should succeed + stop.value.futureValue shouldBe Right(()) + node.isClosing shouldBe true + // wait for start to be have completed all callbacks including removing n1 from nodes. 
+ start.value.futureValue.discard + nodes.isRunning("n1") shouldBe false + startupResult match { + case Left(value) => start.value.futureValue shouldBe Left(StartFailed("n1", value)) + case Right(_) => start.value.futureValue.isRight shouldBe true + } + + } + + "starting a node" should { + "return config not found error if using a bad id" in new Fixture { + nodes.startAndWait("nope") shouldEqual Left(ConfigurationNotFound("nope")) + } + "not error if the node is already running when we try to start" in new Fixture { + nodes.startAndWait("n1").map(_ => ()) shouldBe Right(()) // first create should work + nodes.startAndWait("n1").map(_ => ()) shouldBe Right(()) // second is now a noop + } + "return an initialization failure if an exception is thrown during startup" in new Fixture { + val exception = new RuntimeException("Nope!") + nodeFactory.setupCreate { throw exception } + the[RuntimeException] thrownBy Await.result( + nodes.start("n1").value, + 10.seconds, + ) shouldBe exception + } + "return a proper left if startup fails" in new Fixture { + val node = new TestNodeBootstrap { + override def start(): EitherT[Future, String, Unit] = EitherT.leftT("HelloBello") + } + nodeFactory.setupCreate(node) + nodes.startAndWait("n1") shouldBe Left(StartFailed("n1", "HelloBello")) + node.isClosing shouldBe true + } + } + "stopping a node" should { + "return config not found error if using a bad id" in new Fixture { + nodes.stopAndWait("nope") shouldEqual Left(ConfigurationNotFound("nope")) + } + "return successfully if the node is not running" in new Fixture { + nodes.stopAndWait("n1") shouldBe Right(()) + } + "return an initialization failure if an exception is thrown during shutdown" in new Fixture { + val anException = new RuntimeException("Nope!") + val node = new TestNodeBootstrap { + override def onClosed() = { + throw anException + } + } + nodeFactory.setupCreate(node) + + nodes.startAndWait("n1") shouldBe Right(()) + + 
loggerFactory.assertThrowsAndLogs[ShutdownFailedException]( + nodes.stopAndWait("n1"), + entry => { + entry.warningMessage should fullyMatch regex "Closing .* failed! Reason:" + entry.throwable.value shouldBe anException + }, + ) + } + "properly stop a running node" in new Fixture { + nodes.startAndWait("n1") shouldBe Right(()) + nodes.isRunning("n1") shouldBe true + nodes.stopAndWait("n1") shouldBe Right(()) + nodes.isRunning("n1") shouldBe false + } + "work when we are just starting" when { + "start succeeded" in new StartStopFixture(Right(())) {} + "start failed" in new StartStopFixture(Left("Stinky")) + } + } +} diff --git a/canton-3x/community/app/src/test/scala/com/digitalasset/canton/health/HealthCheckTest.scala b/canton-3x/community/app/src/test/scala/com/digitalasset/canton/health/HealthCheckTest.scala new file mode 100644 index 0000000000..cf7f1da560 --- /dev/null +++ b/canton-3x/community/app/src/test/scala/com/digitalasset/canton/health/HealthCheckTest.scala @@ -0,0 +1,258 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.health + +import com.codahale.metrics.MetricRegistry +import com.daml.metrics.api.MetricName +import com.digitalasset.canton.BaseTest +import com.digitalasset.canton.config.CantonCommunityConfig +import com.digitalasset.canton.config.CantonRequireTypes.InstanceName +import com.digitalasset.canton.environment.{Environment, ParticipantNodes} +import com.digitalasset.canton.ledger.api.refinements.ApiTypes.WorkflowId +import com.digitalasset.canton.metrics.HealthMetrics +import com.digitalasset.canton.participant.admin.{AdminWorkflowServices, PingService} +import com.digitalasset.canton.participant.ledger.api.StartableStoppableLedgerApiDependentServices +import com.digitalasset.canton.participant.{ParticipantNode, ParticipantNodeBootstrap} +import com.digitalasset.canton.time.SimClock +import com.digitalasset.canton.topology.{DomainId, ParticipantId, UniqueIdentifier} +import com.digitalasset.canton.tracing.TraceContext +import org.scalatest.wordspec.AsyncWordSpec + +import java.time.Duration +import scala.concurrent.duration.* +import scala.concurrent.{Future, Promise} + +/** Stub health check that returns the configured response to all isHealthy calls. + * Counts how many times isHealthy has been called. + * Not thread safe. 
+ */ +@SuppressWarnings(Array("org.wartremover.warts.Var")) +class StubbedCheck( + initialValue: Future[HealthCheckResult] = + Future.failed(new RuntimeException("check result not stubbed")) +) extends HealthCheck { + var callCount = 0 + var nextResult: Future[HealthCheckResult] = initialValue + + override def isHealthy(implicit traceContext: TraceContext): Future[HealthCheckResult] = { + callCount += 1 + nextResult + } +} + +class HealthCheckTest extends AsyncWordSpec with BaseTest { + + "ping check" should { + val participant = "the_participant_alias" + + "return unhealthy if running participants are not yet available" in + loggerFactory.suppressWarningsAndErrors { + healthCheck(withNullParticipants()).isHealthy.map { + inside(_) { case Unhealthy(message) => + message should include("not started") + } + } + } + + "return unhealthy if running participant is not running" in + loggerFactory.suppressWarningsAndErrors { + healthCheck(withNotStartedParticipant()).isHealthy.map { + inside(_) { case Unhealthy(message) => + message should include("not started") + } + } + } + + "return unhealthy if running participant is not initialized" in + loggerFactory.suppressWarningsAndErrors { + healthCheck(withNotInitializedParticipant()).isHealthy.map { + inside(_) { case Unhealthy(message) => + message should include("not been initialized") + } + } + } + + "return unhealthy if initialized participant is still not connected" in + loggerFactory.suppressWarningsAndErrors { + healthCheck(withNotConnectedParticipant()).isHealthy.map { + inside(_) { case Unhealthy(message) => + message should include("not connected to any domains") + } + } + } + + "return unhealthy if ping fails with an unexpected error" in + loggerFactory.suppressWarningsAndErrors { + healthCheck(withPingResult(Future.failed(new RuntimeException("Unexpected")))).isHealthy + .map { + inside(_) { case Unhealthy(message) => + message shouldBe "ping failed" + } + } + } + + "return unhealthy if ping fails" in + 
loggerFactory.suppressWarningsAndErrors { + healthCheck(withPingResult(Future.successful(PingService.Failure))).isHealthy.map { + inside(_) { case Unhealthy(message) => + message shouldBe "ping failure" + } + } + } + + "return healthy if ping succeeds" in + healthCheck( + withPingResult(Future.successful(PingService.Success(Duration.ofSeconds(1), participant))) + ).isHealthy + .map { + _ should matchPattern { case Healthy => + } + } + + @SuppressWarnings(Array("org.wartremover.warts.Null")) + def withNullParticipants(): Environment = mockEnvironment { env => + when(env.environment.participants).thenReturn(null) + } + + def withNotStartedParticipant(): Environment = mockEnvironment { mocks => + when(mocks.participants.getRunning(participant)).thenReturn(None) + } + + def withNotInitializedParticipant(): Environment = mockEnvironment { mocks => + val init = mock[ParticipantNodeBootstrap] + when(init.name).thenReturn(InstanceName.tryCreate(participant)) + when(init.getNode).thenReturn(None) + when(mocks.participants.getRunning(participant)).thenReturn(Some(init)) + } + + def withNotConnectedParticipant(): Environment = mockEnvironment { mocks => + val init = mock[ParticipantNodeBootstrap] + val node = mock[ParticipantNode] + when(node.readyDomains).thenReturn(Map[DomainId, Boolean]()) + when(init.getNode).thenReturn(Some(node)) + when(mocks.participants.getRunning(participant)).thenReturn(Some(init)) + } + + def withPingResult(result: Future[PingService.Result]): Environment = mockEnvironment { mocks => + val init = mock[ParticipantNodeBootstrap] + val node = mock[ParticipantNode] + val id = ParticipantId(UniqueIdentifier.tryFromProtoPrimitive(s"${participant}::test")) + val ledgerApiDependentServices = mock[StartableStoppableLedgerApiDependentServices] + val adminWorkflowServices = mock[AdminWorkflowServices] + val pingService = mock[PingService] + + when(init.getNode).thenReturn(Some(node)) + when(node.id).thenReturn(id) + 
when(node.readyDomains).thenReturn(Map(DomainId.tryFromString("test::test") -> true)) + when(node.ledgerApiDependentCantonServices).thenReturn(ledgerApiDependentServices) + when(ledgerApiDependentServices.adminWorkflowServices(any[TraceContext])) + .thenReturn(adminWorkflowServices) + when(adminWorkflowServices.ping).thenReturn(pingService) + when( + pingService + .ping( + any[Set[String]], + any[Set[String]], + anyLong, + anyLong, + anyLong, + any[Option[WorkflowId]], + any[String], + )(any[TraceContext]) + ) + .thenReturn(result) + + when(mocks.participants.getRunning(participant)).thenReturn(Some(init)) + } + + trait MockEnvironment extends Environment { + override type Config = CantonCommunityConfig + } + + trait MockEnvironmentInstance { + val environment = mock[MockEnvironment] + val participants = mock[ParticipantNodes[ + ParticipantNodeBootstrap, + ParticipantNode, + MockEnvironment#Config#ParticipantConfigType, + ]] + + when(environment.participants).thenReturn(participants) + } + + def mockEnvironment(setup: MockEnvironmentInstance => Unit): Environment = { + val mockEnvironment = new MockEnvironmentInstance {} + setup(mockEnvironment) + mockEnvironment.environment + } + + def healthCheck(environment: Environment) = + new PingHealthCheck( + environment, + participant, + 10.seconds, + new HealthMetrics(MetricName("test"), new MetricRegistry()), + loggerFactory, + ) + } + + "periodic check" should { + "only runs check on creation and intervals" in { + val interval = 1.second + val underlyingCheck = new StubbedCheck + val simClock = new SimClock(loggerFactory = loggerFactory) + val firstPromise = Promise[HealthCheckResult]() + underlyingCheck.nextResult = firstPromise.future + val periodicCheck = new PeriodicCheck(simClock, interval, loggerFactory)(underlyingCheck) + + // should have been called on creation + underlyingCheck.callCount shouldBe 1 + + val firstResultFuture = periodicCheck.isHealthy + // should have just returned the original future + 
underlyingCheck.callCount shouldBe 1 + + // complete the first underlyingCheck + firstPromise.trySuccess(Healthy) + + for { + firstResult <- firstResultFuture + _ = firstResult shouldBe Healthy + // calling again should just return the same future result and immediately resolve + _ <- periodicCheck.isHealthy + // and call count should remain the same + _ = underlyingCheck.callCount shouldBe 1 + // setup next underlyingCheck result + secondPromise = Promise[HealthCheckResult]() + _ = underlyingCheck.nextResult = secondPromise.future + // when time passes beyond the interval it should trigger another underlyingCheck + _ = simClock.advance(Duration.ofMillis(interval.toMillis + 1)) + _ = underlyingCheck.callCount shouldBe 2 + // however the periodic underlyingCheck is still returning the prior result until the next underlyingCheck has completed + anotherResult <- periodicCheck.isHealthy + _ = anotherResult shouldBe Healthy + // now lets complete the next call + _ = secondPromise.trySuccess(Unhealthy(":(")) + // now the period underlyingCheck should return this unhealthy + finalResult <- periodicCheck.isHealthy + } yield finalResult shouldBe Unhealthy(":(") + } + + "not perform checks after being closed" in { + val interval = 1.second + val underlyingCheck = new StubbedCheck(Future.successful(Healthy)) + val simClock = new SimClock(loggerFactory = loggerFactory) + val periodicCheck = new PeriodicCheck(simClock, interval, loggerFactory)(underlyingCheck) + + for { + _ <- periodicCheck.isHealthy + initialCallCount = underlyingCheck.callCount + _ = periodicCheck.close() + _ = simClock.advance(Duration.ofMillis(interval.toMillis + 1)) + _ <- periodicCheck.isHealthy + nextCallCount = underlyingCheck.callCount + } yield nextCallCount should equal(initialCallCount) + } + } +} diff --git a/canton-3x/community/app/src/test/scala/com/digitalasset/canton/health/HealthServerTest.scala b/canton-3x/community/app/src/test/scala/com/digitalasset/canton/health/HealthServerTest.scala new 
file mode 100644 index 0000000000..6c150556e0 --- /dev/null +++ b/canton-3x/community/app/src/test/scala/com/digitalasset/canton/health/HealthServerTest.scala @@ -0,0 +1,25 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.health + +import com.digitalasset.canton.BaseTest +import com.digitalasset.canton.health.HealthServer.route +import org.apache.pekko.http.scaladsl.model.StatusCodes +import org.apache.pekko.http.scaladsl.testkit.ScalatestRouteTest +import org.scalatest.wordspec.AnyWordSpec + +class HealthServerTest extends AnyWordSpec with BaseTest with ScalatestRouteTest { + "HealthServer" should { + "return 200 if the check is healthy" in + Get("/health") ~> route(StaticHealthCheck(Healthy)) ~> check { + status shouldBe StatusCodes.OK + } + + "return 500 if the check is unhealthy" in + Get("/health") ~> route(StaticHealthCheck(Unhealthy(":("))) ~> check { + status shouldBe StatusCodes.InternalServerError + responseAs[String] shouldBe ":(" + } + } +} diff --git a/canton-3x/community/app/src/test/scala/com/digitalasset/canton/integration/tests/EnterpriseFeatureInCommunityIntegrationTest.scala b/canton-3x/community/app/src/test/scala/com/digitalasset/canton/integration/tests/EnterpriseFeatureInCommunityIntegrationTest.scala new file mode 100644 index 0000000000..64ebb7f2e0 --- /dev/null +++ b/canton-3x/community/app/src/test/scala/com/digitalasset/canton/integration/tests/EnterpriseFeatureInCommunityIntegrationTest.scala @@ -0,0 +1,88 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests + +import com.digitalasset.canton.console.{CommandFailure, LocalDomainReference} +import com.digitalasset.canton.integration.CommunityTests.{ + CommunityIntegrationTest, + CommunityTestConsoleEnvironment, + SharedCommunityEnvironment, +} +import com.digitalasset.canton.integration.{ + CommunityConfigTransforms, + CommunityEnvironmentDefinition, +} +import com.digitalasset.canton.participant.admin.grpc.PruningServiceError.PruningNotSupportedInCommunityEdition +import monocle.macros.syntax.lens.* + +class EnterpriseFeatureInCommunityIntegrationTest + extends CommunityIntegrationTest + with SharedCommunityEnvironment { + override def environmentDefinition: CommunityEnvironmentDefinition = + CommunityEnvironmentDefinition.simpleTopology + .addConfigTransforms( + _.focus(_.features.enableTestingCommands).replace(true), // For ping + _.focus(_.features.enablePreviewCommands).replace(true), // For pruning + CommunityConfigTransforms.uniquePorts, + ) + .withSetup { implicit env => + // we're only testing domain commands immediately so only start that + mydomain.start() + } + + "sequencer and mediator enterprise admin commands should gracefully fail" in { implicit env => + loggerFactory.assertThrowsAndLogs[CommandFailure]( + mydomain.sequencer.pruning.prune(), + // logged at the server + _.warningMessage should include( + "This Community edition of canton does not support the operation: EnterpriseSequencerAdministrationService.Prune." + ), + // logged at the client + _.commandFailureMessage should include("unsupported by the Community edition of canton"), + ) + + loggerFactory.assertThrowsAndLogs[CommandFailure]( + mydomain.mediator.prune(), + // logged at the server + _.warningMessage should include( + "This Community edition of canton does not support the operation: EnterpriseMediatorAdministrationService.Prune." 
+ ), + // logged at the client + _.commandFailureMessage should include("unsupported by the Community edition of canton"), + ) + } + + "participant pruning should fail gracefully" in { implicit env => + import env.* + + participant1.start() + participant1.domains.connect_local(mydomain) + + val startOffset = participant1.ledger_api.completions.end() + // Generate some data after the pruning point + participant1.health.ping(participant1) + + def assertCannotPrune(task: => Unit, clue: String): Unit = withClue(clue) { + loggerFactory.assertThrowsAndLogs[CommandFailure]( + task, + _.warningMessage should include( + "Canton participant pruning not supported in canton-open-source edition" + ), + _.errorMessage should include(PruningNotSupportedInCommunityEdition.id), + ) + } + + assertCannotPrune(participant1.pruning.prune(startOffset), "prune") + assertCannotPrune(participant1.pruning.prune(startOffset), "prune_internally") + + loggerFactory.assertThrowsAndLogs[CommandFailure]( + participant1.pruning.find_safe_offset(), + // TODO(#5990) find_safe_offset uses sync inspection and doesn't go through a gRPC error with an error code + _.errorMessage should include(PruningNotSupportedInCommunityEdition.Error().cause), + ) + } + + private def mydomain(implicit env: CommunityTestConsoleEnvironment): LocalDomainReference = + env.d("mydomain") +} diff --git a/canton-3x/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ExampleIntegrationTest.scala b/canton-3x/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ExampleIntegrationTest.scala new file mode 100644 index 0000000000..2cd526433d --- /dev/null +++ b/canton-3x/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ExampleIntegrationTest.scala @@ -0,0 +1,128 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests + +import better.files.* +import com.digitalasset.canton.ConsoleScriptRunner +import com.digitalasset.canton.config.CantonRequireTypes.InstanceName +import com.digitalasset.canton.config.RequireTypes.NonNegativeInt +import com.digitalasset.canton.environment.Environment +import com.digitalasset.canton.integration.CommunityTests.{ + CommunityIntegrationTest, + IsolatedCommunityEnvironments, +} +import com.digitalasset.canton.integration.tests.ExampleIntegrationTest.{ + advancedConfiguration, + ensureSystemProperties, + repairConfiguration, + simpleTopology, +} +import com.digitalasset.canton.integration.{ + CommunityConfigTransforms, + CommunityEnvironmentDefinition, +} +import com.digitalasset.canton.logging.NamedLogging +import com.digitalasset.canton.tracing.TracingConfig +import com.digitalasset.canton.util.ShowUtil.* +import monocle.macros.syntax.lens.* + +import scala.concurrent.blocking + +abstract class ExampleIntegrationTest(configPaths: File*) + extends CommunityIntegrationTest + with IsolatedCommunityEnvironments + with HasConsoleScriptRunner { + + override lazy val environmentDefinition: CommunityEnvironmentDefinition = + CommunityEnvironmentDefinition + .fromFiles(configPaths: _*) + .addConfigTransforms( + // lets not share databases + CommunityConfigTransforms.uniqueH2DatabaseNames, + _.focus(_.monitoring.tracing.propagation).replace(TracingConfig.Propagation.Enabled), + CommunityConfigTransforms.updateAllParticipantConfigs { case (_, config) => + // to make sure that the picked up time for the snapshot is the most recent one + config + .focus(_.parameters.transferTimeProofFreshnessProportion) + .replace(NonNegativeInt.zero) + }, + CommunityConfigTransforms.uniquePorts, + ) +} + +trait HasConsoleScriptRunner { this: NamedLogging => + import org.scalatest.EitherValues.* + def runScript(scriptPath: File)(implicit env: Environment): Unit = { + val () = 
ConsoleScriptRunner.run(env, scriptPath.toJava, logger = logger).value + } +} + +object ExampleIntegrationTest { + lazy val examplesPath: File = "community" / "app" / "src" / "pack" / "examples" + lazy val simpleTopology: File = examplesPath / "01-simple-topology" + lazy val createDamlApp: File = examplesPath / "04-create-daml-app" + lazy val advancedConfiguration: File = examplesPath / "03-advanced-configuration" + lazy val composabilityConfiguration: File = examplesPath / "05-composability" + lazy val messagingConfiguration: File = examplesPath / "06-messaging" + lazy val repairConfiguration: File = examplesPath / "07-repair" + lazy val advancedConfTestEnv: File = + "community" / "app" / "src" / "test" / "resources" / "advancedConfDef.env" + + def ensureSystemProperties(kvs: (String, String)*): Unit = blocking(synchronized { + kvs.foreach { case (key, value) => + Option(System.getProperty(key)) match { + case Some(oldValue) => + require( + oldValue == value, + show"Trying to set incompatible system properties for ${key.singleQuoted}. 
Old: ${oldValue.doubleQuoted}, new: ${value.doubleQuoted}.", + ) + case None => + System.setProperty(key, value) + } + } + }) +} + +class SimplePingExampleIntegrationTest + extends ExampleIntegrationTest(simpleTopology / "simple-topology.conf") { + + "run simple-ping.canton successfully" in { implicit env => + import env.* + val port = environment.config + .domains(InstanceName.tryCreate("mydomain")) + .publicApi + .internalPort + .value + .unwrap + .toString + ensureSystemProperties(("canton-examples.mydomain-port", port)) + runScript(simpleTopology / "simple-ping.canton")(environment) + } +} + +class RepairExampleIntegrationTest + extends ExampleIntegrationTest( + advancedConfiguration / "storage" / "h2.conf", + repairConfiguration / "domain-repair-lost.conf", + repairConfiguration / "domain-repair-new.conf", + repairConfiguration / "domain-export-ledger.conf", + repairConfiguration / "domain-import-ledger.conf", + repairConfiguration / "participant1.conf", + repairConfiguration / "participant2.conf", + repairConfiguration / "participant3.conf", + repairConfiguration / "participant4.conf", + repairConfiguration / "enable-preview-commands.conf", + ) { + "deploy repair user-manual topology and initialize" in { implicit env => + ExampleIntegrationTest.ensureSystemProperties("canton-examples.dar-path" -> CantonExamplesPath) + runScript(repairConfiguration / "domain-repair-init.canton")(env.environment) + } + + "deploy ledger import user-manual topology and initialize" in { implicit env => + ExampleIntegrationTest.ensureSystemProperties( + "canton-examples.dar-path" -> CantonExamplesPath + ) + runScript(repairConfiguration / "import-ledger-init.canton")(env.environment) + } +} diff --git a/canton-3x/community/app/src/test/scala/com/digitalasset/canton/integration/tests/release/CliIntegrationTest.scala b/canton-3x/community/app/src/test/scala/com/digitalasset/canton/integration/tests/release/CliIntegrationTest.scala new file mode 100644 index 0000000000..9970b157d6 --- 
/dev/null +++ b/canton-3x/community/app/src/test/scala/com/digitalasset/canton/integration/tests/release/CliIntegrationTest.scala @@ -0,0 +1,369 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.release + +import better.files.File +import com.digitalasset.canton.BaseTest +import com.digitalasset.canton.buildinfo.BuildInfo +import com.digitalasset.canton.console.BufferedProcessLogger +import com.digitalasset.canton.logging.LogEntry +import org.scalatest.wordspec.FixtureAnyWordSpec +import org.scalatest.{Assertion, Outcome, SuiteMixin} + +import java.io.ByteArrayInputStream +import scala.sys.process.* + +/** The `CliIntegrationTest` tests Canton command line options by instantiating a Canton binary in a new process with + * the to-be-tested CLI options as arguments. + * Before being able to run these tests locally, you need to execute `sbt bundle`. + */ +class CliIntegrationTest extends FixtureAnyWordSpec with BaseTest with SuiteMixin { + + override protected def withFixture(test: OneArgTest): Outcome = test(new BufferedProcessLogger) + + override type FixtureParam = BufferedProcessLogger + + lazy val cantonDir = "enterprise/app/target/release/canton" + lazy val repositoryRootFromCantonDir = "../../../../.." 
+ lazy val cantonBin = s"$cantonDir/bin/canton" + lazy val resourceDir = "community/app/src/test/resources" + + // turn off cache-dir to avoid compilation errors due to concurrent cache access + private lazy val cacheTurnOff = + s"$resourceDir/config-snippets/disable-ammonite-cache.conf" + + lazy val simpleConf = "community/app/src/pack/examples/01-simple-topology/simple-topology.conf" + lazy val unsupportedProtocolVersionConfig = + "enterprise/app/src/test/resources/unsupported-protocol-version.conf" + // this warning is potentially thrown when starting Canton with --no-tty + lazy val ttyWarning = + "WARN org.jline - Unable to create a system terminal, creating a dumb terminal (enable debug logging for more information)" + lazy val jsonTtyWarning = + "\"message\":\"Unable to create a system terminal, creating a dumb terminal (enable debug logging for more information)\",\"logger_name\":\"org.jline\",\"thread_name\":\"main\",\"level\":\"WARN\"" + + // Message printed out by the bootstrap script if Canton is started successfully + lazy val successMsg = "The last emperor is always the worst." + lazy val cantonShouldStartFlags = + s"--verbose --no-tty --config $cacheTurnOff --bootstrap $resourceDir/scripts/bootstrap.canton" + + "Calling Canton" should { + + "print out the help message when using the --help flag" in { processLogger => + s"$cantonBin --help" ! processLogger + checkOutput( + processLogger, + shouldContain = Seq("Usage: canton [daemon|run|generate] [options] ..."), + ) + } + + "print out the help message when using no flag" in { processLogger => + s"$cantonBin" ! processLogger + checkOutput( + processLogger, + shouldContain = Seq("Usage: canton [daemon|run|generate] [options] ..."), + shouldSucceed = false, + ) + } + + "successfully start and exit after using a run script" in { processLogger => + s"$cantonBin run $resourceDir/scripts/run.canton --config $simpleConf --verbose --no-tty" ! 
processLogger + checkOutput(processLogger, shouldContain = Seq(successMsg), shouldSucceed = false) + } + + "successfully start and auto-connect to local domains" in { processLogger => + s"""$cantonBin daemon + |--config $cacheTurnOff + |--bootstrap $resourceDir/scripts/startup.canton + |-C canton.parameters.manual-start=no + |--auto-connect-local + |--config $simpleConf --verbose --no-tty""".stripMargin ! processLogger + checkOutput( + processLogger, + shouldContain = Seq("connected: list(true, true)", successMsg), + ) + } + + "print out the Canton version when using the --version flag" in { processLogger => + s"$cantonBin --version" ! processLogger + checkOutput( + processLogger, + shouldContain = Seq("Canton", "Daml Libraries", BuildInfo.protocolVersions.toString), + ) + } + + "successfully start a Canton node when using a mix of a --config and -C config" in { + processLogger => + s"$cantonBin --config $simpleConf -C canton.participants.participant1.parameters.admin-workflow.bong-test-max-level=9000 $cantonShouldStartFlags" ! processLogger + checkOutput(processLogger, shouldContain = Seq(successMsg)) + } + + "successfully start a Canton node when configured only using -C" in { processLogger => + s"$cantonBin -C canton.participants.participant1.storage.type=memory -C canton.participants.participant1.admin-api.port=5012 -C canton.participants.participant1.ledger-api.port=5011 -C canton.domains.domain1.public-api.port=5018 -C canton.domains.domain1.admin-api.port=5019 -C canton.domains.domain1.storage.type=memory $cantonShouldStartFlags" ! processLogger + checkOutput(processLogger, shouldContain = Seq(successMsg)) + } + + "return an appropriate error when an invalid config is used" in { processLogger => + s"$cantonBin --config $simpleConf --config $unsupportedProtocolVersionConfig" ! 
processLogger + checkOutput( + processLogger, + shouldContain = Seq("unsupported-protocol-version.conf", "42"), + shouldSucceed = false, + ) + } + + "not shadow bootstrap script variables with the bootstrap script file name" in { + processLogger => + s"$cantonBin --config $cacheTurnOff --config $simpleConf --no-tty --bootstrap $resourceDir/scripts/participant1.canton " ! processLogger + + checkOutput(processLogger, shouldContain = Seq(successMsg)) + } + + "change logging directory, log level and log format when using the appropriate CLI flags" in { + processLogger => + s"$cantonBin --config $cacheTurnOff --log-truncate --log-file-appender flat --config $simpleConf --no-tty --bootstrap $resourceDir/scripts/bootstrap.canton --log-file-name log/new-name.log --log-level-canton DEBUG --log-encoder json" ! processLogger + + checkOutput(processLogger, shouldContain = Seq(successMsg)) + val logFile = File("log/new-name.log") + assert(logFile.exists) + val contents = logFile.contentAsString + assert(contents.contains("\"level\":\"DEBUG\"")) + assert(contents.contains(",\"message\":\"Starting Canton version ")) + } + + "run with log last errors disabled" in { processLogger => + s"$cantonBin --log-last-errors=false --config $simpleConf $cantonShouldStartFlags" ! processLogger + checkOutput( + processLogger, + shouldContain = Seq(successMsg), + ) + } + + "log last errors in separate file" in { processLogger => + s"$cantonBin --config $cacheTurnOff --log-truncate --log-file-appender flat --config $simpleConf --no-tty --bootstrap $resourceDir/scripts/bootstrap-with-error.canton --log-file-name log/canton-without-debug.log" ! 
processLogger + + // Make sure the main log file does not contain debug-level log entries + val logFile = File("log/canton-without-debug.log") + val logContents = logFile.contentAsString + assert(!logContents.contains("some logging debug event")) + assert(logContents.contains("some logging error")) + + val lastErrorsLogFile = File("log/canton_errors.log") + lastErrorsLogFile.lineCount shouldEqual 4 + val errorContents = lastErrorsLogFile.contentAsString + // Errors file must include debug output + forEvery(List("some logging debug event", "some logging error"))(errorContents.contains) + } + + "dynamically set log level with log last errors enabled" in { processLogger => + s"$cantonBin --config $cacheTurnOff --log-truncate --log-file-appender flat --config $simpleConf --no-tty --bootstrap $resourceDir/scripts/bootstrap-with-error-dynamic.canton --log-file-name log/canton-partial-debug.log" ! processLogger + + val logFile = File("log/canton-partial-debug.log") + val logContents = logFile.contentAsString + + assert(!logContents.contains("some logging debug event")) + assert(logContents.contains("final logging debug event")) + + val lastErrorsLogFile = File("log/canton_errors.log") + lastErrorsLogFile.lineCount shouldEqual 6 + val errorContents = lastErrorsLogFile.contentAsString + // Errors file must include debug output + forEvery( + List( + "some logging debug event", + "some logging error", + "final logging debug event", + "final logging error", + ) + )(errorContents.contains) + } + + "run with log file appender off" in { processLogger => + s"$cantonBin --log-file-appender=off --config $simpleConf $cantonShouldStartFlags" ! processLogger + checkOutput( + processLogger, + shouldContain = Seq(successMsg), + ) + } + + "log number of threads at info level" in { processLogger => + Process("rm -f log/canton.log", Some(new java.io.File(cantonDir))) ! 
+ + val basicCommand = { + // user-manual-entry-begin: SetNumThreads + "bin/canton -Dscala.concurrent.context.numThreads=12 --config examples/01-simple-topology/simple-topology.conf" + // user-manual-entry-end: SetNumThreads + } + val cmd = basicCommand + " --no-tty" + + val inputStream = new ByteArrayInputStream("exit\n".getBytes) + + Process(cmd, Some(new java.io.File(cantonDir))) #< inputStream ! processLogger + + val logLines = (File(cantonDir) / "log" / "canton.log").lines() + + val expectedLine = { + // user-manual-entry-begin: LogNumThreads + "INFO c.d.c.e.EnterpriseEnvironment - Deriving 12 as number of threads from '-Dscala.concurrent.context.numThreads'." + // user-manual-entry-end: LogNumThreads + } + + forAtLeast(1, logLines) { _ should endWith(expectedLine) } + + checkOutput(processLogger) + } + + "turn a local config into a remote" in { processLogger => + s"$cantonBin generate remote-config --config $simpleConf " ! processLogger + Seq("remote-participant1.conf", "remote-participant2.conf", "remote-mydomain.conf").foreach { + check => + val fl = File(check) + assert(fl.exists, s"$check is missing") + } + } + + "let the demo run in the enterprise release" in { processLogger => + val exitCode = Process( + Seq( + "bin/canton", + "-Ddemo-test=2", + "run", + "demo/demo.sc", + "--debug", + "--log-file-name=log/demo.log", + "-c", + s"$repositoryRootFromCantonDir/$cacheTurnOff", + "-c", + "demo/demo.conf", + ), + Some(new java.io.File(cantonDir)), + ) ! 
processLogger + logger.debug(s"The process has ended now with $exitCode") + val out = processLogger.output() + logger.debug("Stdout is\n" + out) + exitCode shouldBe 0 + out should include(successMsg) + } + + "return failure exit code on script failure" when { + def test( + scriptFirstLine: String, + isDaemon: Boolean, + expectedExitCode: Int, + expectedErrorLines: Seq[String], + logFileName: String, + )( + extraOutputAssertion: String => Assertion = _ => succeed + )(processLogger: FixtureParam): Unit = { + File.usingTemporaryFile(prefix = "script-", suffix = ".sc") { scriptFile => + scriptFile.appendLine(scriptFirstLine) + + val runModeArgs = + if (isDaemon) Seq("daemon", "--bootstrap", scriptFile.toString) + else Seq("run", scriptFile.toString) + + val exitCode = Process( + Seq("bin/canton") ++ runModeArgs ++ Seq( + // turn off cache-dir to avoid compilation errors due to concurrent cache access + "--config", + s"$repositoryRootFromCantonDir/$cacheTurnOff", + "--debug", + "--log-file-name", + "log/" + logFileName, + "-c", + "demo/demo.conf", + ), + Some(new java.io.File(cantonDir)), + ) ! 
processLogger + + val out = processLogger.output() + logger.debug(s"The process has ended now with $exitCode") + + loggerFactory.assertLogsUnordered( + out + .split("\\n") + .foreach(msg => if (msg.contains("ERROR")) logger.error(msg) else logger.debug(msg)), + expectedErrorLines + .map(expectedErrorLine => + (logEntry: LogEntry) => logEntry.errorMessage should include(expectedErrorLine) + ) *, + ) + + exitCode shouldBe expectedExitCode + expectedErrorLines.foreach(expectedLine => out should include(expectedLine)) + extraOutputAssertion(out) + } + } + + "script (run) does not compile" in { + test( + scriptFirstLine = "I shall not compile", + isDaemon = false, + expectedExitCode = 1, + expectedErrorLines = Seq("Script execution failed: Compilation Failed"), + logFileName = "runDoesNotCompile.log", + )(_ should include("not found: value I")) + } + + "script (run) compiles but throws" in { + test( + scriptFirstLine = """throw new RuntimeException("some exception")""", + isDaemon = false, + expectedExitCode = 1, + expectedErrorLines = + Seq("Script execution failed: java.lang.RuntimeException: some exception"), + logFileName = "runCompilesButThrows.log", + )() + } + + "script (daemon) does not compile" in { + test( + scriptFirstLine = "I shall not compile", + isDaemon = true, + expectedExitCode = 3, // Bootstrap scripts exit with 3 + expectedErrorLines = Seq("Bootstrap script terminated with an error"), + logFileName = "daemonDoesNotCompile.log", + )(_ should include("not found: value I")) + } + + "script (daemon) compiles but throws" in { + test( + scriptFirstLine = """throw new RuntimeException("some exception")""", + isDaemon = true, + expectedExitCode = 3, // Bootstrap scripts exit with 3 + expectedErrorLines = Seq( + "Bootstrap script terminated with an error: java.lang.RuntimeException: some exception" + ), + logFileName = "daemonCompilesButThrows.log", + )() + } + } + } + + private def checkOutput( + logger: BufferedProcessLogger, + shouldContain: Seq[String] = 
Seq(), + shouldNotContain: Seq[String] = Seq(), + shouldSucceed: Boolean = true, + ): Unit = { + // Filter out false positives in help message for last-errors option + val filters = List( + jsonTtyWarning, + ttyWarning, + "last_errors", + "last-errors", + // slow ExecutionContextMonitor warnings + "WARN c.d.c.c.ExecutionContextMonitor - Execution context", + ) + val log = filters + .foldLeft(logger.output()) { case (log, filter) => + log.replace(filter, "") + } + .toLowerCase + shouldContain.foreach(str => assert(log.contains(str.toLowerCase()))) + shouldNotContain.foreach(str => assert(!log.contains(str.toLowerCase()))) + val undesirables = Seq("warn", "error", "exception") + if (shouldSucceed) undesirables.foreach(str => assert(!log.contains(str.toLowerCase()))) + } + +} diff --git a/canton-3x/community/app/src/test/scala/com/digitalasset/canton/metrics/MetricsFactoryTest.scala b/canton-3x/community/app/src/test/scala/com/digitalasset/canton/metrics/MetricsFactoryTest.scala new file mode 100644 index 0000000000..d6f346c9e3 --- /dev/null +++ b/canton-3x/community/app/src/test/scala/com/digitalasset/canton/metrics/MetricsFactoryTest.scala @@ -0,0 +1,25 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.metrics + +import com.digitalasset.canton.BaseTest +import io.opentelemetry.api.OpenTelemetry +import org.scalatest.wordspec.AnyWordSpec + +class MetricsFactoryTest extends AnyWordSpec with BaseTest { + + "metrics factory" should { + "generate valid documentation" in { + val mf = MetricsFactory.forConfig( + MetricsConfig(), + OpenTelemetry.noop(), + MetricsFactoryType.InMemory(_ => new InMemoryMetricsFactory), + ) + val (participantMetrics, domainMetrics) = mf.metricsDoc() + domainMetrics should not be empty + participantMetrics should not be empty + } + } + +} diff --git a/canton-3x/community/app/src/test/scala/com/digitalasset/canton/metrics/MetricsFactoryValues.scala b/canton-3x/community/app/src/test/scala/com/digitalasset/canton/metrics/MetricsFactoryValues.scala new file mode 100644 index 0000000000..705af25d8e --- /dev/null +++ b/canton-3x/community/app/src/test/scala/com/digitalasset/canton/metrics/MetricsFactoryValues.scala @@ -0,0 +1,91 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.metrics + +import com.daml.metrics.api.testing.InMemoryMetricsFactory.{MetricsByName, MetricsState} +import com.daml.metrics.api.testing.{InMemoryMetricsFactory, MetricValues} +import com.daml.metrics.api.{MetricName, MetricsContext} + +import scala.collection.concurrent +import scala.collection.concurrent.TrieMap +import scala.language.implicitConversions + +trait MetricsFactoryValues extends MetricValues { + + implicit def convertFactoryToValuable( + factory: MetricHandle.LabeledMetricsFactory + ): MetricsFactoryValuable = MetricsFactoryValuable( + factory + ) + + // Not final due to scalac: "The outer reference in this type test cannot be checked at run time." 
+ case class MetricsFactoryValuable(factory: MetricHandle.LabeledMetricsFactory) { + + def asInMemory: InMemoryMetricsFactory = factory match { + case inMemory: InMemoryMetricsFactory => inMemory + case _ => + throw new IllegalArgumentException(s"Cannot convert $factory to in-memory factory.") + } + } + + implicit def inMemoryMetricToValuable[T]( + state: concurrent.Map[MetricsContext, T] + ): InMemoryMetricValuable[T] = InMemoryMetricValuable(state) + + case class InMemoryMetricValuable[T](state: concurrent.Map[MetricsContext, T]) { + def singleMetric: T = MetricValues.singleValueFromContexts(state.toMap) + } + + implicit def metricStateToValuable(state: MetricsState): MetricsStateValuable = + MetricsStateValuable(state) + + case class MetricsStateValuable(state: MetricsState) { + + def totalMetricsRegistered: Int = { + state.gauges.size + state.asyncGauges.size + state.histograms.size + state.timers.size + state.counters.size + state.meters.size + } + + def filteredForPrefix(metricName: MetricName): MetricsState = { + def filteredMapForPrefix[Metric](state: MetricsByName[Metric]): MetricsByName[Metric] = { + TrieMap.from( + state.toMap.view.filterKeys(_.startsWith(metricName)).toMap + ) + } + + MetricsState( + timers = filteredMapForPrefix(state.timers), + gauges = filteredMapForPrefix(state.gauges), + asyncGauges = filteredMapForPrefix(state.asyncGauges), + meters = filteredMapForPrefix(state.meters), + counters = filteredMapForPrefix(state.counters), + histograms = filteredMapForPrefix(state.histograms), + ) + } + + def singleCounter( + metricName: MetricName + ): InMemoryMetricsFactory.InMemoryCounter = state.counters + .getOrElse( + metricName, + throw new IllegalStateException( + s"Cannot find counter with name $metricName in the metric state $state" + ), + ) + .singleMetric + + def singleGauge( + metricName: MetricName + ): InMemoryMetricsFactory.InMemoryGauge[?] 
= state.gauges + .getOrElse( + metricName, + throw new IllegalStateException( + s"Cannot find counter with name $metricName in the metric state $state" + ), + ) + .singleMetric + + def metricNames: collection.Set[MetricName] = + state.meters.keySet ++ state.counters.keySet ++ state.gauges.keySet ++ state.asyncGauges.keySet ++ state.timers.keySet ++ state.histograms.keySet + } +} diff --git a/canton-3x/community/base/src/main/protobuf/buf.yaml b/canton-3x/community/base/src/main/protobuf/buf.yaml new file mode 100644 index 0000000000..1c23c26c8e --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/buf.yaml @@ -0,0 +1,6 @@ +version: v1 +build: + excludes: + - com/digitalasset/canton/scalapb + # TODO(#14048) remove once we finalize the protobufs + - com/digitalasset/canton/topology/admin/v1 diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/crypto/admin/v0/vault_service.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/crypto/admin/v0/vault_service.proto new file mode 100644 index 0000000000..ce1b42304e --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/crypto/admin/v0/vault_service.proto @@ -0,0 +1,246 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.crypto.admin.v0; + +import "com/digitalasset/canton/crypto/v0/crypto.proto"; +import "google/protobuf/empty.proto"; + +/** + * Vault service providing programmatic access to locally stored keys and certificates + * + * We have two key-stores: a private key store where we are storing our pairs of + * public and private keys and a public key store where we are storing other + * public keys that we learned. + * + * We learn public key stores in different ways: either by importing them or + * by picking them up from internal sources (such as identity management updates). 
+ * + * The only purpose of the public key store (where we import foreign keys) is convenience for + * identity management such that when we add identity management transactions, we can refer to + * fingerprints in commands while building them rather than having to attach public-key files. + * + * In addition, we also provide access to the locally stored certificates which are used + * either by the HTTP/1.1 sequencer client or for legal identity claims. + */ +service VaultService { + /** + * List public keys according to request filter for which we have a private key in our key vault. + * + * The request includes a filter for fingerprints which can be used for lookups. + * + * @param ListMyKeysRequest: request with optional fingerprint filter + * @return: all serialized keys and metadata that have the fingerprint filter as a substring in their fingerprint + */ + rpc ListMyKeys(ListKeysRequest) returns (ListMyKeysResponse); + + /** + * Generates a new public / private key pair for signing. + * + * Stores the private key in the vault, and returns the public key + */ + rpc GenerateSigningKey(GenerateSigningKeyRequest) returns (GenerateSigningKeyResponse); + + /** + * Generates a new public / private key pair for hybrid encryption. + * + * Stores the private key in the vault, and returns the public key + */ + rpc GenerateEncryptionKey(GenerateEncryptionKeyRequest) returns (GenerateEncryptionKeyResponse); + + /** + * Registers a KMS key for asymmetric/hybrid encryption. + * + * Stores the corresponding public key in the vault, and returns it + */ + rpc RegisterKmsEncryptionKey(RegisterKmsEncryptionKeyRequest) returns (RegisterKmsEncryptionKeyResponse); + + /** + * Registers a KMS key for signing. 
+ * + * Stores the corresponding public key in the vault, and returns it + */ + rpc RegisterKmsSigningKey(RegisterKmsSigningKeyRequest) returns (RegisterKmsSigningKeyResponse); + + /** + * Import a public key into the registry in order to provide that Fingerprint -> PublicKey lookups + * + * @param: ImportPublicKeyRequest serialized public key to be imported + * @return: fingerprint and serialized public key of imported public key + */ + rpc ImportPublicKey(ImportPublicKeyRequest) returns (ImportPublicKeyResponse); + + /** + * Lists all public keys matching the supplied filter which are internally cached + * + * Any public key returned here can be referenced in topology transaction building + * by fingerprint. + */ + rpc ListPublicKeys(ListKeysRequest) returns (ListKeysResponse); + + /** + * Changes the wrapper key that is used to encrypt private keys when stored + */ + rpc RotateWrapperKey(RotateWrapperKeyRequest) returns (google.protobuf.Empty); + + rpc GetWrapperKeyId(GetWrapperKeyIdRequest) returns (GetWrapperKeyIdResponse); + + rpc ExportKeyPair(ExportKeyPairRequest) returns (ExportKeyPairResponse); + + rpc ImportKeyPair(ImportKeyPairRequest) returns (ImportKeyPairResponse); + + rpc DeleteKeyPair(DeleteKeyPairRequest) returns (DeleteKeyPairResponse); +} + +message GenerateCertificateRequest { + // unique identifier to be used for CN + string unique_identifier = 1; + // the private key fingerprint to use for this certificate + string certificate_key = 2; + // optional additional X500 names + string additional_subject = 3; + // the additional subject names to be added to this certificate + repeated string subject_alternative_names = 4; +} + +message GenerateCertificateResponse { + // the certificate in PEM format + string x509_cert = 1; +} + +message ListCertificateRequest { + string filterUid = 1; +} + +message ListCertificateResponse { + message Result { + string x509_cert = 1; + } + repeated Result results = 1; +} + +message ImportCertificateRequest { + // 
X509 certificate as PEM + string x509_cert = 1; +} + +message ImportCertificateResponse { + string certificate_id = 1; +} + +message ImportPublicKeyRequest { + // import a crypto.PublicKey protobuf serialized key + bytes public_key = 1; + // an optional name that should be stored along side the key + string name = 2; +} + +message ImportPublicKeyResponse { + // fingerprint of imported key + string fingerprint = 1; +} + +message ListKeysRequest { + // the substring that needs to match a given fingerprint + string filter_fingerprint = 1; + // the substring to filter the name + string filter_name = 2; + // filter on public key purpose + repeated com.digitalasset.canton.crypto.v0.KeyPurpose filter_purpose = 3; +} + +message PrivateKeyMetadata { + com.digitalasset.canton.crypto.v0.PublicKeyWithName public_key_with_name = 1; + + // If set the private key is stored encrypted by the wrapper key + string wrapper_key_id = 2; +} + +message ListMyKeysResponse { + repeated PrivateKeyMetadata private_keys_metadata = 1; +} + +message ListKeysResponse { + repeated com.digitalasset.canton.crypto.v0.PublicKeyWithName public_keys = 1; +} + +message GenerateSigningKeyRequest { + com.digitalasset.canton.crypto.v0.SigningKeyScheme key_scheme = 1; + + // optional descriptive name for the key + string name = 2; +} + +message GenerateSigningKeyResponse { + com.digitalasset.canton.crypto.v0.SigningPublicKey public_key = 1; +} + +message GenerateEncryptionKeyRequest { + com.digitalasset.canton.crypto.v0.EncryptionKeyScheme key_scheme = 1; + + // optional descriptive name for the key + string name = 2; +} + +message GenerateEncryptionKeyResponse { + com.digitalasset.canton.crypto.v0.EncryptionPublicKey public_key = 1; +} + +message RegisterKmsSigningKeyRequest { + string kms_key_id = 1; + + // optional descriptive name for the key + string name = 2; +} + +message RegisterKmsSigningKeyResponse { + com.digitalasset.canton.crypto.v0.SigningPublicKey public_key = 1; +} + +message 
RegisterKmsEncryptionKeyRequest { + string kms_key_id = 1; + + // optional descriptive name for the key + string name = 2; +} + +message RegisterKmsEncryptionKeyResponse { + com.digitalasset.canton.crypto.v0.EncryptionPublicKey public_key = 1; +} + +message RotateWrapperKeyRequest { + string new_wrapper_key_id = 1; +} + +message GetWrapperKeyIdRequest {} + +message GetWrapperKeyIdResponse { + string wrapper_key_id = 1; +} + +message ExportKeyPairRequest { + // The id of the keypair to export + string fingerprint = 1; + + // The protocol version that defines the serialization of the keypair + int32 protocol_version = 2; +} + +message ExportKeyPairResponse { + bytes key_pair = 1; +} + +message ImportKeyPairRequest { + bytes key_pair = 1; + string name = 2; +} + +message ImportKeyPairResponse {} + +message DeleteKeyPairRequest { + string fingerprint = 1; +} + +message DeleteKeyPairResponse {} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/crypto/v0/crypto.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/crypto/v0/crypto.proto new file mode 100644 index 0000000000..bb26c196aa --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/crypto/v0/crypto.proto @@ -0,0 +1,199 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.crypto.v0; + +enum HashAlgorithm { + MissingHashAlgorithm = 0; + Sha256 = 1; +} + +enum HmacAlgorithm { + MissingHmacAlgorithm = 0; + HmacSha256 = 1; +} + +message Hmac { + HmacAlgorithm algorithm = 1; + bytes hmac = 2; +} + +message Salt { + oneof algorithm { + HmacAlgorithm hmac = 1; + } + bytes salt = 2; +} + +message Signature { + SignatureFormat format = 1; + + bytes signature = 2; + + // The fingerprint/id of the keypair used to create this signature and needed to verify. 
+ // The referenced key provides the context for the used signature scheme. + string signed_by = 3; +} + +enum SignatureFormat { + MissingSignatureFormat = 0; + + // Signature scheme specific signature format + RawSignatureFormat = 1; +} + +message PublicKey { + oneof key { + SigningPublicKey signing_public_key = 1; + EncryptionPublicKey encryption_public_key = 2; + } +} + +message PublicKeyWithName { + PublicKey public_key = 1; + + // Optional name of the public key + string name = 2; +} + +message PrivateKey { + oneof key { + SigningPrivateKey signing_private_key = 1; + EncryptionPrivateKey encryption_private_key = 2; + } +} + +enum KeyPurpose { + UnknownKeyPurpose = 0; + SigningKeyPurpose = 1; + EncryptionKeyPurpose = 2; +} + +message SigningPublicKey { + string id = 1; + + CryptoKeyFormat format = 2; + + // Serialized public key in the format specified above + bytes public_key = 3; + + // Explicit state the key scheme of the serialized public key + SigningKeyScheme scheme = 4; +} + +message SigningPrivateKey { + string id = 1; + + CryptoKeyFormat format = 2; + + // Serialized private key in the format specified above + bytes private_key = 3; + + // Explicit state the key scheme of the serialized private key + SigningKeyScheme scheme = 4; +} + +message SigningKeyPair { + SigningPublicKey public_key = 1; + SigningPrivateKey private_key = 2; +} + +enum SigningKeyScheme { + MissingSigningKeyScheme = 0; + + // Signature based on Curve25519 + // http://ed25519.cr.yp.to/ + Ed25519 = 1; + + // EC-DSA with NIST curve P-256 or P-384 + EcDsaP256 = 2; + EcDsaP384 = 3; + + // SM2 signature scheme, support dropped in >=2.7 + Sm2 = 4; +} + +message EncryptionPublicKey { + string id = 1; + + CryptoKeyFormat format = 2; + + // Serialized public key in the format specified above + bytes public_key = 3; + + // Explicit state the key scheme of the serialized public key + EncryptionKeyScheme scheme = 4; +} + +message EncryptionPrivateKey { + string id = 1; + + CryptoKeyFormat 
format = 2; + + // Serialized private key in the format specified above + bytes private_key = 3; + + // Explicit state the key scheme of the serialized private key + EncryptionKeyScheme scheme = 4; +} + +message EncryptionKeyPair { + EncryptionPublicKey public_key = 1; + EncryptionPrivateKey private_key = 2; +} + +enum EncryptionKeyScheme { + MissingEncryptionKeyScheme = 0; + + // ECIES with ECDH over NIST P-256, AES128 GCM, and HKDF with HMAC-SHA256 + EciesP256HkdfHmacSha256Aes128Gcm = 1; + + // ECIES with ECDH over NIST P-256, AES128 CBC, and HKDF and authentication with HMAC-SHA256 + EciesP256HmacSha256Aes128Cbc = 2; + + /* RSA with a 2048 bit key with OAEP Padding, + using SHA-256 for both the hash and in the MGF1 mask generation function along with an empty label. */ + Rsa2048OaepSha256 = 3; +} + +message CryptoKeyPair { + oneof pair { + SigningKeyPair signing_key_pair = 1; + EncryptionKeyPair encryption_key_pair = 2; + } +} + +message SymmetricKey { + CryptoKeyFormat format = 1; + + bytes key = 2; + + SymmetricKeyScheme scheme = 3; +} + +enum SymmetricKeyScheme { + MissingSymmetricKeyScheme = 0; + + // AES with 128bit keys in GCM + Aes128Gcm = 1; +} + +// Serialization format for crypto keys and signatures +enum CryptoKeyFormat { + MissingCryptoKeyFormat = 0; + + // Tink's KeySet proto serialization format + // https://github.com/google/tink/tree/1.5/proto + Tink = 1; + + // ASN.1 + DER encoding + Der = 2; + + // Raw encoding of a key, typically used for symmetric keys or Ed25519 keypairs. 
+ Raw = 3; + + // Symbolic crypto, must only be used for testing + Symbolic = 10000; +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/domain/api/v0/sequencer_authentication_service.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/domain/api/v0/sequencer_authentication_service.proto new file mode 100644 index 0000000000..cc9716116b --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/domain/api/v0/sequencer_authentication_service.proto @@ -0,0 +1,70 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.domain.api.v0; + +import "com/digitalasset/canton/crypto/v0/crypto.proto"; +import "google/protobuf/timestamp.proto"; + +// Operations to generate an authentication token for calling sequencer operations +service SequencerAuthenticationService { + // If provided with a supported protocol version and crypto type, + // will return a nonce and fingerprint of the expected key to sign this nonce + rpc Challenge(Challenge.Request) returns (Challenge.Response) {} + // If provided with a correctly signed nonce, will return a authentication token + // to be supplied to SequencerService operations + rpc Authenticate(Authentication.Request) returns (Authentication.Response) {} +} + +message Challenge { + message Request { + string member = 1; + repeated string member_protocol_versions = 2; + } + + message Response { + oneof value { + Success success = 1; + Failure failure = 2; + } + } + message Success { + string domain_version = 1; + // random nonce to be used for authentication + bytes nonce = 2; + // fingerprints of the possible keys the domain expects the member to use for authentication + // (many are supplied in case there are many instances running on behalf of the member that only have access to a subset of the registered keys) + 
repeated string fingerprints = 3; + } + message Failure { + uint32 code = 1; + string reason = 2; + } +} + +message Authentication { + message Request { + // member sending request + string member = 1; + // signature of the handshake nonce concatenated with the identity of the domain (fingerprint) + com.digitalasset.canton.crypto.v0.Signature signature = 2; + // nonce value that was signed is sent back to identify the challenge + bytes nonce = 3; + } + message Response { + oneof value { + Success success = 1; + Failure failure = 2; + } + } + message Success { + bytes token = 1; + google.protobuf.Timestamp expires_at = 2; + } + message Failure { + uint32 code = 1; + string reason = 2; + } +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/domain/api/v0/sequencer_connect_service.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/domain/api/v0/sequencer_connect_service.proto new file mode 100644 index 0000000000..cff5b7fc15 --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/domain/api/v0/sequencer_connect_service.proto @@ -0,0 +1,60 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.domain.api.v0; + +import "com/digitalasset/canton/domain/api/v0/service_agreement.proto"; +import "com/digitalasset/canton/protocol/v0/sequencing.proto"; +import "com/digitalasset/canton/protocol/v1/sequencing.proto"; + +service SequencerConnectService { + rpc Handshake(com.digitalasset.canton.protocol.v0.Handshake.Request) returns (com.digitalasset.canton.protocol.v0.Handshake.Response); + rpc GetDomainId(SequencerConnect.GetDomainId.Request) returns (SequencerConnect.GetDomainId.Response); + rpc GetDomainParameters(SequencerConnect.GetDomainParameters.Request) returns (SequencerConnect.GetDomainParameters.Response); + rpc VerifyActive(SequencerConnect.VerifyActive.Request) returns (SequencerConnect.VerifyActive.Response); + rpc GetServiceAgreement(GetServiceAgreementRequest) returns (GetServiceAgreementResponse); +} + +message SequencerConnect { + message GetDomainId { + message Request {} + + message Response { + string domain_id = 1; + // If `sequencer_id` is an empty string, consumers of this API can assume + // that `domain_id` serves as the `sequencer_id`. 
+ string sequencer_id = 2; + } + } + + message GetDomainParameters { + message Request {} + + message Response { + oneof parameters { + com.digitalasset.canton.protocol.v1.StaticDomainParameters parameters_v1 = 2; + } + } + } + + message VerifyActive { + message Request {} + + message Response { + oneof value { + Success success = 1; + Failure failure = 2; + } + } + + message Success { + bool is_active = 1; + } + + message Failure { + string reason = 1; + } + } +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/domain/api/v0/sequencer_connection.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/domain/api/v0/sequencer_connection.proto new file mode 100644 index 0000000000..75678f9cad --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/domain/api/v0/sequencer_connection.proto @@ -0,0 +1,32 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.domain.api.v0; + +import "google/protobuf/wrappers.proto"; +import "scalapb/scalapb.proto"; + +// Client configuration for how members should connect to the sequencer of a domain. +message SequencerConnection { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StorageProtoVersion"; + + oneof type { + Grpc grpc = 2; + } + + string alias = 3; + + message Grpc { + // connection information to sequencer (http[s]://:") + repeated string connections = 1; + // Should the connection use TLS? 
+ bool transportSecurity = 2; + google.protobuf.BytesValue customTrustCertificates = 3; + } +} + +enum SequencerApiType { + Grpc = 0; +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/domain/api/v0/sequencer_service.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/domain/api/v0/sequencer_service.proto new file mode 100644 index 0000000000..717b2957dd --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/domain/api/v0/sequencer_service.proto @@ -0,0 +1,212 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.domain.api.v0; + +import "com/digitalasset/canton/protocol/v1/signed_content.proto"; +import "com/digitalasset/canton/topology/admin/v0/topology_ext.proto"; +import "com/digitalasset/canton/v0/trace_context.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; +import "scalapb/scalapb.proto"; + +// The service that a member can use to send messages to the domain and +// receive events from the domain +service SequencerService { + // Submit a send request to the sequencer for sequencing asynchronously. + // The rpc request may return an error if the request cannot be processed at all - see SendAsyncResponse for these scenarios. + // The sequencer may emit a Deliver or DeliverError event from this send. + // If the subscriber witnesses an event greater or equal to the supplied maxSequencingTime, the sequencer + // guarantees not to publish a future Deliver event for this send. + // The sequencer implementation may however publish a future DeliverError (typically used to indicate maxSequencingTime exceeded). 
+ // + // + // The signature is checked on the server side to avoid that malicious sequencers create fake + // submission requests in multi-writer architectures where writers don't fully trust each other. + // + // Used in protocol version 5 or higher + rpc SendAsyncVersioned(SendAsyncVersionedRequest) returns (SendAsyncSignedResponse); + + // Submit a send request to the sequencer for sequencing asynchronously exactly like SendAsyncVersioned, except that this is + // meant to be used only by unauthenticated members for very specific operations that do not require authentication + // such as requesting that a participant's topology data gets accepted by the topology manager + // + // Used in protocol version 5 or higher + rpc SendAsyncUnauthenticatedVersioned(SendAsyncUnauthenticatedVersionedRequest) returns (SendAsyncResponse); + + // Establishes a stream with the server to receive sequenced events from the domain after the given + // counter. The delivered events will have a sequential counter and monotonically increasing timestamp. + // TBD: Message TTL - How far back should it go when the participant initiates a subscription? + rpc Subscribe(SubscriptionRequest) returns (stream SubscriptionResponse); + + // Same as `Subscribe(SubscriptionRequest)` but delivers byte array rather than typed + // message to support multiple versions. + rpc SubscribeVersioned(SubscriptionRequest) returns (stream VersionedSubscriptionResponse); + + // Establishes a stream with the server to receive sequenced events exactly like Subscribe, except that this is + // supposed to be used only by unauthenticated members similarly to SendAsyncUnauthenticated + rpc SubscribeUnauthenticated(SubscriptionRequest) returns (stream SubscriptionResponse); + + // Same as `SubscribeUnauthenticated(SubscriptionRequest)` but delivers byte array rather than typed + // message to support multiple versions. 
+ rpc SubscribeUnauthenticatedVersioned(SubscriptionRequest) returns (stream VersionedSubscriptionResponse); + + // Allows a member to acknowledge that they have read all events up to and including the provided timestamp, + // and that they will never re-read these events again. This information is currently only used for informational + // purposes and to provide a watermark for which it is safe to prune earlier events from the sequencer data stores. + // There is no requirement for every event to be individually acknowledged, and in fact callers are encouraged to + // only periodically record acknowledgements (at an interval of minutes is expected to be more than sufficient for + // pruning). + rpc Acknowledge(AcknowledgeRequest) returns (google.protobuf.Empty); + + // Newer version of acknowledgements. + // To be active for protocol versions >= 4. + // The argument here is an AcknowledgeRequest wrapped in SignedContent. + // The signature is checked on the server side to avoid that malicious sequencers create fake + // acknowledgements in multi-writer architectures where writers don't fully trust each other. + // + // Allows a member to acknowledge that they have read all events up to and including the provided timestamp, + // and that they will never re-read these events again. This information is currently only used for informational + // purposes and to provide a watermark for which it is safe to prune earlier events from the sequencer data stores. + // There is no requirement for every event to be individually acknowledged, and in fact callers are encouraged to + // only periodically record acknowledgements (at an interval of minutes is expected to be more than sufficient for + // pruning). + rpc AcknowledgeSigned(com.digitalasset.canton.protocol.v1.SignedContent) returns (google.protobuf.Empty); + + // Download the topology state for a member up to including the topology transaction that made the member + // known on the domain. 
+ rpc DownloadTopologyStateForInit(TopologyStateForInitRequest) returns (stream TopologyStateForInitResponse); +} + +message SendAsyncVersionedRequest { + // Contains a versioned SignedContent of v0 or higher + // which itself contains a versioned SubmissionRequest of v0 or higher. + bytes signed_submission_request = 1; +} + +message SendAsyncUnauthenticatedVersionedRequest { + // Contains a versioned SubmissionRequest of v0 or higher. + bytes submission_request = 1; +} + +message SendAsyncResponse { + Error error = 1; // Defined iff the response is an error. + + message Error { + oneof reason { + // The sequencer couldn't read the request (typically indicates a serialization and/or versioning bug). + string request_invalid = 1; + // The sequencer could read the request but refused to handle it (the request may violate a max size constraint). + string request_refused = 2; + // The sequencer is overloaded and does not have capacity to handle this request. + string overloaded = 3; + // The specified sender is not registered so the sequencer cannot guarantee publishing a Deliver event if the request can be sequenced. + string sender_unknown = 4; + // The sequencer is shutting down so is declining to process new requests + string shutting_down = 5; + // The sequencer is unavailable and can't currently process requests + string unavailable = 6; + // There are one or more recipients that are not registered so the sequencer cannot guarantee publishing a Deliver event if the request can be sequenced. + // This message was added in protocol version 1.1, therefore it must not be used by a sequencer operating on Canton 1.0 protocol version. + string unknown_recipients = 7; + } + } +} + +// Changes compared to SendAsyncResponse: added `Internal` and `Generic`. Note: `Generic` is not used yet, it is introduced for upgradability purposes. +message SendAsyncSignedResponse { + Error error = 1; // Defined iff the response is an error. 
+ + message Error { + oneof reason { + // The sequencer couldn't read the request (typically indicates a serialization and/or versioning bug). + string request_invalid = 1; + // The sequencer could read the request but refused to handle it (the request may violate a max size constraint). + string request_refused = 2; + // The sequencer is overloaded and does not have capacity to handle this request. + string overloaded = 3; + // The specified sender is not registered so the sequencer cannot guarantee publishing a Deliver event if the request can be sequenced. + string sender_unknown = 4; + // The sequencer is shutting down so is declining to process new requests + string shutting_down = 5; + // The sequencer is unavailable and can't currently process requests + string unavailable = 6; + // There are one or more recipients that are not registered so the sequencer cannot guarantee publishing a Deliver event if the request can be sequenced. + // This message was added in Canton 2.1, therefore it must not be used by a sequencer operating on Canton 1.0. + string unknown_recipients = 7; + // Can be used for internal errors. For example if the sequencer is not able to get dynamic parameters from the latest snapshot. + string internal = 8; + // Generic error + string generic = 9; + } + } +} + +message SubscriptionRequest { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StableProtoVersion"; + + string member = 1; + // the counter indicates the next event to receive + // e.g. requesting events from 0 will send the first event (index 0) when it exists. + // a subscription request acts as an implicit ack for events with a lower counter value. + int64 counter = 2; +} + +message SubscriptionResponse { + com.digitalasset.canton.protocol.v1.SignedContent signed_sequenced_event = 1; + + // optional, canton trace context + // + // DO NOT RENAME this field, as the ApiRequestLogger uses reflection to read this field. 
+ // + // We typically use a header to transfer this information however streamed items do + // not have per item/message headers, so we instead in this instance set it in the body. + com.digitalasset.canton.v0.TraceContext trace_context = 2; // optional, canton trace context +} + +message VersionedSubscriptionResponse { + // Contains a versioned SignedContent of v0 or higher + bytes signed_sequenced_event = 1; + + // optional, canton trace context + // + // DO NOT RENAME this field, as the ApiRequestLogger uses reflection to read this field. + // + // We typically use a header to transfer this information however streamed items do + // not have per item/message headers, so we instead in this instance set it in the body. + com.digitalasset.canton.v0.TraceContext trace_context = 2; // optional, canton trace context + + // Traffic state for the subscribed member + // Optional value which is set if the sequencer enforces traffic limits + SequencedEventTrafficState traffic_state = 3; +} + +message AcknowledgeRequest { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StableProtoVersion"; + + // Who is the acknowledgement being sent on behalf of + string member = 1; + // The timestamp in sequencer time that they are acknowledging and will not re-read earlier events. + // It is expected that the caller has received an event with this timestamp but this is not validated. 
+ google.protobuf.Timestamp timestamp = 2; +} + +message TopologyStateForInitRequest { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion"; + + string member = 1; +} + +message TopologyStateForInitResponse { + com.digitalasset.canton.protocol.v0.TopologyTransactions topology_transactions = 1; +} + +message SequencedEventTrafficState { + // Remaining extra traffic at the time of this event + uint64 extra_traffic_remainder = 2; + + // Total extra traffic consumed at the time of this event + uint64 extra_traffic_consumed = 3; +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/domain/api/v0/service_agreement.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/domain/api/v0/service_agreement.proto new file mode 100644 index 0000000000..8bf7a1b596 --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/domain/api/v0/service_agreement.proto @@ -0,0 +1,14 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.domain.api.v0; + +import "com/digitalasset/canton/protocol/v0/sequencing.proto"; + +message GetServiceAgreementRequest {} + +message GetServiceAgreementResponse { + com.digitalasset.canton.protocol.v0.ServiceAgreement agreement = 1; // Optional, present if the domain requires a service agreement +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/domain/api/v1/sequencer_connection.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/domain/api/v1/sequencer_connection.proto new file mode 100644 index 0000000000..fb850f73ce --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/domain/api/v1/sequencer_connection.proto @@ -0,0 +1,21 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.domain.api.v1; + +import "com/digitalasset/canton/domain/api/v0/sequencer_connection.proto"; +import "scalapb/scalapb.proto"; + +message SequencerConnections { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StorageProtoVersion"; + + repeated com.digitalasset.canton.domain.api.v0.SequencerConnection sequencer_connections = 1; + + // This field determines the minimum level of agreement, or consensus, required among the sequencers before a message + // is considered reliable and accepted by the system. + // The value set here should not be zero. However, to maintain backward compatibility with older clients, a zero value + // is interpreted as equivalent to the size of sequencer_connections. 
+ uint32 sequencerTrustThreshold = 2; +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/health/admin/v0/status_service.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/health/admin/v0/status_service.proto new file mode 100644 index 0000000000..91c1487e79 --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/health/admin/v0/status_service.proto @@ -0,0 +1,106 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.health.admin.v0; + +import "google/protobuf/duration.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/wrappers.proto"; + +service StatusService { + rpc Status(google.protobuf.Empty) returns (NodeStatus); + // Streams back a zip file as byte chunks, containing a health dump of the full canton process (including other nodes) + rpc HealthDump(HealthDumpRequest) returns (stream HealthDumpChunk); +} + +message TopologyQueueStatus { + // how many topology changes are currently queued at the manager + uint32 manager = 1; + // how many topology changes are currently queued at the dispatcher + uint32 dispatcher = 2; + // how many topology changes are currently waiting to become effective (across all connected domains in the case of participants) + uint32 clients = 3; +} + +message NodeStatus { + message Status { + string id = 1; + google.protobuf.Duration uptime = 2; + map<string, int32> ports = 3; + bytes extra = 4; // contains extra information depending on the node type + bool active = 5; // Indicate if the node is active, usually true unless it's a replicated node that is passive + TopologyQueueStatus topology_queues = 6; // indicates the state of the topology queues (manager / dispatcher only where they exist) + repeated ComponentStatus components = 7; // Health state of component dependencies of the node + } + + message 
ComponentStatus { + string name = 1; + oneof status { + StatusData ok = 2; + StatusData degraded = 3; + StatusData failed = 4; + } + message StatusData { + google.protobuf.StringValue description = 1; + } + } + + message NotInitialized { + bool active = 1; // Indicate if the node is active, usually true unless it's a replicated node that is passive + } + + oneof response { + NotInitialized not_initialized = 1; // node is running but has not been initialized yet + Status success = 2; // successful response from a running and initialized node + } +} + +message HealthDumpRequest { + // Size of the byte chunks to stream back. + // Defaults to 2MB (half of the default gRPC max message size) + google.protobuf.UInt32Value chunkSize = 1; +} + +message HealthDumpChunk { + bytes chunk = 1; // A chunk of the health dump file +} + +// domain node specific extra status info +message DomainStatusInfo { + repeated string connected_participants = 1; + // optional - only set if a sequencer is being run by the domain + SequencerHealthStatus sequencer = 2; +} + +// participant node specific extra status info +message ParticipantStatusInfo { + message ConnectedDomain { + string domain = 1; + bool healthy = 2; + } + repeated ConnectedDomain connected_domains = 1; + // Indicate if the participant node is active + // True if the participant node is replicated and is the active replica, or true if not replicated + bool active = 2; +} + +message SequencerNodeStatus { + repeated string connected_participants = 1; + // required - status of the sequencer component it is running + SequencerHealthStatus sequencer = 2; + string domain_id = 3; +} + +// status of the sequencer component +message SequencerHealthStatus { + // is the sequencer component active - can vary by implementation for what this means + bool active = 1; + // optionally set details on how sequencer is healthy/unhealthy + google.protobuf.StringValue details = 2; +} + +message MediatorNodeStatus { + string domain_id = 1; +} 
diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/acs_commitments.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/acs_commitments.proto new file mode 100644 index 0000000000..ab833d6c43 --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/acs_commitments.proto @@ -0,0 +1,25 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.protocol.v0; + +import "google/protobuf/timestamp.proto"; +import "scalapb/scalapb.proto"; + +// A commitment to the active contract set (ACS) that is shared between two participants on a given domain at a given time. +// Given a commitment scheme to the ACS, the semantics are as follows: the sender declares that the shared ACS was +// exactly the one committed to, at every commitment tick during the specified period and as determined by the period's +// interval. The interval is assumed to be a round number of seconds. The ticks then start at the Java EPOCH time, and +// are exactly `interval` apart. +// All fields are mandatory. 
+message AcsCommitment { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StableProtoVersion"; + string domain_id = 1; + string sending_participant = 2; + string counter_participant = 3; + google.protobuf.Timestamp from_exclusive = 4; + google.protobuf.Timestamp to_inclusive = 5; + bytes commitment = 6; +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/causality.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/causality.proto new file mode 100644 index 0000000000..0ec59b9568 --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/causality.proto @@ -0,0 +1,49 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.protocol.v0; + +import "com/digitalasset/canton/protocol/v0/participant_transfer.proto"; +import "google/protobuf/timestamp.proto"; +import "scalapb/scalapb.proto"; + +message CausalityMessage { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StableProtoVersion"; + + string target_domain_id = 1; + TransferId transfer_id = 2; + VectorClock clock = 3; +} + +message VectorClock { + string origin_domain_id = 1; + google.protobuf.Timestamp local_ts = 2; + string party_id = 4; + // NOTE(review): map type parameters were lost in extraction; reconstructed as + // domain-id -> timestamp — confirm against upstream canton causality.proto + map<string, google.protobuf.Timestamp> clock = 5; +} + +message CausalityUpdate { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StableProtoVersion"; + + repeated string informeeStakeholders = 1; + google.protobuf.Timestamp ts = 2; + string domain_id = 3; + int64 request_counter = 4; + oneof tag { + TransactionUpdate transactionUpdate = 5; + TransferOutUpdate transferOutUpdate = 6; + TransferInUpdate transferInUpdate = 7; + } +} + +message TransactionUpdate {} + +message TransferOutUpdate { + TransferId transfer_id = 1; +} + +message TransferInUpdate { 
TransferId transfer_id = 1; +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/common.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/common.proto new file mode 100644 index 0000000000..d7d6685b30 --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/common.proto @@ -0,0 +1,26 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.protocol.v0; + +import "com/digitalasset/canton/crypto/v0/crypto.proto"; + +// Definitions shared by at least 2 out of mediator.proto, participant_transaction.proto and participant_transfer.proto + +enum ViewType { + MissingViewType = 0; + TransactionViewType = 1; + TransferOutViewType = 2; + TransferInViewType = 3; +} + +message GlobalKey { + bytes template_id = 1; + bytes key = 2; +} + +message DriverContractMetadata { + crypto.v0.Salt contract_salt = 1; +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/mediator.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/mediator.proto new file mode 100644 index 0000000000..33581489dd --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/mediator.proto @@ -0,0 +1,19 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.protocol.v0; + +message MediatorRejection { + enum Code { + MissingCode = 0; + InformeesNotHostedOnActiveParticipant = 1; + NotEnoughConfirmingParties = 2; + ViewThresholdBelowMinimumThreshold = 3; + InvalidRootHashMessage = 4; + Timeout = 5; + WrongDeclaredMediator = 6; + NonUniqueRequestUuid = 7; + } +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/mediator_response.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/mediator_response.proto new file mode 100644 index 0000000000..04fc936982 --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/mediator_response.proto @@ -0,0 +1,30 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.protocol.v0; + +message LocalReject { + enum Code { + MissingCode = 0; + LockedContracts = 1; + LockedKeys = 2; + InactiveContracts = 3; + DuplicateKey = 4; + CreatesExistingContract = 5; + LedgerTime = 6; + SubmissionTime = 7; + LocalTimeout = 8; + MalformedPayloads = 9; + MalformedModel = 10; + MalformedConfirmationPolicy = 11; + BadRootHashMessage = 12; + TransferOutActivenessCheck = 13; + TransferInAlreadyCompleted = 14; + TransferInAlreadyActive = 15; + TransferInAlreadyArchived = 16; + TransferInLocked = 17; + InconsistentKey = 18; + } +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/participant_transaction.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/participant_transaction.proto new file mode 100644 index 0000000000..54a879061c --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/participant_transaction.proto @@ -0,0 +1,43 @@ +// 
Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.protocol.v0; + +import "com/digitalasset/canton/crypto/v0/crypto.proto"; +import "com/digitalasset/canton/protocol/v0/common.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; +import "scalapb/scalapb.proto"; + +// Messages sent by a participant as part of the transaction protocol + +// EncryptedViewMessage + +message DeduplicationPeriod { + oneof period { + google.protobuf.Duration duration = 1; + bytes offset = 2; + } +} + +message ParticipantMetadata { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StableProtoVersion"; + + com.digitalasset.canton.crypto.v0.Salt salt = 1; + google.protobuf.Timestamp ledger_time = 2; + google.protobuf.Timestamp submission_time = 3; + string workflow_id = 4; // optional +} + +// RootHashMessage + +message RootHashMessage { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StableProtoVersion"; + + bytes root_hash = 1; + string domain_id = 2; + ViewType view_type = 3; + bytes payload = 4; // optional +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/participant_transfer.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/participant_transfer.proto new file mode 100644 index 0000000000..15a57c9a92 --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/participant_transfer.proto @@ -0,0 +1,15 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.protocol.v0; + +import "google/protobuf/timestamp.proto"; + +// Messages sent by a participant as part of the transfer protocol + +message TransferId { + string origin_domain = 1; + google.protobuf.Timestamp timestamp = 2; +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/sequencing.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/sequencing.proto new file mode 100644 index 0000000000..be3731a6b6 --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/sequencing.proto @@ -0,0 +1,78 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.protocol.v0; + +import "com/digitalasset/canton/protocol/v1/signed_content.proto"; +import "com/digitalasset/canton/v0/trace_context.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/wrappers.proto"; +import "scalapb/scalapb.proto"; + +// Messages for sequencing +message PossiblyIgnoredSequencedEvent { + int64 counter = 1; + google.protobuf.Timestamp timestamp = 2; + com.digitalasset.canton.v0.TraceContext trace_context = 3; + bool is_ignored = 4; + v1.SignedContent underlying = 5; // Optional if is_ignored == true, required otherwise +} + +// Protobuf has a hard limit on the nesting depth of messages, so this message could potentially hit +// the nesting depth limit. However this probably won't be an issue any time soon. 
+message RecipientsTree { + repeated string recipients = 1; + repeated com.digitalasset.canton.protocol.v0.RecipientsTree children = 2; +} + +message Recipients { + repeated RecipientsTree recipients_tree = 1; +} + +message CompressedBatch { + enum CompressionAlgorithm { + None = 0; + Gzip = 1; + } +} + +message ServiceAgreement { + string id = 1; // A unique identifier of an agreement within a domain, which must change when the legal text changes + string legal_text = 2; +} + +// Messages for performing a version handshake with a sequencer service +// Reused between sequencer services +// IMPORTANT: changing the version handshakes can lead to issues with upgrading domains - be very careful +// when changing the handshake message format +message Handshake { + message Request { + repeated string client_protocol_versions = 1; + google.protobuf.StringValue minimum_protocol_version = 2; + } + + message Response { + string server_protocol_version = 1; + oneof value { + Success success = 2; + Failure failure = 3; + } + } + + message Success {} + + message Failure { + string reason = 1; + } +} + +// Messages used for synchronization between sequencer nodes + +message AggregationRule { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion"; + + repeated string eligible_members = 1; + int32 threshold = 2; +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/storage.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/storage.proto new file mode 100644 index 0000000000..58eba027f6 --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/storage.proto @@ -0,0 +1,12 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.protocol.v0; + +// TODO(#3256) get rid of this +// Enables storing parties in the database as a blob +message StoredParties { + repeated string parties = 1; +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/synchronization.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/synchronization.proto new file mode 100644 index 0000000000..88303cfde3 --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/synchronization.proto @@ -0,0 +1,22 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.protocol.v0; + +import "scalapb/scalapb.proto"; + +// Messages depending on both participant_transaction.proto and participant_transfer.proto. + +message TypedSignedProtocolMessageContent { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion"; + + oneof some_signed_protocol_message { + bytes mediator_response = 2; + bytes transaction_result = 3; + bytes malformed_mediator_request_result = 4; + bytes transfer_result = 5; + bytes acs_commitment = 6; + } +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/topology.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/topology.proto new file mode 100644 index 0000000000..f48545fad6 --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/topology.proto @@ -0,0 +1,155 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.protocol.v0; + +import "com/digitalasset/canton/crypto/v0/crypto.proto"; +import "scalapb/scalapb.proto"; + +enum TopologyChangeOp { + Add = 0; + Remove = 1; + Replace = 2; +} + +enum TrustLevel { + MissingTrustLevel = 0; + Ordinary = 1; + Vip = 2; +} + +message ParticipantState { + RequestSide side = 1; + string domain = 2; + string participant = 3; + ParticipantPermission permission = 4; + TrustLevel trust_level = 5; +} + +// enum indicating the participant permission level +enum ParticipantPermission { + MissingParticipantPermission = 0; + // participant is active, can submit transactions + Submission = 1; + // participant is passive, can only confirm transactions + Confirmation = 2; + // participant is passive, can only observe transactions + Observation = 3; + // participant is disabled and can not be addressed + Disabled = 4; +} + +/** + * Side of the party to participant mapping request + * + * Party to participant mapping request need to be approved by both namespaces if the namespaces are different. 
+ * We support this by allowing to split the signatures into two transactions (so both namespace controller sign the + * same transaction, just with different "RequestSide" + * + * {Both, From_Party -> To_Participant} is split into {From, From_Party -> To_Participant} + {To, From_Party -> To_Participant} + */ +enum RequestSide { + MissingRequestSide = 0; + Both = 1; + From = 2; + To = 3; +} + +// mapping that maps a party to a participant +message PartyToParticipant { + // the side of this request (if namespace of party = participant, it must be "Both", otherwise it should map to the request side) + RequestSide side = 1; + // the party that should be mapped (from) + string party = 2; + // the target participant that the party should be mapped to + string participant = 3; + // permission of the participant (what he can do) + ParticipantPermission permission = 4; +} + +// mapping a mediator to a domain +message MediatorDomainState { + RequestSide side = 1; + string domain = 2; + string mediator = 3; +} + +// namespace delegation message (equivalent to X509v3 CA root or intermediate CAs) +message NamespaceDelegation { + // fingerprint of the root key defining the namespace + string namespace = 1; + // target key of getting full rights on the namespace (if target == namespace, it's a root CA) + com.digitalasset.canton.crypto.v0.SigningPublicKey target_key = 2; + // flag indicating whether the given delegation is a root delegation or not + // a root delegation is also allowed to issue other NamespaceDelegations. 
+ bool is_root_delegation = 3; +} + +// identifier delegation giving right to issue topology transaction on that particular identifier +message IdentifierDelegation { + // identifier + string unique_identifier = 1; + // target key getting the right + com.digitalasset.canton.crypto.v0.SigningPublicKey target_key = 2; +} + +// mapping of key-owner (participant, mediator, sequencer, domain topology manager) to a key +message OwnerToKeyMapping { + // the key owner + string key_owner = 1; + // the designated key + com.digitalasset.canton.crypto.v0.PublicKey public_key = 2; +} + +// Proves that the claimer possess the private key mentioned in the evidence +message SignedLegalIdentityClaim { + // Protobuf serialized LegalIdentityClaim + bytes claim = 1; + // Signature of the legal identity claim signed with the private key from the evidence. + com.digitalasset.canton.crypto.v0.Signature signature = 2; +} + +// A claim of a legal identity of a Canton unique identifier. +message LegalIdentityClaim { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StableProtoVersion"; + + string unique_identifier = 1; + // Evidence of the legal identity, must contain a public key fingerprint. 
+ oneof evidence { + // PEM serialized x509 cert + bytes x509_cert = 2; + } +} + +message VettedPackages { + // the participant vetting the packages + string participant = 1; + // the hash of the vetted packages + repeated string package_ids = 2; +} + +message SignedTopologyTransaction { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StableProtoVersion"; + + // serialized topology transaction (protobuf bytestring) + bytes transaction = 1; + // public key used to sign the serialized topology transaction + com.digitalasset.canton.crypto.v0.SigningPublicKey key = 2; + // signature of serialized topology transaction, signed with above key + com.digitalasset.canton.crypto.v0.Signature signature = 3; +} + +message RegisterTopologyTransactionRequest { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StableProtoVersion"; + + // the member requesting the approval of the transactions on behalf of the participant. + // typically this will be either an unauthenticated member (before the participant is active) + // or the participant itself (after it has become active) + string requested_by = 1; + string participant = 2; + string request_id = 3; + repeated bytes signed_topology_transactions = 4; + string domain_id = 5; +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/traffic_control_parameters.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/traffic_control_parameters.proto new file mode 100644 index 0000000000..95752dcf31 --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/traffic_control_parameters.proto @@ -0,0 +1,20 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.protocol.v0; + +import "google/protobuf/duration.proto"; +import "scalapb/scalapb.proto"; + +message TrafficControlParameters { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion"; + // In bytes, the maximum amount of base traffic that can be accumulated + uint64 max_base_traffic_amount = 1; + // Maximum duration over which the base rate can be accumulated + // Consequently, base_traffic_rate = max_base_traffic_amount / max_base_traffic_accumulation_duration + google.protobuf.Duration max_base_traffic_accumulation_duration = 3; + // Read scaling factor to compute the event cost. In parts per 10 000. + uint32 read_vs_write_scaling_factor = 4; +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/versioned-google-rpc-status.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/versioned-google-rpc-status.proto new file mode 100644 index 0000000000..2c51eff1db --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v0/versioned-google-rpc-status.proto @@ -0,0 +1,17 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.protocol.v0; + +import "google/rpc/status.proto"; +import "scalapb/scalapb.proto"; + +/* DA's wrapper around `google.rpc.Status` for the proper versioning and code-gen +*/ +message VersionedStatus { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StorageProtoVersion"; + + google.rpc.Status status = 1; +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/common.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/common.proto new file mode 100644 index 0000000000..0304b8b718 --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/common.proto @@ -0,0 +1,26 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.protocol.v1; + +import "com/digitalasset/canton/crypto/v0/crypto.proto"; +import "com/digitalasset/canton/protocol/v0/common.proto"; +import "google/protobuf/timestamp.proto"; + +// Compared to v0: contract_salt was added +message SerializableContract { + message Metadata { + repeated string non_maintainer_signatories = 1; + repeated string non_signatory_stakeholders = 2; + v0.GlobalKey key = 3; // optional: only set if contract has a key + repeated string maintainers = 4; // optional; only set if contract has a key + } + + string contract_id = 1; + bytes raw_contract_instance = 2; + Metadata metadata = 3; + google.protobuf.Timestamp ledger_create_time = 4; + com.digitalasset.canton.crypto.v0.Salt contract_salt = 5; // optional +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/mediator.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/mediator.proto new file mode 100644 index 
0000000000..6f8d657e0f --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/mediator.proto @@ -0,0 +1,18 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.protocol.v1; + +import "com/digitalasset/canton/protocol/v1/merkle.proto"; +import "scalapb/scalapb.proto"; + +// Messages related to the transaction or transfer protocol sent by a mediator +// sbt protobufContinuityCheck will pass even if fields are added, because the mediator is trusted. + +message InformeeTree { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StableProtoVersion"; + + GenTransactionTree tree = 1; +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/mediator_response.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/mediator_response.proto new file mode 100644 index 0000000000..3283d2a426 --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/mediator_response.proto @@ -0,0 +1,32 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.protocol.v1; + +import "google/protobuf/empty.proto"; +import "scalapb/scalapb.proto"; + +// Definition of the MediatorResponse message which is shared between the transaction and transfer protocol +message LocalVerdict { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StableProtoVersion"; + + oneof some_local_verdict { + google.protobuf.Empty local_approve = 1; + LocalReject local_reject = 2; // new type LocalReject + } +} + +message LocalReject { + reserved 1; // removed: enum Code + + // cause_prefix + details constitute the cause of the rejection. + string cause_prefix = 4; // new field + string details = 2; // renamed from reason + + repeated string resource = 3; // affected resources + + string error_code = 5; // new field + uint32 error_category = 6; // new field +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/merkle.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/merkle.proto new file mode 100644 index 0000000000..fc94536c70 --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/merkle.proto @@ -0,0 +1,45 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.protocol.v1; + +import "scalapb/scalapb.proto"; + +// Messages related to Merkle trees (including GenTransactionTree) +// Note on V1: This file is identical to the v0 +// The reason for a v1 is that MerkleSeqElement was not serialized with the protobuf version wrapper, making it +// impossible to differentiate between different Proto versions in the future. Since serialization has been changed, +// a new Proto version is being introduced here. 
+ +message GenTransactionTree { + BlindableNode submitter_metadata = 1; + BlindableNode common_metadata = 2; + BlindableNode participant_metadata = 3; + MerkleSeq root_views = 4; +} + +message BlindableNode { + oneof blinded_or_not { + bytes unblinded = 1; + bytes blinded_hash = 2; + } +} + +message MerkleSeq { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StableProtoVersion"; + + BlindableNode root_or_empty = 1; // optional; absent if the sequence is empty +} + +message MerkleSeqElement { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StableProtoVersion"; + + // Fields mandatory for Branch + BlindableNode first = 1; + BlindableNode second = 2; + + // Fields mandatory for Singleton + BlindableNode data = 3; +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/participant_transaction.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/participant_transaction.proto new file mode 100644 index 0000000000..34e35061ff --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/participant_transaction.proto @@ -0,0 +1,117 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.protocol.v1; + +import "com/digitalasset/canton/crypto/v0/crypto.proto"; +import "com/digitalasset/canton/protocol/v0/participant_transaction.proto"; +import "com/digitalasset/canton/protocol/v0/topology.proto"; +import "com/digitalasset/canton/protocol/v1/common.proto"; +import "com/digitalasset/canton/protocol/v1/merkle.proto"; +import "google/protobuf/timestamp.proto"; +import "scalapb/scalapb.proto"; + +// Messages sent by a participant as part of the transaction protocol + +// Changes from v0: ActionDescription. 
+ +message ViewNode { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StableProtoVersion"; + + BlindableNode view_common_data = 1; + BlindableNode view_participant_data = 2; + MerkleSeq subviews = 3; // changed type to MerkleSeq in v1 +} + +message ViewCommonData { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StableProtoVersion"; + + com.digitalasset.canton.crypto.v0.Salt salt = 1; + repeated Informee informees = 2; + int32 threshold = 3; +} + +message Informee { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion"; + + string party = 1; + int32 weight = 2; // optional: only set if party is confirming + com.digitalasset.canton.protocol.v0.TrustLevel required_trust_level = 3; +} + +// EncryptedViewMessage + +message ParticipantRandomnessLookup { + // An encrypted random bytestring. After decryption, used in HKDF to derive the encryption keys for the view and + // its subviews + bytes randomness = 1; + + // The fingerprint of the public key that was used to encrypt the randomness for the participant + string fingerprint = 2; +} + +message ViewParticipantMessage { + // Placeholder as the protobuf format in v1 is identical to v0. + // To avoid copy-pasting the serializer/deserializers, the Scala code reuses the v0 message. 
+ option deprecated = true; +} + +// InformeeMessage + +message InformeeMessage { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StableProtoVersion"; + + FullInformeeTree full_informee_tree = 1; + int32 protocol_version = 2; // added in v1 +} + +message LightTransactionViewTree { + GenTransactionTree tree = 1; + repeated bytes subview_hashes = 2; // added in v1 +} + +message FullInformeeTree { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StableProtoVersion"; + + GenTransactionTree tree = 1; +} + +// Compared to v0: contract changed type from v0.SerializableContract to v1.SerializableContract +message CreatedContract { + v1.SerializableContract contract = 1; + bool consumed_in_core = 2; + bool rolled_back = 3; +} + +// Compared to v0: contract changed type from v0.SerializableContract to v1.SerializableContract +message InputContract { + v1.SerializableContract contract = 1; + bool consumed = 2; +} + +// Compared to v0: mediator_id field renamed to mediator; deserialization of the field have changed +message CommonMetadata { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion"; + + com.digitalasset.canton.crypto.v0.Salt salt = 1; + bytes confirmation_policy = 2; + string domain_id = 3; + string uuid = 4; + string mediator = 5; +} + +// Compared to v0: added max_sequencing_time +message SubmitterMetadata { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StableProtoVersion"; + + com.digitalasset.canton.crypto.v0.Salt salt = 1; + repeated string act_as = 2; + string application_id = 3; + string command_id = 4; + string submitter_participant = 5; + string submission_id = 6; // optional; absent if not specified by submitter + v0.DeduplicationPeriod dedup_period = 7; + google.protobuf.Timestamp max_sequencing_time = 8; +} diff --git 
a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/participant_transfer.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/participant_transfer.proto new file mode 100644 index 0000000000..390ff8d4d3 --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/participant_transfer.proto @@ -0,0 +1,54 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.protocol.v1; + +import "com/digitalasset/canton/crypto/v0/crypto.proto"; +import "com/digitalasset/canton/protocol/v1/merkle.proto"; +import "scalapb/scalapb.proto"; + +// Messages sent by a participant as part of the transfer protocol + +message TransferOutCommonData { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StableProtoVersion"; + + com.digitalasset.canton.crypto.v0.Salt salt = 1; + string source_domain = 2; + repeated string stakeholders = 3; + repeated string admin_parties = 4; + string uuid = 5; + string source_mediator = 6; + int32 source_protocol_version = 7; // added in v1 +} + +message TransferViewTree { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StableProtoVersion"; + + BlindableNode common_data = 1; + BlindableNode participant_data = 2; +} + +message TransferInMediatorMessage { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StableProtoVersion"; + + TransferViewTree tree = 1; +} + +message TransferOutMediatorMessage { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StableProtoVersion"; + + TransferViewTree tree = 1; +} + +message TransferInCommonData { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StableProtoVersion"; + + com.digitalasset.canton.crypto.v0.Salt salt = 1; + string 
target_domain = 2; + repeated string stakeholders = 3; + string uuid = 4; + string target_mediator = 6; + int32 target_protocol_version = 7; // added in v1 +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/sequencing.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/sequencing.proto new file mode 100644 index 0000000000..b3254cbf4d --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/sequencing.proto @@ -0,0 +1,76 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.protocol.v1; + +import "com/digitalasset/canton/crypto/v0/crypto.proto"; +import "com/digitalasset/canton/protocol/v0/sequencing.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/wrappers.proto"; +import "google/rpc/status.proto"; +import "scalapb/scalapb.proto"; + +message StaticDomainParameters { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StableProtoVersion"; + + bool unique_contract_keys = 1; + repeated com.digitalasset.canton.crypto.v0.SigningKeyScheme required_signing_key_schemes = 2; + repeated com.digitalasset.canton.crypto.v0.EncryptionKeyScheme required_encryption_key_schemes = 3; + repeated com.digitalasset.canton.crypto.v0.SymmetricKeyScheme required_symmetric_key_schemes = 4; + repeated com.digitalasset.canton.crypto.v0.HashAlgorithm required_hash_algorithms = 5; + repeated com.digitalasset.canton.crypto.v0.CryptoKeyFormat required_crypto_key_formats = 6; + int32 protocol_version = 7; +} + +message Envelope { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion"; + + // Contains a v2.EnvelopeContent if signatures are empty and a v0.TypedSignedProtocolMessageContent otherwise + bytes content = 1; + 
com.digitalasset.canton.protocol.v0.Recipients recipients = 2; + // Added in v1 + repeated com.digitalasset.canton.crypto.v0.Signature signatures = 3; +} + +message Batch { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion"; + + // Changed from v0.Envelope to v1.Envelope + repeated Envelope envelopes = 1; +} + +message CompressedBatch { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion"; + + v0.CompressedBatch.CompressionAlgorithm algorithm = 1; + // contains possibly compressed v1.Batch instead of v0.Batch in v0 + bytes compressed_batch = 2; +} + +message SequencedEvent { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion"; + + int64 counter = 1; + google.protobuf.Timestamp timestamp = 2; + string domain_id = 3; + google.protobuf.StringValue message_id = 4; // Optional: Present for DeliverError, present for the sender of a Deliver. + // Changed in v1 from v0.CompressedBatch to v1.CompressedBatch + CompressedBatch batch = 5; // Optional: Present for Deliver, absent for DeliverError + google.rpc.Status deliver_error_reason = 6; // Optional: Present for DeliverError, absent for other events +} + +// Messages used for synchronization between sequencer nodes +message SubmissionRequest { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion"; + + string sender = 1; + string message_id = 2; + bool is_request = 3; + // Changed from v0.CompressedBatch to v1.CompressedBatch + CompressedBatch batch = 4; + google.protobuf.Timestamp max_sequencing_time = 5; + google.protobuf.Timestamp timestamp_of_signing_key = 6; + v0.AggregationRule aggregation_rule = 7; // optional +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/signed_content.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/signed_content.proto 
new file mode 100644 index 0000000000..2e2b6fa3fe --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/signed_content.proto @@ -0,0 +1,20 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.protocol.v1; + +import "com/digitalasset/canton/crypto/v0/crypto.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/wrappers.proto"; +import "scalapb/scalapb.proto"; + +message SignedContent { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion"; + + google.protobuf.BytesValue content = 1; + // now repeated, was not repeated in v0 + repeated com.digitalasset.canton.crypto.v0.Signature signatures = 2; + google.protobuf.Timestamp timestamp_of_signing_key = 3; +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/synchronization.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/synchronization.proto new file mode 100644 index 0000000000..a79be6c7f1 --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/synchronization.proto @@ -0,0 +1,21 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.protocol.v1; + +import "com/digitalasset/canton/crypto/v0/crypto.proto"; +import "scalapb/scalapb.proto"; + +// Messages depending on both participant_transaction.proto and participant_transfer.proto. 
+ +// Introduced in PV=DEV +message SignedProtocolMessage { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion"; + + // Made repeated in v1 + repeated com.digitalasset.canton.crypto.v0.Signature signature = 1; + // Factored one_of out into a separate message in v1. + bytes typed_signed_protocol_message_content = 2; +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/topology.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/topology.proto new file mode 100644 index 0000000000..f5a16ce182 --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v1/topology.proto @@ -0,0 +1,97 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.protocol.v1; + +import "com/digitalasset/canton/crypto/v0/crypto.proto"; +import "com/digitalasset/canton/protocol/v0/topology.proto"; +import "com/digitalasset/canton/protocol/v2/domain_params.proto"; +import "google/protobuf/timestamp.proto"; +import "scalapb/scalapb.proto"; + +message TopologyStateUpdate { + v0.TopologyChangeOp operation = 1; + // unique id of this element + string id = 2; + // the element of this topology transaction + oneof mapping { + v0.NamespaceDelegation namespace_delegation = 3; + v0.IdentifierDelegation identifier_delegation = 4; + v0.OwnerToKeyMapping owner_to_key_mapping = 5; + v0.PartyToParticipant party_to_participant = 6; + v0.SignedLegalIdentityClaim signed_legal_identity_claim = 7; + v0.ParticipantState participant_state = 8; + v0.VettedPackages vetted_packages = 9; + v0.MediatorDomainState mediator_domain_state = 10; + } +} + +// TODO(#15208) This can be dropped +message DomainParametersChange { + // domain affected by the new domain parameters + string domain = 1; + v2.DynamicDomainParameters 
domain_parameters = 2; +} + +message DomainGovernanceTransaction { + // the element of this topology transaction + oneof mapping { + DomainParametersChange domain_parameters_change = 1; + } +} + +message TopologyTransaction { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StableProtoVersion"; + + oneof transaction { + TopologyStateUpdate state_update = 1; + DomainGovernanceTransaction domain_governance = 2; + } +} + +/* + Differences with v0.RegisterTopologyTransactionResponse + - Removed deprecated Result.State.REQUESTED + - Removed Result.unique_path +*/ +message RegisterTopologyTransactionResponse { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StableProtoVersion"; + + message Result { + enum State { + MISSING_STATE = 0; + // the request failed with an error message + FAILED = 2; + // the request has been automatically rejected. + REJECTED = 3; + // the request has been accepted by the domain topology manager + ACCEPTED = 4; + // the request is a duplicate (i.e. yields an topology state element that already exists) + DUPLICATE = 5; + // the request is obsolete (i.e. 
a Remove to a topology mapping that does not exist on the server) + OBSOLETE = 6; + } + State state = 1; + string error_message = 2; + } + string requested_by = 1; + string participant = 2; + string request_id = 3; + repeated Result results = 4; + string domain_id = 5; +} + +// used by synchronization.v2 +message DomainTopologyTransactionMessage { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StableProtoVersion"; + + // signature of the domain topology manager + com.digitalasset.canton.crypto.v0.Signature signature = 1; + string domain_id = 2; + /** latest acceptable timestamp for sequencing */ + google.protobuf.Timestamp not_sequenced_after = 3; + /** versioned signed topology transactions serialized as byte-strings such that we can compute the signature here */ + repeated bytes transactions = 4; +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v2/domain_params.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v2/domain_params.proto new file mode 100644 index 0000000000..d61fdfbe1e --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v2/domain_params.proto @@ -0,0 +1,59 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.protocol.v2; + +import "com/digitalasset/canton/crypto/v0/crypto.proto"; +import "com/digitalasset/canton/protocol/v0/traffic_control_parameters.proto"; +import "google/protobuf/duration.proto"; +import "scalapb/scalapb.proto"; + +message StaticDomainParametersX { + repeated com.digitalasset.canton.crypto.v0.SigningKeyScheme required_signing_key_schemes = 1; + repeated com.digitalasset.canton.crypto.v0.EncryptionKeyScheme required_encryption_key_schemes = 2; + repeated com.digitalasset.canton.crypto.v0.SymmetricKeyScheme required_symmetric_key_schemes = 3; + repeated com.digitalasset.canton.crypto.v0.HashAlgorithm required_hash_algorithms = 4; + repeated com.digitalasset.canton.crypto.v0.CryptoKeyFormat required_crypto_key_formats = 5; + int32 protocol_version = 7; +} + +// individual per participant limits +message ParticipantDomainLimits { + // TODO(#14052) implement per participant rate limits + uint32 max_rate = 1; + // TODO(#14052) implement per participant party limits + uint32 max_num_parties = 2; + // TODO(#14052) implement per participant package limits + uint32 max_num_packages = 3; +} + +message DynamicDomainParameters { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion"; + + google.protobuf.Duration participant_response_timeout = 1; + google.protobuf.Duration mediator_reaction_timeout = 2; + google.protobuf.Duration transfer_exclusivity_timeout = 3; + google.protobuf.Duration topology_change_delay = 4; + google.protobuf.Duration ledger_time_record_time_tolerance = 5; + google.protobuf.Duration reconciliation_interval = 6; + google.protobuf.Duration mediator_deduplication_timeout = 7; + uint32 max_request_size = 8; + // topology related validation parameters + // permissioned domain: if true, only participants which have been explicitly + // whitelisted are allowed on the domain + // TODO(#14053) re-enable 
this feature and turn on the PermissionedDomainIntegrationTest + bool permissioned_domain = 9; + // required packages are the default packages that must be supported by all participants on a domain + // TODO(#14054) enable this feature (necessary for canton network such that an SVC can't do a rug-pull) + repeated string required_packages = 11; + // if true, then only the required packages are supported on the given domain + // TODO(#14055) enable this feature (was requested by Curtis) + bool only_required_packages_permitted = 12; + ParticipantDomainLimits default_participant_limits = 13; + // TODO(#14050) implement party hosting limits + uint32 default_max_hosting_participants_per_party = 14; + google.protobuf.Duration sequencer_aggregate_submission_timeout = 15; + com.digitalasset.canton.protocol.v0.TrafficControlParameters traffic_control_parameters = 16; +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v2/mediator.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v2/mediator.proto new file mode 100644 index 0000000000..1bcff6dd81 --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v2/mediator.proto @@ -0,0 +1,34 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.protocol.v2; + +import "com/digitalasset/canton/protocol/v0/common.proto"; +import "com/digitalasset/canton/protocol/v1/mediator_response.proto"; +import "google/protobuf/timestamp.proto"; +import "google/rpc/status.proto"; +import "scalapb/scalapb.proto"; + +message ParticipantReject { + repeated RejectionReason reasons = 1; // new version +} + +message RejectionReason { + repeated string parties = 1; + com.digitalasset.canton.protocol.v1.LocalReject reject = 2; // Using v1.LocalReject instead of v0.LocalReject +} + +message MediatorReject { + google.rpc.Status reason = 1; // Must not be OK +} + +message MalformedMediatorRequestResult { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion"; + + google.protobuf.Timestamp request_id = 1; + string domain_id = 2; + com.digitalasset.canton.protocol.v0.ViewType view_type = 3; + MediatorReject rejection = 4; // v1.MediatorReject -> v2.MediatorReject +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v2/mediator_response.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v2/mediator_response.proto new file mode 100644 index 0000000000..0941f6ef33 --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v2/mediator_response.proto @@ -0,0 +1,35 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.protocol.v2; + +import "com/digitalasset/canton/protocol/v1/mediator_response.proto"; +import "google/protobuf/timestamp.proto"; +import "scalapb/scalapb.proto"; + +// Definition of the MediatorResponse message which is shared between the transaction and transfer protocol + +message MediatorResponse { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StableProtoVersion"; + + google.protobuf.Timestamp request_id = 1; + string sender = 2; + reserved 3; // Removed view_hash. + com.digitalasset.canton.protocol.v1.LocalVerdict local_verdict = 4; + bytes root_hash = 5; // May be empty for Malformed verdicts if the participant cannot determine the root hash. + repeated string confirming_parties = 6; // Empty iff the verdict is malformed. + string domain_id = 7; + ViewPosition view_position = 8; // Added view_position. May be empty for Malformed verdicts +} + +// New message +message ViewPosition { + repeated MerkleSeqIndex position = 1; +} + +// New message +message MerkleSeqIndex { + repeated bool is_right = 1; +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v2/participant_transaction.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v2/participant_transaction.proto new file mode 100644 index 0000000000..687e8f1ed8 --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v2/participant_transaction.proto @@ -0,0 +1,84 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.protocol.v2; + +import "com/digitalasset/canton/crypto/v0/crypto.proto"; +import "com/digitalasset/canton/protocol/v0/common.proto"; +import "scalapb/scalapb.proto"; + +// Messages sent by a participant as part of the transaction protocol + +// SessionKeyLookup + +message SessionKeyLookup { + // An asymmetrically encrypted randomness to generate the session key. This session key decrypts the + // randomness field in `EncryptedViewMessage` + bytes session_key_randomness = 1; + + // The fingerprint of the public key that was used to encrypt the session key randomness + string fingerprint = 2; +} + +// EncryptedViewMessage + +message EncryptedViewMessage { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion"; + + bytes view_tree = 1; + + // The symmetric encryption scheme used to encrypt the view tree and the session key + com.digitalasset.canton.crypto.v0.SymmetricKeyScheme encryption_scheme = 2; + + com.digitalasset.canton.crypto.v0.Signature submitter_participant_signature = 3; // optional + bytes view_hash = 4; + // Randomness that is symmetrically encrypted with a session key. 
Used in the HKDF to derive the encryption keys for the view and its subviews + bytes randomness = 5; + repeated SessionKeyLookup session_key_randomness = 6; + string domain_id = 7; + v0.ViewType view_type = 8; +} + +// Compared to v1: templateId added to ExerciseActionDescription +message ActionDescription { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StableProtoVersion"; + + oneof description { + ActionDescription.CreateActionDescription create = 1; + ExerciseActionDescription exercise = 2; + ActionDescription.FetchActionDescription fetch = 3; + ActionDescription.LookupByKeyActionDescription lookup_by_key = 4; + } + + message CreateActionDescription { + string contract_id = 1; + bytes node_seed = 2; + string version = 3; + } + + message ExerciseActionDescription { + string input_contract_id = 1; + string choice = 2; + bytes chosen_value = 3; + repeated string actors = 4; + bool by_key = 5; + bytes node_seed = 6; + string version = 7; + bool failed = 8; + optional string interface_id = 9; + optional string template_id = 10; + } + + message FetchActionDescription { + string input_contract_id = 1; + repeated string actors = 2; + bool by_key = 3; + string version = 4; + } + + message LookupByKeyActionDescription { + v0.GlobalKey key = 1; + } +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v2/participant_transfer.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v2/participant_transfer.proto new file mode 100644 index 0000000000..ba6ed1f5ec --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v2/participant_transfer.proto @@ -0,0 +1,49 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.protocol.v2; + +import "com/digitalasset/canton/crypto/v0/crypto.proto"; +import "com/digitalasset/canton/protocol/v1/common.proto"; +import "com/digitalasset/canton/protocol/v1/signed_content.proto"; +import "com/digitalasset/canton/time/v0/time_proof.proto"; +import "scalapb/scalapb.proto"; + +// Messages sent by a participant as part of the transfer protocol +message TransferOutView { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion"; + + com.digitalasset.canton.crypto.v0.Salt salt = 1; + string submitter = 2; + reserved 3; // contract_id is now contained in contract + string target_domain = 4; + com.digitalasset.canton.time.v0.TimeProof target_time_proof = 5; + int32 target_protocol_version = 6; + string submitting_participant = 7; // added in v2 + string application_id = 8; // added in v2 + string submission_id = 9; // optional - added in v2 + string workflow_id = 10; // optional - added in v2 + string command_id = 11; // added in v2 + int64 transfer_counter = 12; // added in v2 + bytes creating_transaction_id = 13; // added in v2 + com.digitalasset.canton.protocol.v1.SerializableContract contract = 14; // added in v2 +} + +message TransferInView { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion"; + + com.digitalasset.canton.crypto.v0.Salt salt = 1; + string submitter = 2; + v1.SerializableContract contract = 3; + v1.SignedContent transfer_out_result_event = 4; + bytes creating_transaction_id = 5; + int32 source_protocol_version = 6; + string submitting_participant = 7; // added in v2 + string application_id = 8; // added in v2 + string submission_id = 9; // optional - added in v2 + string workflow_id = 10; // optional - added in v2 + string command_id = 11; // added in v2 + int64 transfer_counter = 12; // added in v2 +} diff --git 
a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v2/topology.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v2/topology.proto new file mode 100644 index 0000000000..e885f2d51c --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v2/topology.proto @@ -0,0 +1,331 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.protocol.v2; + +import "com/digitalasset/canton/crypto/v0/crypto.proto"; +import "com/digitalasset/canton/protocol/v2/domain_params.proto"; +import "google/protobuf/timestamp.proto"; +import "scalapb/scalapb.proto"; + +enum TopologyChangeOpX { + // TODO(#14048) should that be REPLACE instead of Replace? + // Adds a new or replaces an existing mapping + Replace = 0; + // Remove an existing mapping + Remove = 1; +} + +enum TrustLevelX { + MissingTrustLevel = 0; + Ordinary = 1; + Vip = 2; +} + +// enum indicating the participant permission level +enum ParticipantPermissionX { + MissingParticipantPermission = 0; + // participant is active, can submit transactions + Submission = 1; + // participant is passive, can only confirm transactions + Confirmation = 2; + // participant is passive, can only observe transactions + Observation = 3; +} + +// namespace delegation (equivalent to X509v3 CA root or intermediate CAs) +// if is_root_delegation==false, the target key may sign all mappings requiring a signature +// for the namespace except other NamespaceDelegationX mappings. 
+// authorization: a namespace delegation is either signed by the root key, or is signed by +// a key for which there exists a series of properly authorized namespace delegations +// that are ultimately signed by the root key +// UNIQUE(namespace, target_key) +message NamespaceDelegationX { + // fingerprint of the root key defining the namespace + string namespace = 1; + // target key of getting full rights on the namespace (if target == namespace, it's a root CA) + com.digitalasset.canton.crypto.v0.SigningPublicKey target_key = 2; + // flag indicating whether the given delegation is a root delegation or not + // a root delegation is also allowed to issue other NamespaceDelegations. + bool is_root_delegation = 3; +} + +// a unionspace definition that creates a new namespace supported by the +// the original owners +// authorization: the unionspace definition with serial = 1 must be authorized by all the +// owners of the namespace that form the unionspace. +// for definitions with serial > 1, we need the authorization of #threshold owners plus +// all new owners +// +// any further transaction within the unionspace other than unionspace definitions needs +// #threshold signatures of the owners +// +// UNIQUE(unionspace) +message UnionspaceDefinitionX { + // name of the unionspace, computed from the hash of its initial owners + string unionspace = 1; + // the threshold required for any subsequent update signing + int32 threshold = 2; + // the namespaces of the owners + repeated string owners = 3; +} + +// identifier delegation giving right to issue topology transaction on that particular identifier +// authorization: must have sufficient signatures by keys of the namespace of the given unique +// identifier. 
if the identifier is for a consortium, it can mean that we need multiple signatures +// UNIQUE (all fields) +message IdentifierDelegationX { + // identifier + string unique_identifier = 1; + // target key getting the right + com.digitalasset.canton.crypto.v0.SigningPublicKey target_key = 2; +} + +// mapping a member (participant, mediator, sequencer) to a key +// authorization: whoever controls the member uid +// UNIQUE(member, domain) +message OwnerToKeyMappingX { + // the sequencing process member + string member = 1; + // the designated keys + repeated com.digitalasset.canton.crypto.v0.PublicKey public_keys = 2; + // optionally restricted to domain + string domain = 3; +} + +// the trust certificate issued by the participant confirming that the participant +// wishes to be present on the given domain +// authorization: whoever controls the participant uid +// UNIQUE(participant,domain) +message DomainTrustCertificateX { + // the uid of the participant + string participant = 1; + // the uid of the domain that the participant trusts + string domain = 2; + // if true, the participant restricts the domains to which it accepts transfer-outs + // TODO(#1252) implement transfer permissions + bool transfer_only_to_given_target_domains = 3; + // the uids of the target domains that this participants supports for transfer if transfer + // is restricted. 
+ repeated string target_domains = 4; +} + +// the optional trust certificate of the domain towards the participant +// authorization: whoever controls the domain uid +// UNIQUE(domain,participant) +message ParticipantDomainPermissionX { + string domain = 1; + string participant = 2; + // the permission level of the participant on this domain (usually submission) + ParticipantPermissionX permission = 3; + // the trust level of the participant on this domain + TrustLevelX trust_level = 4; + // optional individual limits for this participant + ParticipantDomainLimits limits = 5; + // optional earliest time when participant can log in (again) + // used to temporarily disable participants + // TODO(#14049) implement participant deny list + google.protobuf.Timestamp login_after = 6; +} + +// the optional hosting limits for a party on a given domain +// these limits can be used to limit the number of participants that can host a given party +// authorization: whoever controls the domain uid +// UNIQUE(domain,party) +// TODO(#14050) implement me +message PartyHostingLimitsX { + string domain = 1; + string party = 2; + // how many participants can be assigned to the given party + uint32 quota = 3; +} + +// list of packages supported by this participant +// authorization: whoever controls the participant uid +// UNIQUE(participant, domain) +message VettedPackagesX { + // the participant vetting the packages + string participant = 1; + // the hash of the vetted packages + repeated string package_ids = 2; + // optionally restricted to domain + string domain = 3; +} + +// mapping that maps a party to a participant +// authorization: whoever controls the party and all the owners of the participants that +// were not already present in the tx with serial = n - 1 +// exception: +// - a participant can disassociate itself with the party unilaterally as long there will be +// enough participants to reach "threshold" during confirmation. 
+// UNIQUE(party, domain) +message PartyToParticipantX { + message HostingParticipant { + // the target participant that the party should be mapped to + string participant = 1; + // permission of the participant for this particular party (the actual + // will be min of ParticipantDomainPermissionX.ParticipantPermissionX and this setting) + ParticipantPermissionX permission = 2; + } + // the party that is to be represented by the participants + string party = 1; + // the signatory threshold required by the participants to be able to act on behalf of the party. + // a mapping with threshold > 1 is considered a definition of a consortium party + uint32 threshold = 2; + // which participants will host the party. + // if threshold > 1, must be Confirmation or Observation + repeated HostingParticipant participants = 3; + // if true, then the canton transaction is addressed to the party (group broadcast) + // and not to the individual participants for efficiency reasons + bool group_addressing = 4; + // optionally restricted to domain + string domain = 5; +} + +// which parties can exercise the authority over another party +// authorization: whoever controls the consortium and all the owners of the parties that +// were not already present in the tx with serial = n - 1 +// UNIQUE(party, domain) +message AuthorityOfX { + // the party for which some other parties can gain authority + string party = 1; + // the signatory threshold required by the parties to be able to act on behalf of the consortium within Daml + uint32 threshold = 2; + // the list of parties which can gain authority + repeated string parties = 3; + // optionally restricted to domain + string domain = 4; +} + +// which dynamic domain parameters are supposed to be used on the given domain +// authorization: whoever controls the domain +// UNIQUE(domain) +message DomainParametersStateX { + // domain affected by the new domain parameters + string domain = 1; + DynamicDomainParameters domain_parameters = 2; +} + 
+// which mediators and mediator groups are active on the given domain +// authorization: whoever controls the domain +// UNIQUE(domain, group) +message MediatorDomainStateX { + // the domain-id of the mediator group + string domain = 1; + // the group-id used for sharding multiple mediator groups + uint32 group = 2; + // the signature threshold required to reach consensus among the mediators + uint32 threshold = 3; + // the list of active mediators (uid) within the group + repeated string active = 4; + // the list of observing mediators (uid) within the group (read messages but don't respond) + repeated string observers = 5; +} + +// which sequencers are active on the given domain +// authorization: whoever controls the domain +// UNIQUE(domain) +message SequencerDomainStateX { + // the domain-id of the sequencer group + string domain = 1; + // the signature threshold required to reach consensus + uint32 threshold = 2; + // the list of active sequencers + repeated string active = 3; + // the list of observing sequencers (uid) within the group (read messages but don't respond) + repeated string observers = 4; + // TODO(#14046) add payload for BFT-configuration + // bytes payload = 5; +} + +// explicitly invalidate topology transactions for good +// this can be used by the domain to offboard participants forcefully or to +// remove stray topology transactions +// authorization: whoever controls the domain +// UNIQUE(domain) +message PurgeTopologyTransactionX { + // the domain-id + string domain = 1; + // the list of mappings to remove from this domain + repeated TopologyMappingX mappings = 2; +} + +// UNIQUE(domain, member) +// authorization: whoever controls the domain +message TrafficControlStateX { + // the domain-id + string domain = 1; + // member to change traffic control state for + string member = 2; + // total amount of traffic ever purchased, including the new top up amount, which must be positive and monotonically increasing. 
+ uint64 total_extra_traffic_limit = 3; +} + +message TopologyMappingX { + oneof mapping { + NamespaceDelegationX namespace_delegation = 1; + IdentifierDelegationX identifier_delegation = 2; + UnionspaceDefinitionX unionspace_definition = 3; + + OwnerToKeyMappingX owner_to_key_mapping = 4; + + DomainTrustCertificateX domain_trust_certificate = 5; + ParticipantDomainPermissionX participant_permission = 6; + PartyHostingLimitsX party_hosting_limits = 7; + VettedPackagesX vetted_packages = 8; + + PartyToParticipantX party_to_participant = 9; + AuthorityOfX authority_of = 10; + + DomainParametersStateX domain_parameters_state = 11; + MediatorDomainStateX mediator_domain_state = 12; + SequencerDomainStateX sequencer_domain_state = 13; + PurgeTopologyTransactionX purge_topology_txs = 14; + TrafficControlStateX traffic_control_state = 15; + } +} + +message TopologyTransactionX { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion"; + + TopologyChangeOpX operation = 1; + // Serial identifier of this transaction used to prevent replay attacks. + // A topology transaction is replacing the existing transaction with serial - 1 + // that has the same unique key. + uint32 serial = 2; + // the element of this topology transaction + TopologyMappingX mapping = 3; +} + +message SignedTopologyTransactionX { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion"; + + // serialized topology transaction (protobuf bytestring) + bytes transaction = 1; + // multiple signatures (at least one) + repeated com.digitalasset.canton.crypto.v0.Signature signatures = 2; + // if true, this transaction is just a proposal. this means that every signature is valid, + // but the signatures are insufficient to properly authorize the transaction. + // proposals are distributed via the topology channel too. 
proposals will be pruned automatically + // when the nodes are pruned + // TODO(#14045) implement pruning + bool proposal = 3; +} + +/** Accepted topology transaction + * + * A member sends topology transactions to the topology transaction broadcast address. + * The transactions are validated by all members individually against their respective domain store, + * including the member the submitted the broadcast. + */ +message TopologyTransactionsBroadcastX { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion"; + message Broadcast { + string broadcast_id = 1; + repeated SignedTopologyTransactionX transactions = 2; + } + string domain = 1; + repeated Broadcast broadcasts = 2; +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v3/mediator.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v3/mediator.proto new file mode 100644 index 0000000000..4ee6a483d4 --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v3/mediator.proto @@ -0,0 +1,42 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.protocol.v3; + +import "com/digitalasset/canton/protocol/v2/mediator.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; +import "scalapb/scalapb.proto"; + +message Verdict { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion"; + + oneof some_verdict { + google.protobuf.Empty approve = 1; + com.digitalasset.canton.protocol.v2.ParticipantReject participant_reject = 2; + com.digitalasset.canton.protocol.v2.MediatorReject mediator_reject = 3; // new version + } +} + +message TransactionResultMessage { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion"; + + google.protobuf.Timestamp request_id = 1; + Verdict verdict = 2; // v2.Verdict -> v3.Verdict + bytes root_hash = 3; + string domain_id = 4; +} + +message TransferResult { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion"; + + google.protobuf.Timestamp request_id = 1; + oneof domain { + string source_domain = 2; // result for transfer-out request + string target_domain = 3; // result for transfer-in request + } + repeated string informees = 4; + Verdict verdict = 5; // new Verdict type +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v3/participant_transaction.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v3/participant_transaction.proto new file mode 100644 index 0000000000..c4f22c5018 --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v3/participant_transaction.proto @@ -0,0 +1,43 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.protocol.v3; + +import "com/digitalasset/canton/crypto/v0/crypto.proto"; +import "com/digitalasset/canton/protocol/v0/common.proto"; +import "com/digitalasset/canton/protocol/v1/participant_transaction.proto"; +import "com/digitalasset/canton/protocol/v2/participant_transaction.proto"; +import "scalapb/scalapb.proto"; + +// Messages sent by a participant as part of the transaction protocol + +// Compared to v2: action_description changed type from v1.ActionDescription to v3.ActionDescription +message ViewParticipantData { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StableProtoVersion"; + com.digitalasset.canton.crypto.v0.Salt salt = 1; + repeated v1.InputContract core_inputs = 2; + repeated v1.CreatedContract created_core = 3; + repeated string created_in_subview_archived_in_core = 4; // ids of contracts created in a subview and archived in the core + repeated ViewParticipantData.ResolvedKey resolved_keys = 5; + v2.ActionDescription action_description = 6; + ViewParticipantData.RollbackContext rollback_context = 7; // optional; defaults to the empty RollbackContext if omitted. 
+ + message FreeKey { + repeated string maintainers = 1; + } + + message ResolvedKey { + v0.GlobalKey key = 1; + oneof resolution { + string contract_id = 2; // the key resolves to a contract + FreeKey free = 3; // the key is free and these are the maintainers + } + } + + message RollbackContext { + repeated int32 rollback_scope = 1; + int32 next_child = 2; + } +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v4/synchronization.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v4/synchronization.proto new file mode 100644 index 0000000000..c0678c4b63 --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v4/synchronization.proto @@ -0,0 +1,37 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.protocol.v4; + +import "com/digitalasset/canton/protocol/v0/participant_transaction.proto"; +import "com/digitalasset/canton/protocol/v0/topology.proto"; +import "com/digitalasset/canton/protocol/v1/participant_transaction.proto"; +import "com/digitalasset/canton/protocol/v1/participant_transfer.proto"; +import "com/digitalasset/canton/protocol/v1/topology.proto"; +import "com/digitalasset/canton/protocol/v2/participant_transaction.proto"; +import "com/digitalasset/canton/protocol/v2/topology.proto"; +import "scalapb/scalapb.proto"; + +// Messages depending on both participant_transaction.proto and participant_transfer.proto. 
+ +message EnvelopeContent { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion"; + + oneof some_envelope_content { + v1.InformeeMessage informee_message = 1; + // Removed field signed_message in v2 + v2.EncryptedViewMessage encrypted_view_message = 3; + // TODO(#15223) remove once we move to 3.0 + v1.DomainTopologyTransactionMessage domain_topology_transaction_message = 5; + v1.TransferOutMediatorMessage transfer_out_mediator_message = 6; + v1.TransferInMediatorMessage transfer_in_mediator_message = 7; + v0.RootHashMessage root_hash_message = 8; + // TODO(#15223) remove once we move to 3.0 + v0.RegisterTopologyTransactionRequest register_topology_transaction_request = 9; + v1.RegisterTopologyTransactionResponse register_topology_transaction_response = 10; + // Removed field causality_update in v2 + v2.TopologyTransactionsBroadcastX topology_transactions_broadcast = 12; + } +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/pruning/admin/v0/pruning.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/pruning/admin/v0/pruning.proto new file mode 100644 index 0000000000..0c452fd5c3 --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/pruning/admin/v0/pruning.proto @@ -0,0 +1,86 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.pruning.admin.v0; + +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; + +message SetSchedule { + message Request { + PruningSchedule schedule = 1; + } + message Response {} +} + +message GetSchedule { + message Request {} + message Response { + PruningSchedule schedule = 1; + } +} + +message SetParticipantSchedule { + message Request { + ParticipantPruningSchedule schedule = 1; + } + message Response {} +} + +message GetParticipantSchedule { + message Request {} + message Response { + ParticipantPruningSchedule schedule = 1; + } +} + +message PruningSchedule { + string cron = 1; + google.protobuf.Duration max_duration = 2; + google.protobuf.Duration retention = 3; +} + +message ParticipantPruningSchedule { + PruningSchedule schedule = 1; + bool prune_internally_only = 2; +} + +message ClearSchedule { + message Request {} + message Response {} +} + +message SetCron { + message Request { + string cron = 1; + } + message Response {} +} + +message SetMaxDuration { + message Request { + google.protobuf.Duration max_duration = 1; + } + message Response {} +} + +message SetRetention { + message Request { + google.protobuf.Duration retention = 1; + } + message Response {} +} + +message LocatePruningTimestamp { + message Request { + // the index to look up the offset for, needs to be 1 or larger + // i.e. 
1 for returning the first timestamp, 2 for the second timestamp etc + int32 index = 1; + } + message Response { + // optional, not set if less than `index` events exist + google.protobuf.Timestamp timestamp = 1; + } +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/scalapb/package.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/scalapb/package.proto new file mode 100644 index 0000000000..a118cbc6f3 --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/scalapb/package.proto @@ -0,0 +1,14 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton; + +import "scalapb/scalapb.proto"; + +option (scalapb.options) = { + scope: PACKAGE + preserve_unknown_fields: false + no_default_values_in_constructor: true +}; diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/time/admin/v0/domain_time_service.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/time/admin/v0/domain_time_service.proto new file mode 100644 index 0000000000..17c2dcf762 --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/time/admin/v0/domain_time_service.proto @@ -0,0 +1,35 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.domain.api.v0; + +import "google/protobuf/duration.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/wrappers.proto"; + +service DomainTimeService { + // fetch the domain time which has been received within a supplied freshness bound defined by the local clock + rpc FetchTime(FetchTimeRequest) returns (FetchTimeResponse); + // await for the supplied time to be reached on the target domain + rpc AwaitTime(AwaitTimeRequest) returns (google.protobuf.Empty); +} + +message FetchTimeRequest { + // optional - must be specified for participants but can be defaulted for domain entities as they only have one possible domain + google.protobuf.StringValue domain_id = 1; + google.protobuf.Duration freshness_bound = 2; +} + +message FetchTimeResponse { + google.protobuf.Timestamp timestamp = 1; +} + +message AwaitTimeRequest { + // optional - must be specified for participants but can be defaulted for domain entities as they only have one possible domain + google.protobuf.StringValue domain_id = 1; + // the domain time to wait for + google.protobuf.Timestamp timestamp = 2; +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/time/admin/v0/time_tracker_config.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/time/admin/v0/time_tracker_config.proto new file mode 100644 index 0000000000..5737ccd65a --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/time/admin/v0/time_tracker_config.proto @@ -0,0 +1,26 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.time.admin.v0; + +import "google/protobuf/duration.proto"; + +// these structures are only used for where the configuration can be adjusted +// at runtime using the participant domain connectivity commands and services. +// however they are defined here in common to be sat alongside the scala definitions +// themselves. + +message TimeProofRequestConfig { + google.protobuf.Duration initialRetryDelay = 1; + google.protobuf.Duration maxRetryDelay = 2; + google.protobuf.Duration maxSequencingDelay = 3; +} + +message DomainTimeTrackerConfig { + google.protobuf.Duration observationLatency = 1; + google.protobuf.Duration patienceDuration = 2; + google.protobuf.Duration minObservationDuration = 3; + TimeProofRequestConfig timeProofRequest = 4; +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/time/v0/time_proof.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/time/v0/time_proof.proto new file mode 100644 index 0000000000..9ae5f1672f --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/time/v0/time_proof.proto @@ -0,0 +1,13 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.time.v0; + +import "com/digitalasset/canton/protocol/v0/sequencing.proto"; + +// Messages for sequencing +message TimeProof { + com.digitalasset.canton.protocol.v0.PossiblyIgnoredSequencedEvent event = 1; // must be an ordinary event +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v0/initialization_service.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v0/initialization_service.proto new file mode 100644 index 0000000000..e63a2d9de7 --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v0/initialization_service.proto @@ -0,0 +1,53 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.topology.admin.v0; + +import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; + +/** + * The node initialization service + */ +service InitializationService { + /** + * Initialize the node with the unique identifier (can and must be done once) + * + * When a domain or participant instance starts for the first time, we need to bind it + * to a globally unique stable identifier before we can continue with the + * initialization procedure. + * + * This method is only used once during initialization. 
+ */ + rpc InitId(InitIdRequest) returns (InitIdResponse); + + /** + * Returns the id of the node (or empty if not initialized) + */ + rpc GetId(google.protobuf.Empty) returns (GetIdResponse); + + /** + * Returns the current time of the node (used for testing with static time) + */ + rpc CurrentTime(google.protobuf.Empty) returns (google.protobuf.Timestamp); +} + +message InitIdRequest { + string identifier = 1; + string fingerprint = 2; + // optional - instance id, if supplied value is empty then one will be generated + string instance = 3; +} + +message InitIdResponse { + string unique_identifier = 1; + string instance = 2; +} + +message GetIdResponse { + bool initialized = 1; + string unique_identifier = 2; + string instance = 3; +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v0/topology_aggregation_service.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v0/topology_aggregation_service.proto new file mode 100644 index 0000000000..f4f6979f76 --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v0/topology_aggregation_service.proto @@ -0,0 +1,100 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.topology.admin.v0; + +import "com/digitalasset/canton/crypto/v0/crypto.proto"; +import "com/digitalasset/canton/protocol/v0/topology.proto"; +import "google/protobuf/timestamp.proto"; + +/** + * Topology information aggregation service + * + * This service allows deep inspection into the aggregated topology state. + * The service runs both on the domain and on the participant and uses the same + * data. The service provides GRPC access to the information aggregated by the identity providing + * service client. 
+ */ +service TopologyAggregationService { + /** + * Obtain information about a certain set of active parties matching a given filter criterion. + * + * The request allows to filter per (domain, party, asOf) where the domain and party argument are + * used in order to filter the result list using the `startsWith` method on the respective resulting string. + * + * As such, if you just need to do a lookup, then define a precise filter. Given the uniqueness of the + * identifiers (and the fact that the identifiers contain key fingerprints), we should only ever get a single + * result back if we are doing a precise lookup. + * + * The response is a sequence of tuples (party, domain, participant, privilege, trust-level). + * The response is restricted to active parties and their active participants. + */ + rpc ListParties(ListPartiesRequest) returns (ListPartiesResponse); + + /** + * Obtain key owner information matching a given filter criterion. + * + * Key owners in the system are different types of entities: Participant, Mediator, Domain Topology Manager and + * Sequencer. The present method allows to define a filter to search for a key owner + * using filters on (asOf, domain, ownerType, owner) + * + * The response is a sequence of (domain, ownerType, owner, keys) where keys is a sequence of + * (fingerprint, bytes, key purpose). As such, we can use this method to export currently used signing or encryption + * public keys. + * + * This method is quite general, as depending on the arguments, very different results can be obtained. + * + * Using OwnerType = 'Participant' allows to query for all participants. + * Using OwnerType = 'Sequencer' allows to query for all sequencers defined. 
+ */ + rpc ListKeyOwners(ListKeyOwnersRequest) returns (ListKeyOwnersResponse); +} + +message ListPartiesRequest { + google.protobuf.Timestamp as_of = 1; + int32 limit = 2; + string filter_domain = 3; + string filter_party = 4; + string filter_participant = 5; +} + +message ListPartiesResponse { + message Result { + string party = 1; + message ParticipantDomains { + message DomainPermissions { + string domain = 1; + com.digitalasset.canton.protocol.v0.ParticipantPermission permission = 2; + } + string participant = 1; + /** + * permissions of this participant for this party on a per domain basis + * + * for records that only exist in the authorized store, this list will be empty. + */ + repeated DomainPermissions domains = 2; + } + repeated ParticipantDomains participants = 2; + } + repeated Result results = 2; +} + +message ListKeyOwnersRequest { + google.protobuf.Timestamp as_of = 1; + int32 limit = 2; + string filter_domain = 3; + string filter_key_owner_type = 4; + string filter_key_owner_uid = 5; +} + +message ListKeyOwnersResponse { + message Result { + string domain = 1; + string key_owner = 2; + repeated com.digitalasset.canton.crypto.v0.SigningPublicKey signing_keys = 3; + repeated com.digitalasset.canton.crypto.v0.EncryptionPublicKey encryption_keys = 4; + } + repeated Result results = 1; +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v0/topology_ext.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v0/topology_ext.proto new file mode 100644 index 0000000000..413609d2df --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v0/topology_ext.proto @@ -0,0 +1,29 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +// TODO(#15208) rename package once we move to 3.0 +package com.digitalasset.canton.protocol.v0; + +import "google/protobuf/timestamp.proto"; + +/** Topology transaction collection used during bootstrapping of domain nodes and on the admin API + * + * Please note that this message should not be in the protocol package, as it is not used on the protocol + * itself but on the admin apis. But as we can't rename the package name due to backwards compatibility + * guarantees, we've moved the file out of the protocol.proto file, but kept the package name. + */ +message TopologyTransactions { + message Item { + google.protobuf.Timestamp sequenced = 4; + google.protobuf.Timestamp valid_from = 1; + google.protobuf.Timestamp valid_until = 2; + /** Versioned signed topology transactions serialized as byte-strings. Note that we use here the serialized + version that contains the version number. Using the "Versioned" version directly here would create a circular + dependency between the proto files. The only proper solution would be to move this into a third separate file + with "cross version data files", which we might do at some point. */ + bytes transaction = 3; + } + repeated Item items = 1; +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v0/topology_manager_read_service.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v0/topology_manager_read_service.proto new file mode 100644 index 0000000000..06919b38c7 --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v0/topology_manager_read_service.proto @@ -0,0 +1,210 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.topology.admin.v0; + +import "com/digitalasset/canton/crypto/v0/crypto.proto"; +import "com/digitalasset/canton/protocol/v0/topology.proto"; +import "com/digitalasset/canton/protocol/v2/domain_params.proto"; +import "com/digitalasset/canton/topology/admin/v0/topology_ext.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/wrappers.proto"; + +// domain + idm + participant +service TopologyManagerReadService { + rpc ListAvailableStores(ListAvailableStoresRequest) returns (ListAvailableStoresResult); + rpc ListPartyToParticipant(ListPartyToParticipantRequest) returns (ListPartyToParticipantResult); + rpc ListOwnerToKeyMapping(ListOwnerToKeyMappingRequest) returns (ListOwnerToKeyMappingResult); + rpc ListNamespaceDelegation(ListNamespaceDelegationRequest) returns (ListNamespaceDelegationResult); + rpc ListIdentifierDelegation(ListIdentifierDelegationRequest) returns (ListIdentifierDelegationResult); + rpc ListSignedLegalIdentityClaim(ListSignedLegalIdentityClaimRequest) returns (ListSignedLegalIdentityClaimResult); + rpc ListParticipantDomainState(ListParticipantDomainStateRequest) returns (ListParticipantDomainStateResult); + rpc ListMediatorDomainState(ListMediatorDomainStateRequest) returns (ListMediatorDomainStateResult); + rpc ListVettedPackages(ListVettedPackagesRequest) returns (ListVettedPackagesResult); + rpc ListDomainParametersChanges(ListDomainParametersChangesRequest) returns (ListDomainParametersChangesResult); + rpc ListAll(ListAllRequest) returns (ListAllResponse); +} + +message ListNamespaceDelegationRequest { + BaseQuery base_query = 1; + string filter_namespace = 2; +} + +message ListNamespaceDelegationResult { + message Result { + BaseResult context = 1; + com.digitalasset.canton.protocol.v0.NamespaceDelegation item = 2; + string target_key_fingerprint = 3; + } + repeated Result results = 1; 
+} + +message ListIdentifierDelegationRequest { + BaseQuery base_query = 1; + string filter_uid = 2; +} + +message ListIdentifierDelegationResult { + message Result { + BaseResult context = 1; + com.digitalasset.canton.protocol.v0.IdentifierDelegation item = 2; + string target_key_fingerprint = 3; + } + repeated Result results = 1; +} + +message BaseQuery { + string filter_store = 1; + bool use_state_store = 2; + com.digitalasset.canton.protocol.v0.TopologyChangeOp operation = 3; + /** if true, then we'll filter the results according to above defined operation */ + bool filter_operation = 4; + message TimeRange { + google.protobuf.Timestamp from = 2; + google.protobuf.Timestamp until = 3; + } + oneof time_query { + google.protobuf.Timestamp snapshot = 5; + google.protobuf.Empty head_state = 6; + TimeRange range = 7; + } + string filter_signed_key = 8; + google.protobuf.StringValue protocol_version = 9; +} + +message BaseResult { + string store = 1; + // TODO(#15223) reorder once we break the admin api + google.protobuf.Timestamp sequenced = 7; + google.protobuf.Timestamp valid_from = 2; + google.protobuf.Timestamp valid_until = 3; + com.digitalasset.canton.protocol.v0.TopologyChangeOp operation = 4; + bytes serialized = 5; + string signed_by_fingerprint = 6; +} + +message ListPartyToParticipantResult { + message Result { + BaseResult context = 1; + com.digitalasset.canton.protocol.v0.PartyToParticipant item = 2; + } + repeated Result results = 2; +} + +message ListPartyToParticipantRequest { + BaseQuery base_query = 1; + string filter_party = 2; + string filter_participant = 3; + message FilterRequestSide { + com.digitalasset.canton.protocol.v0.RequestSide value = 1; + } + FilterRequestSide filter_request_side = 4; + message FilterPermission { + com.digitalasset.canton.protocol.v0.ParticipantPermission value = 1; + } + FilterPermission filter_permission = 5; +} + +message ListOwnerToKeyMappingRequest { + BaseQuery base_query = 1; + string filter_key_owner_type = 2; 
+ string filter_key_owner_uid = 3; + message FilterKeyPurpose { + com.digitalasset.canton.crypto.v0.KeyPurpose value = 1; + } + FilterKeyPurpose filter_key_purpose = 4; +} + +message ListOwnerToKeyMappingResult { + message Result { + BaseResult context = 1; + com.digitalasset.canton.protocol.v0.OwnerToKeyMapping item = 2; + string key_fingerprint = 3; + } + repeated Result results = 1; +} + +message ListSignedLegalIdentityClaimRequest { + BaseQuery base_query = 1; + string filter_uid = 2; +} + +message ListSignedLegalIdentityClaimResult { + message Result { + BaseResult context = 1; + com.digitalasset.canton.protocol.v0.SignedLegalIdentityClaim item = 2; + } + repeated Result results = 1; +} + +message ListVettedPackagesRequest { + BaseQuery base_query = 1; + string filter_participant = 2; +} + +message ListVettedPackagesResult { + message Result { + BaseResult context = 1; + com.digitalasset.canton.protocol.v0.VettedPackages item = 2; + } + repeated Result results = 1; +} + +message ListDomainParametersChangesRequest { + BaseQuery base_query = 1; +} + +message ListDomainParametersChangesResult { + message Result { + BaseResult context = 1; + oneof parameters { + com.digitalasset.canton.protocol.v2.DynamicDomainParameters v1 = 3; + } + } + + repeated Result results = 1; +} + +message ListAvailableStoresRequest {} +message ListAvailableStoresResult { + repeated string store_ids = 1; +} + +message ListParticipantDomainStateRequest { + BaseQuery base_query = 1; + string filter_domain = 2; + string filter_participant = 3; +} + +message ListParticipantDomainStateResult { + message Result { + BaseResult context = 1; + com.digitalasset.canton.protocol.v0.ParticipantState item = 2; + } + repeated Result results = 1; +} + +message ListMediatorDomainStateRequest { + BaseQuery base_query = 1; + string filter_domain = 2; + string filter_mediator = 3; +} + +message ListMediatorDomainStateResult { + message Result { + BaseResult context = 1; + 
com.digitalasset.canton.protocol.v0.MediatorDomainState item = 2; + } + repeated Result results = 1; +} + +message ListAllRequest { + BaseQuery base_query = 1; +} + +message ListAllResponse { + com.digitalasset.canton.protocol.v0.TopologyTransactions result = 1; +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v0/topology_manager_write_service.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v0/topology_manager_write_service.proto new file mode 100644 index 0000000000..fd3ac81dd7 --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v0/topology_manager_write_service.proto @@ -0,0 +1,207 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.topology.admin.v0; + +import "com/digitalasset/canton/crypto/v0/crypto.proto"; +import "com/digitalasset/canton/protocol/v0/topology.proto"; +import "com/digitalasset/canton/protocol/v2/domain_params.proto"; + +/** + * Write operations on the local topology manager. + * + * Both, participant and domain run a local topology manager exposing the same write interface. + */ +service TopologyManagerWriteService { + /** + * Authorizes a party to participant mapping change (add/remove) on the node local topology manager. 
+ */ + rpc AuthorizePartyToParticipant(PartyToParticipantAuthorization) returns (AuthorizationSuccess); + + /** + * Authorizes an owner to key mapping change (add/remove) on the local topology manager + */ + rpc AuthorizeOwnerToKeyMapping(OwnerToKeyMappingAuthorization) returns (AuthorizationSuccess); + + /** + * Authorizes a namespace delegation (root or intermediate CA) (add/remove) on the local topology manager + */ + rpc AuthorizeNamespaceDelegation(NamespaceDelegationAuthorization) returns (AuthorizationSuccess); + + /** + * Authorizes a new identifier delegation (identifier certificate) (add/remove) on the local topology manager + */ + rpc AuthorizeIdentifierDelegation(IdentifierDelegationAuthorization) returns (AuthorizationSuccess); + + /** + * Authorizes a new package vetting transaction + */ + rpc AuthorizeVettedPackages(VettedPackagesAuthorization) returns (AuthorizationSuccess); + + /** Authorizes a change of parameters for the domain */ + rpc AuthorizeDomainParametersChange(DomainParametersChangeAuthorization) returns (AuthorizationSuccess); + + /** + * Authorizes a participant domain state + */ + rpc AuthorizeParticipantDomainState(ParticipantDomainStateAuthorization) returns (AuthorizationSuccess); + + /** + * Authorizes a mediator domain state + */ + rpc AuthorizeMediatorDomainState(MediatorDomainStateAuthorization) returns (AuthorizationSuccess); + + /** + * Adds a signed topology transaction to the Authorized store + */ + rpc AddSignedTopologyTransaction(SignedTopologyTransactionAddition) returns (AdditionSuccess); +} + +message AuthorizationSuccess { + bytes serialized = 1; +} + +message AdditionSuccess {} + +message SignedTopologyTransactionAddition { + bytes serialized = 1; +} + +message AuthorizationData { + /** Add / Remove / Replace */ + com.digitalasset.canton.protocol.v0.TopologyChangeOp change = 1; + + /** + * Fingerprint of the key signing the authorization + * + * The signing key is used to identify a particular `NamespaceDelegation` or 
`IdentifierDelegation` certificate, + * which is used to justify the given authorization. + */ + string signed_by = 2; + + /** if true, the authorization will also replace the existing (makes only sense for adds) */ + bool replace_existing = 3; + + /** Force change even if dangerous */ + bool force_change = 4; +} + +message NamespaceDelegationAuthorization { + AuthorizationData authorization = 1; + + // The namespace for which the authorization is issued. + string namespace = 2; + + /** + * The fingerprint of the signing key which will be authorized to issue topology transactions for this namespace. + * + * The key needs to be present in the local key registry either by being locally + * generated or by having been previously imported. + */ + string fingerprint_of_authorized_key = 3; + + /** + * Flag indicating whether authorization is a root key delegation + */ + bool is_root_delegation = 4; +} + +message IdentifierDelegationAuthorization { + AuthorizationData authorization = 1; + string identifier = 2; + + /** + * The fingerprint of the signing key which will be authorized to issue topology transaction for this particular identifier. + * + * As with `NamespaceDelegation`s, the key needs to be present locally. + */ + string fingerprint_of_authorized_key = 3; +} + +message PartyToParticipantAuthorization { + AuthorizationData authorization = 1; + /** + * The request side of this transaction + * + * A party to participant mapping can map a party from one namespace on a participant from another namespace. + * Such a mapping needs to be authorized by both namespace keys. If the namespace is the same, we use + * RequestSide.Both and collapse into a single transaction. Otherwise, `From` needs to be signed by a namespace key + * of the party and `To` needs to be signed by a namespace key of the participant. 
+ */ + com.digitalasset.canton.protocol.v0.RequestSide side = 2; + + // The unique identifier of the party + string party = 3; + // The unique identifier of the participant + string participant = 4; + // The permission of the participant that will allow him to act on behalf of the party. + com.digitalasset.canton.protocol.v0.ParticipantPermission permission = 5; +} + +message OwnerToKeyMappingAuthorization { + AuthorizationData authorization = 1; + + /** + * The key owner + * + * An entity in Canton is described by his role and his unique identifier. As such, the same unique identifier + * can be used for a mediator, sequencer, domain topology manager or even participant. Therefore, we expect + * here the protoPrimitive of a key owner which is in effect its type as a three letter code separated + * from the unique identifier. + */ + string key_owner = 2; + + /** + * The fingerprint of the key that will be authorized + * + * The key needs to be present in the local key registry (can be imported via KeyService) + */ + string fingerprint_of_key = 3; + + /** + * Purpose of the key + */ + com.digitalasset.canton.crypto.v0.KeyPurpose key_purpose = 4; +} + +message ParticipantDomainStateAuthorization { + AuthorizationData authorization = 1; + /** which side (domain or participant) is attempting to issue the authorization */ + com.digitalasset.canton.protocol.v0.RequestSide side = 2; + /** domain this authorization refers to */ + string domain = 3; + /** participant that should be authorized */ + string participant = 4; + /** permission that should be used (lower of From / To) */ + com.digitalasset.canton.protocol.v0.ParticipantPermission permission = 5; + /** trust level that should be used (ignored for side from, defaults to Ordinary) */ + com.digitalasset.canton.protocol.v0.TrustLevel trust_level = 6; +} + +message MediatorDomainStateAuthorization { + AuthorizationData authorization = 1; + /** which side (domain or mediator) is attempting to issue the authorization */ + 
com.digitalasset.canton.protocol.v0.RequestSide side = 2; + /** domain this authorization refers to */ + string domain = 3; + /** mediator that should be authorized */ + string mediator = 4; +} + +message VettedPackagesAuthorization { + AuthorizationData authorization = 1; + string participant = 2; + repeated string package_ids = 3; +} + +message DomainParametersChangeAuthorization { + AuthorizationData authorization = 1; + /** domain this authorization refers to */ + string domain = 2; + /** new parameters for the domain */ + oneof parameters { + com.digitalasset.canton.protocol.v2.DynamicDomainParameters parameters_v1 = 3; + } +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v1/initialization_service.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v1/initialization_service.proto new file mode 100644 index 0000000000..77fb32ddb4 --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v1/initialization_service.proto @@ -0,0 +1,65 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.topology.admin.v1; + +import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; +import "com/digitalasset/canton/topology/admin/v0/topology_ext.proto"; + +/** + * The node identity initialization service + */ +service IdentityInitializationServiceX { + + /** + * Initialize the node with the unique identifier (can and must be done once) + * + * When a node starts for the first time, we need to bind it + * to a globally unique stable identifier before we can continue with the + * initialization procedure. + * + * This method is only used once during initialization. 
+ */ + rpc InitId(InitIdRequest) returns (InitIdResponse); + + /** + * Returns the owner to key transactions together with all necessary certificates, used for onboarding + */ + rpc GetOnboardingTransactions(GetOnboardingTransactionsRequest) returns (GetOnboardingTransactionsResponse); + + /** + * Returns the id of the node (or empty if not initialized) + */ + rpc GetId(google.protobuf.Empty) returns (GetIdResponse); + + + /** + * Returns the current time of the node (used for testing with static time) + */ + rpc CurrentTime(google.protobuf.Empty) returns (google.protobuf.Timestamp); + +} + +message InitIdRequest { + // the unique identifier of this node + string unique_identifier = 1; +} + +message InitIdResponse { + +} + +message GetIdResponse { + bool initialized = 1; + string unique_identifier = 2; +} + +message GetOnboardingTransactionsRequest { + +} +message GetOnboardingTransactionsResponse { + com.digitalasset.canton.protocol.v0.TopologyTransactions transactions = 1; +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v1/topology_manager_read_service.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v1/topology_manager_read_service.proto new file mode 100644 index 0000000000..1b816c4120 --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v1/topology_manager_read_service.proto @@ -0,0 +1,294 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.topology.admin.v1; + +import "com/digitalasset/canton/protocol/v2/topology.proto"; +import "com/digitalasset/canton/protocol/v2/domain_params.proto"; +import "com/digitalasset/canton/topology/admin/v0/topology_ext.proto"; + +import "google/protobuf/timestamp.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/wrappers.proto"; + + +service TopologyManagerReadServiceX { + + rpc ListNamespaceDelegation(ListNamespaceDelegationRequest) returns (ListNamespaceDelegationResult); + rpc ListUnionspaceDefinition(ListUnionspaceDefinitionRequest) returns (ListUnionspaceDefinitionResult); + rpc ListIdentifierDelegation(ListIdentifierDelegationRequest) returns (ListIdentifierDelegationResult); + + rpc ListOwnerToKeyMapping(ListOwnerToKeyMappingRequest) returns (ListOwnerToKeyMappingResult); + + rpc ListDomainTrustCertificate(ListDomainTrustCertificateRequest) returns (ListDomainTrustCertificateResult); + rpc ListParticipantDomainPermission(ListParticipantDomainPermissionRequest) returns (ListParticipantDomainPermissionResult); + rpc ListPartyHostingLimits(ListPartyHostingLimitsRequest) returns (ListPartyHostingLimitsResult); + rpc ListVettedPackages(ListVettedPackagesRequest) returns (ListVettedPackagesResult); + + rpc ListPartyToParticipant(ListPartyToParticipantRequest) returns (ListPartyToParticipantResult); + rpc ListAuthorityOf(ListAuthorityOfRequest) returns (ListAuthorityOfResult); + + rpc ListDomainParametersState(ListDomainParametersStateRequest) returns (ListDomainParametersStateResult); + rpc ListMediatorDomainState(ListMediatorDomainStateRequest) returns (ListMediatorDomainStateResult); + rpc ListSequencerDomainState(ListSequencerDomainStateRequest) returns (ListSequencerDomainStateResult); + + rpc ListPurgeTopologyTransactionX(ListPurgeTopologyTransactionXRequest) returns (ListPurgeTopologyTransactionXResult); + + rpc 
ListAvailableStores(ListAvailableStoresRequest) returns (ListAvailableStoresResult); + rpc ListAll(ListAllRequest) returns (ListAllResponse); + + rpc ListTrafficState(ListTrafficStateRequest) returns (ListTrafficStateResult); +} + +message ListTrafficStateRequest { + BaseQuery base_query = 1; + string filter_member = 2; +} + +message ListTrafficStateResult { + message Result { + BaseResult context = 1; + com.digitalasset.canton.protocol.v2.TrafficControlStateX item = 2; + } + repeated Result results = 1; +} + + +message BaseQuery { + string filter_store = 1; + // whether to query only for proposals instead of approved topology mappings + bool proposals = 2; + com.digitalasset.canton.protocol.v2.TopologyChangeOpX operation = 3; + // TODO(#14048) Decide whether the filter_operation flag is necessary on top of the optional operation field, if not remove + /** if true, then we'll filter the results according to above defined operation */ + bool filter_operation = 4; + message TimeRange { + google.protobuf.Timestamp from = 1; + google.protobuf.Timestamp until = 2; + } + oneof time_query { + google.protobuf.Timestamp snapshot = 5; + google.protobuf.Empty head_state = 6; + TimeRange range = 7; + } + string filter_signed_key = 8; + google.protobuf.StringValue protocol_version = 9; +} + +message BaseResult { + string store = 1; + google.protobuf.Timestamp sequenced = 2; + google.protobuf.Timestamp valid_from = 3; + google.protobuf.Timestamp valid_until = 4; + com.digitalasset.canton.protocol.v2.TopologyChangeOpX operation = 5; + bytes transaction_hash = 6; + int32 serial = 7; + repeated string signed_by_fingerprints = 8; +} + +message ListNamespaceDelegationRequest { + BaseQuery base_query = 1; + string filter_namespace = 2; + string filter_target_key_fingerprint = 3; +} + +message ListNamespaceDelegationResult { + message Result { + BaseResult context = 1; + com.digitalasset.canton.protocol.v2.NamespaceDelegationX item = 2; + } + repeated Result results = 1; +} + +message 
ListUnionspaceDefinitionRequest { + BaseQuery base_query = 1; + string filter_namespace = 2; +} + +message ListUnionspaceDefinitionResult { + message Result { + BaseResult context = 1; + com.digitalasset.canton.protocol.v2.UnionspaceDefinitionX item = 2; + } + repeated Result results = 1; +} + +message ListIdentifierDelegationRequest { + BaseQuery base_query = 1; + string filter_uid = 2; + string filter_target_key_fingerprint = 3; +} + +message ListIdentifierDelegationResult { + message Result { + BaseResult context = 1; + com.digitalasset.canton.protocol.v2.IdentifierDelegationX item = 2; + } + repeated Result results = 1; +} + + +message ListOwnerToKeyMappingRequest { + BaseQuery base_query = 1; + string filter_key_owner_type = 2; + string filter_key_owner_uid = 3; +} + +message ListOwnerToKeyMappingResult { + message Result { + BaseResult context = 1; + com.digitalasset.canton.protocol.v2.OwnerToKeyMappingX item = 2; + } + repeated Result results = 1; +} + + +message ListDomainTrustCertificateRequest { + BaseQuery base_query = 1; + string filter_uid = 2; +} + +message ListDomainTrustCertificateResult { + message Result { + BaseResult context = 1; + com.digitalasset.canton.protocol.v2.DomainTrustCertificateX item = 2; + } + repeated Result results = 1; +} + +message ListParticipantDomainPermissionRequest { + BaseQuery base_query = 1; + string filter_uid = 2; +} + +message ListParticipantDomainPermissionResult { + message Result { + BaseResult context = 1; + com.digitalasset.canton.protocol.v2.ParticipantDomainPermissionX item = 2; + } + repeated Result results = 1; +} + +message ListPartyHostingLimitsRequest { + BaseQuery base_query = 1; + string filter_uid = 2; +} + +message ListPartyHostingLimitsResult { + message Result { + BaseResult context = 1; + com.digitalasset.canton.protocol.v2.PartyHostingLimitsX item = 2; + } + repeated Result results = 1; +} + +message ListVettedPackagesRequest { + BaseQuery base_query = 1; + string filter_participant = 2; +} + 
+message ListVettedPackagesResult { + message Result { + BaseResult context = 1; + com.digitalasset.canton.protocol.v2.VettedPackagesX item = 2; + } + repeated Result results = 1; +} +message ListPartyToParticipantRequest { + BaseQuery base_query = 1; + string filter_party = 2; + string filter_participant = 3; +} + +message ListPartyToParticipantResult { + message Result { + BaseResult context = 1; + com.digitalasset.canton.protocol.v2.PartyToParticipantX item = 2; + } + repeated Result results = 2; +} + +message ListAuthorityOfRequest { + BaseQuery base_query = 1; + string filter_party = 2; +} + +message ListAuthorityOfResult { + message Result { + BaseResult context = 1; + com.digitalasset.canton.protocol.v2.AuthorityOfX item = 2; + } + repeated Result results = 2; +} + +message ListDomainParametersStateRequest { + BaseQuery base_query = 1; + string filter_domain = 2; +} + +message ListDomainParametersStateResult { + message Result { + BaseResult context = 1; + com.digitalasset.canton.protocol.v2.DynamicDomainParameters item = 2; + } + repeated Result results = 1; +} + +message ListMediatorDomainStateRequest { + BaseQuery base_query = 1; + string filter_domain = 2; +} + +message ListMediatorDomainStateResult { + message Result { + BaseResult context = 1; + com.digitalasset.canton.protocol.v2.MediatorDomainStateX item = 2; + } + repeated Result results = 1; +} + +message ListSequencerDomainStateRequest { + BaseQuery base_query = 1; + string filter_domain = 2; +} + +message ListSequencerDomainStateResult { + message Result { + BaseResult context = 1; + com.digitalasset.canton.protocol.v2.SequencerDomainStateX item = 2; + } + repeated Result results = 1; +} + + +message ListPurgeTopologyTransactionXRequest { + BaseQuery base_query = 1; + string filter_domain = 2; +} + +message ListPurgeTopologyTransactionXResult { + message Result { + BaseResult context = 1; + com.digitalasset.canton.protocol.v2.PurgeTopologyTransactionX item = 2; + } + repeated Result results = 1; 
+}
+
+
+message ListAvailableStoresRequest {
+
+}
+
+message ListAvailableStoresResult {
+  repeated string store_ids = 1;
+}
+
+message ListAllRequest {
+  BaseQuery base_query = 1;
+}
+
+message ListAllResponse {
+  com.digitalasset.canton.protocol.v0.TopologyTransactions result = 1;
+}
diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v1/topology_manager_write_service.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v1/topology_manager_write_service.proto
new file mode 100644
index 0000000000..a6bca99350
--- /dev/null
+++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v1/topology_manager_write_service.proto
@@ -0,0 +1,127 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+syntax = "proto3";
+
+package com.digitalasset.canton.topology.admin.v1;
+
+import "com/digitalasset/canton/protocol/v2/topology.proto";
+
+/**
+ * Write operations on the local topology manager.
+ *
+ * Both participant and domain run a local topology manager exposing the same write interface.
+ */
+service TopologyManagerWriteServiceX {
+
+  rpc Authorize(AuthorizeRequest) returns (AuthorizeResponse);
+
+  rpc AddTransactions(AddTransactionsRequest) returns (AddTransactionsResponse);
+
+  rpc SignTransactions(SignTransactionsRequest) returns (SignTransactionsResponse);
+
+}
+
+message AuthorizeRequest {
+  message Proposal {
+    /** Replace / Remove */
+    com.digitalasset.canton.protocol.v2.TopologyChangeOpX change = 1;
+
+    /** Optionally, the serial number of this request (auto-determined if omitted) */
+    uint32 serial = 2;
+
+    /** The mapping to be authorized */
+    com.digitalasset.canton.protocol.v2.TopologyMappingX mapping = 3;
+  }
+
+  oneof type {
+    /**
+     * Propose a transaction and distribute it.
+     * It is authorized if the node has enough signing keys
+     */
+    Proposal proposal = 1;
+    /**
+     * Authorize a transaction, meaning the node needs to be able to fully sign it locally
+     */
+    string transaction_hash = 2;
+  }
+
+  /**
+   * If true: the transaction is only signed if the new signatures will result in the transaction being fully
+   * authorized. Otherwise an error is returned.
+   * If false: the transaction is signed and the signature distributed. The transaction may still not be fully
+   * authorized and remain as a proposal.
+   */
+  bool must_fully_authorize = 3;
+
+  /** Force change even if dangerous */
+  bool force_change = 4;
+
+  /**
+   * Fingerprint of the keys signing the authorization
+   *
+   * The signing key is used to identify a particular `NamespaceDelegation` or `IdentifierDelegation` certificate,
+   * which is used to justify the given authorization.
+   * Optional, if empty, the signing key will be auto-determined.
+   */
+  repeated string signed_by = 5;
+
+  /**
+   * The store that is used as the underlying source for executing this request.
+   * If `store` is a domain store, the resulting topology transaction will only be available on the respective domain.
+   * If `store` is the authorized store, the resulting topology transaction may or may not be synchronized automatically
+   * to all domains that the node is currently connected to or will be connected to in the future.
+   *
+   * Selecting a specific domain store might be necessary, if the transaction to authorize by hash or the previous
+   * generation of the submitted proposal is only available on the domain store and not in the authorized store.
+   */
+  string store = 6;
+}
+
+message AuthorizeResponse {
+  /** the generated signed topology transaction */
+  com.digitalasset.canton.protocol.v2.SignedTopologyTransactionX transaction = 1;
+}
+
+message AddTransactionsRequest {
+  /**
+   * The transactions that should be added to the target store as indicated by the parameter `store`.
+ */ + repeated com.digitalasset.canton.protocol.v2.SignedTopologyTransactionX transactions = 1; + + /** Force change even if dangerous */ + bool force_change = 2; + + /** + * The store that is used as the underlying source for executing this request. + * If `store` is a domain store, the resulting topology transaction will only be available on the respective domain. + * If `store` is the authorized store, the resulting topology transaction may or may not be synchronized automatically + * to all domains that the node is currently connected to or will be connected to in the future. + * + * Selecting a specific domain store might be necessary, if the transaction to authorize by hash or the previous + * generation of the submitted proposal is only available on the domain store and not in the authorized store. + */ + string store = 3; +} + +message AddTransactionsResponse { + +} + +message SignTransactionsRequest { + /** The transactions to be signed, but will not be stored in the authorized store */ + repeated com.digitalasset.canton.protocol.v2.SignedTopologyTransactionX transactions = 1; + /** + * Fingerprint of the keys signing the authorization + * + * The signing key is used to identify a particular `NamespaceDelegation` or `IdentifierDelegation` certificate, + * which is used to justify the given authorization. + * Optional, if empty, the signing key be auto-determined. 
+ */ + repeated string signed_by = 2; +} + +message SignTransactionsResponse { + /** The transaction with the additional signatures from this node */ + repeated com.digitalasset.canton.protocol.v2.SignedTopologyTransactionX transactions = 1; +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/traffic/v0/member_traffic_status.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/traffic/v0/member_traffic_status.proto new file mode 100644 index 0000000000..4becc720f4 --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/traffic/v0/member_traffic_status.proto @@ -0,0 +1,33 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.traffic.v0; + +import "google/protobuf/timestamp.proto"; +import "google/protobuf/wrappers.proto"; + +// Full traffic status for a member at a point in time +message MemberTrafficStatus { + // Represents a top up event valid from a certain timestamp + message TopUpEvent { + // Timestamp at which the top up becomes valid (inclusive) + google.protobuf.Timestamp effective_at = 1; + // Topology transaction serial id that is used to discriminate between top ups with the same effective_at, which is possible + uint32 serial = 2; + // Traffic limit of the top up + uint64 extra_traffic_limit = 3; + } + + // Member the status is about + string member = 1; + // Total extra traffic bought. Optional. 
+ google.protobuf.UInt64Value total_extra_traffic_limit = 2; + // Total extra traffic consumed + uint64 total_extra_traffic_consumed = 3; + // Current and future top up events that have been registered but are not necessarily active yet + repeated TopUpEvent top_up_events = 4; + // Timestamp at which the status is valid + google.protobuf.Timestamp ts = 5; +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/v0/trace_context.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/v0/trace_context.proto new file mode 100644 index 0000000000..c6193e45c5 --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/v0/trace_context.proto @@ -0,0 +1,14 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.v0; + +import "google/protobuf/wrappers.proto"; + +message TraceContext { + // https://www.w3.org/TR/trace-context/ + google.protobuf.StringValue traceparent = 1; + google.protobuf.StringValue tracestate = 2; +} diff --git a/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/version/untyped_versioned_message.proto b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/version/untyped_versioned_message.proto new file mode 100644 index 0000000000..d96fd32c9f --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/com/digitalasset/canton/version/untyped_versioned_message.proto @@ -0,0 +1,25 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.version; + +/* `UntypedVersionedMessage` is the generic wrapper for versioned messages + * The version is included in the `version` integer and the serialized + * message is contained in `data`. 
+ + * Internally, we use the typed alias `VersionedMessage[ValueClass]` to avoid + * the risk of confusion between different proto versioned messaged (see, e.g., + * the return type of the `HasVersionedWrapper.toProtoVersioned` method). + + * The `oneof` prevents the serialized message to be empty if `data` is empty + * and `version` is 0 (the default value for an integer). Such an empty message + * cannot be deserialized using previous version of our parsing method. +*/ +message UntypedVersionedMessage { + oneof wrapper { + bytes data = 1; + } + int32 version = 2; +} diff --git a/canton-3x/community/base/src/main/protobuf/google/rpc/package.proto b/canton-3x/community/base/src/main/protobuf/google/rpc/package.proto new file mode 100644 index 0000000000..ac002835da --- /dev/null +++ b/canton-3x/community/base/src/main/protobuf/google/rpc/package.proto @@ -0,0 +1,13 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package google.rpc; + +import "scalapb/scalapb.proto"; + +option (scalapb.options) = { + scope: PACKAGE + flat_package: false +}; diff --git a/canton-3x/community/base/src/main/resources/rewrite-appender.xml b/canton-3x/community/base/src/main/resources/rewrite-appender.xml new file mode 100644 index 0000000000..776cbca616 --- /dev/null +++ b/canton-3x/community/base/src/main/resources/rewrite-appender.xml @@ -0,0 +1,373 @@ + + + + + + ${REWRITE_TESTING:-false} + + + + + + + + io.grpc.netty.NettyServerStream + INFO + Stream closed before write could take place + + + + io.grpc.netty.NettyServerHandler + INFO + Stream Error + Received DATA frame for an unknown stream + + + io.grpc.netty.NettyServerHandler + INFO + Stream Error + Stream closed before write could take place + + + + com.zaxxer.hikari.pool.ProxyConnection + INFO + marked as broken + + + + org.apache.pekko.stream.Materializer + INFO + [completion at client] Upstream failed + 
UNAVAILABLE + + + + com.digitalasset.canton.platform.store.cache.StateCache + INFO + Failure in pending cache update for key + + + + org.flywaydb.core.internal.sqlscript.DefaultSqlScriptExecutor + INFO + can vacuum it + + + + + com.github.benmanes.caffeine.cache.LocalAsyncCache + + Exception thrown during asynchronous load + INFO + + + + + com.digitalasset.canton.platform.indexer.RecoveringIndexer + Error while running indexer, restart scheduled + MERGE INTO packages USING DUAL on package_id + INFO + + + com.digitalasset.canton.platform.store.appendonlydao.DbDispatcher + INDEX_DB_SQL_NON_TRANSIENT_ERROR + MERGE INTO packages USING DUAL on package_id + INFO + + + + + org.flywaydb.core.internal.sqlscript.DefaultSqlScriptExecutor + will be truncated to "participant_events_non_consuming_exercise_ + INFO + + + + + org.flywaydb.core.internal.database.base.Database + Flyway upgrade recommended: H2 2.1.210 is newer than this version of Flyway and support has not been tested. + INFO + + + + io.grpc.netty.NettyServerStream + INFO + Exception processing message + gRPC message exceeds maximum size + true + + + + + com.digitalasset.canton.http.StaticContentRouter + INFO + StaticContentRouter configured: StaticContentConfig + true + + + com.digitalasset.canton.http.StaticContentRouter + INFO + DO NOT USE StaticContentRouter IN PRODUCTION, CONSIDER SETTING UP REVERSE PROXY!!! + true + + + + + com.digitalasset.canton.ledger.api.auth.interceptor.AuthorizationInterceptor + INFO + Could not resolve is_deactivated status for user + true + + + + + software.amazon.awssdk.http.nio.netty.internal.SslContextProvider + INFO + SSL Certificate verification is disabled + true + + + + + com.zaxxer.hikari.pool.HikariPool + INFO + Exception during pool initialization + true + + + + + + io.grpc.internal.ManagedChannelImpl + + Failed to resolve name. 
+ INFO + true + + + + io.grpc.internal.SerializingExecutor + test exception + INFO + true + + + com.daml.jwt.HMAC256Verifier$ + HMAC256 JWT Validator is NOT recommended for production environments + INFO + true + + + com.digitalasset.canton.platform.store.FlywayMigrations + Asked to migrate-on-empty-schema + INFO + true + + + + com.digitalasset.canton.platform.store.appendonlydao.DbDispatcher + Exception while executing SQL query. Rolled back. + pruned offset + INFO + true + + + + + com.digitalasset.canton.platform.indexer.RecoveringIndexer + Error while running indexer, restart scheduled + INFO + true + + + com.digitalasset.canton.platform.store.appendonlydao.DbDispatcher + Processing the request failed due to a non-transient database error: ORA-00001: unique constraint + ORA-00001: unique constraint + INFO + true + + + com.digitalasset.canton.platform.store.appendonlydao.DbDispatcher + Processing the request failed due to a non-transient database error: Unique index or primary key violation + Unique index or primary key violation + INFO + true + + + + com.digitalasset.canton.integration.tests.LoggingIntegrationTestDefault + INFO + true + + + com.digitalasset.canton.integration.tests.LoggingIntegrationTestOracle + INFO + true + + + com.digitalasset.canton.integration.tests.LoggingIntegrationTestPostgres + INFO + true + + + + + com.digitalasset.canton.ledger.api.auth.Authorizer + INFO + true + + + + + com.digitalasset.canton.ledger.api.auth.OngoingAuthorizationObserver + INFO + true + + + + + daml.warnings + Use of divulged contracts is deprecated and incompatible with pruning. + INFO + true + + + + + com.daml.grpc.adapter.RunnableSequencingActor + INFO + Unexpected exception while executing + true + + + + + org.testcontainers.shaded.okhttp3.OkHttpClient + INFO + A connection to http://docker.socket/ was leaked. Did you forget to close a response body? 
+ true + + + + + 🐳 [postgres:11] + INFO + Retrying pull for image: postgres + true + + + 🐳 [postgres:11] + INFO + Docker image pull has not made progress in + true + + + + + com.digitalasset.canton.config.CommunityConfigValidations$ + INFO + DEPRECATED_PROTOCOL_VERSION + true + + + + + io.grpc.protobuf.services.HealthCheckingLoadBalancerFactory + UNIMPLEMENTED + INFO + + + + + com.digitalasset.canton.platform.apiserver.error.ErrorInterceptor + LEDGER_API_INTERNAL_ERROR + Half-closed without a request + INFO + true + + + + + com.digitalasset.canton.platform.apiserver.error.ErrorInterceptor + LEDGER_API_INTERNAL_ERROR + INTERNAL: Task com.codahale.metrics + INFO + true + + + + + com.digitalasset.canton.platform.apiserver.SeedService$ + contract ID + INFO + true + + + + + com.digitalasset.canton.concurrent.ExecutionContextMonitor + + INFO + true + + + + com.digitalasset.canton.participant.sync.SyncDomain + Late processing (or clock skew) of batch with counter + INFO + true + + + + + com.digitalasset.canton.time.TimeProofRequestSubmitterImpl + java.util.concurrent.RejectedExecutionException: null + INFO + true + + + + + diff --git a/canton-3x/community/base/src/main/resources/rewrite-async-appender.xml b/canton-3x/community/base/src/main/resources/rewrite-async-appender.xml new file mode 100644 index 0000000000..8c0a84b152 --- /dev/null +++ b/canton-3x/community/base/src/main/resources/rewrite-async-appender.xml @@ -0,0 +1,18 @@ + + + + + + + 0 + 1024 + 0 + + + + + diff --git a/canton-3x/community/base/src/main/scala/com/daml/nonempty/NonEmptyUtil.scala b/canton-3x/community/base/src/main/scala/com/daml/nonempty/NonEmptyUtil.scala new file mode 100644 index 0000000000..7ee4aed627 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/daml/nonempty/NonEmptyUtil.scala @@ -0,0 +1,51 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0
+
+package com.daml.nonempty
+
+import com.digitalasset.canton.logging.pretty.Pretty
+import pureconfig.{ConfigReader, ConfigWriter}
+
+import scala.collection.immutable
+import scala.reflect.ClassTag
+
+/** Additional methods for [[com.daml.nonempty.NonEmpty]].
+  *
+  * Cats instances for [[com.daml.nonempty.NonEmpty]] must be imported explicitly as
+  * `import `[[com.daml.nonempty.catsinstances]]`._` when necessary.
+  */
+object NonEmptyUtil {
+  def fromUnsafe[A](xs: A with immutable.Iterable[_]): NonEmpty[A] =
+    NonEmpty.from(xs).getOrElse(throw new NoSuchElementException)
+
+  object instances {
+
+    /** This instance is exposed as [[com.digitalasset.canton.logging.pretty.PrettyInstances.prettyNonempty]].
+      * It lives only here because `NonEmptyColl.Instance.subst` is private to the `nonempty` package
+      */
+    def prettyNonEmpty[A](implicit F: Pretty[A]): Pretty[NonEmpty[A]] = {
+      type K[T[_]] = Pretty[T[A]]
+      NonEmptyColl.Instance.subst[K](F)
+    }
+
+    implicit def nonEmptyPureConfigReader[C <: scala.collection.immutable.Iterable[_]](implicit
+        reader: ConfigReader[C],
+        ct: ClassTag[C],
+    ): ConfigReader[NonEmpty[C]] =
+      reader.emap(c => NonEmpty.from(c).toRight(EmptyCollectionFound(ct.toString)))
+
+    implicit def nonEmptyPureConfigWriter[C](implicit
+        writer: ConfigWriter[C]
+    ): ConfigWriter[NonEmpty[C]] =
+      writer.contramap(_.forgetNE)
+  }
+
+  /** A failure representing an unexpected empty collection
+    *
+    * @param typ
+    *   the type that was attempted to be converted to from an empty collection
+    */
+  final case class EmptyCollectionFound(typ: String) extends pureconfig.error.FailureReason {
+    override def description = s"Empty collection found when trying to convert to NonEmpty[$typ]."
+ } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/ProtoDeserializationError.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/ProtoDeserializationError.scala new file mode 100644 index 0000000000..33fe250c25 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/ProtoDeserializationError.scala @@ -0,0 +1,97 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton + +import com.daml.error.{ErrorCategory, ErrorCode, Explanation, Resolution} +import com.digitalasset.canton.config.RequireTypes.InvariantViolation as PureInvariantViolation +import com.digitalasset.canton.error.CantonError +import com.digitalasset.canton.error.CantonErrorGroups.ProtoDeserializationErrorGroup +import com.digitalasset.canton.logging.ErrorLoggingContext +import com.digitalasset.canton.serialization.DeserializationError +import com.google.protobuf.InvalidProtocolBufferException + +sealed trait ProtoDeserializationError extends Product with Serializable { + def inField(field: String): ProtoDeserializationError.ValueDeserializationError = + ProtoDeserializationError.ValueDeserializationError(field, message) + + def message: String +} + +object ProtoDeserializationError extends ProtoDeserializationErrorGroup { + final case class BufferException(error: InvalidProtocolBufferException) + extends ProtoDeserializationError { + override val message = error.getMessage + } + final case class CryptoDeserializationError(error: DeserializationError) + extends ProtoDeserializationError { + override val message = error.message + } + final case class TransactionDeserialization(message: String) extends ProtoDeserializationError + final case class ValueDeserializationError(field: String, message: String) + extends ProtoDeserializationError + final case class StringConversionError(message: String) extends 
ProtoDeserializationError
+  final case class UnrecognizedField(message: String) extends ProtoDeserializationError
+  final case class UnrecognizedEnum(field: String, value: Int) extends ProtoDeserializationError {
+    override val message = s"Unrecognized value `$value` in enum field `$field`"
+  }
+  final case class FieldNotSet(field: String) extends ProtoDeserializationError {
+    override val message = s"Field `$field` is not set"
+  }
+  final case class NotImplementedYet(className: String) extends ProtoDeserializationError {
+    override val message = className
+  }
+  final case class TimestampConversionError(message: String) extends ProtoDeserializationError
+  final case class TimeModelConversionError(message: String) extends ProtoDeserializationError
+  final case class ValueConversionError(field: String, error: String)
+      extends ProtoDeserializationError {
+    override val message = s"Unable to convert field `$field`: $error"
+  }
+  final case class RefinedDurationConversionError(field: String, error: String)
+      extends ProtoDeserializationError {
+    override val message = s"Unable to convert numeric field `$field`: $error"
+  }
+  final case class InvariantViolation(error: String) extends ProtoDeserializationError {
+    override def message = error
+  }
+  final case class VersionError(versionedMessage: String, invalidVersion: Int)
+      extends ProtoDeserializationError {
+    override val message =
+      s"Invalid version $invalidVersion in versioned message `$versionedMessage`"
+  }
+  final case class MaxBytesToDecompressExceeded(error: String) extends ProtoDeserializationError {
+    override def message = error
+  }
+  final case class OtherError(error: String) extends ProtoDeserializationError {
+    override def message = error
+  }
+
+  /** Common Deserialization error code
+    *
+    * USE THIS ERROR CODE ONLY WITHIN A GRPC SERVICE, PARSING THE INITIAL REQUEST.
+    * Don't use it for something like transaction processing or reading from the database.
+ */ + @Explanation( + """This error indicates that an incoming administrative command could not be processed due to a malformed message.""" + ) + @Resolution("Inspect the error details and correct your application") + object ProtoDeserializationFailure + extends ErrorCode( + id = "PROTO_DESERIALIZATION_FAILURE", + ErrorCategory.InvalidIndependentOfSystemState, + ) { + final case class Wrap(reason: ProtoDeserializationError)(implicit + val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = "Deserialization of protobuf message failed" + ) + with CantonError + } + + object InvariantViolation { + def toProtoDeserializationError(e: PureInvariantViolation): InvariantViolation = + InvariantViolation(e.message) + def apply(e: PureInvariantViolation): InvariantViolation = InvariantViolation(e.message) + } + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/Tags.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/Tags.scala new file mode 100644 index 0000000000..d00620e030 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/Tags.scala @@ -0,0 +1,139 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton + +import cats.syntax.either.* +import com.digitalasset.canton.config.CantonRequireTypes.{ + LengthLimitedStringWrapper, + LengthLimitedStringWrapperCompanion, + String255, +} +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.store.db.DbDeserializationException +import slick.jdbc.{GetResult, SetParameter} + +/** Participant local identifier used to refer to a Domain without the need to fetch identifying information from a domain. + * This does not need to be globally unique. Only unique for the participant using it. 
+ * @param str String with given alias + */ +final case class DomainAlias(protected val str: String255) + extends LengthLimitedStringWrapper + with PrettyPrinting { + override def pretty: Pretty[DomainAlias] = + prettyOfString(inst => show"Domain ${inst.unwrap.singleQuoted}") +} +object DomainAlias extends LengthLimitedStringWrapperCompanion[String255, DomainAlias] { + override protected def companion: String255.type = String255 + override def instanceName: String = "DomainAlias" + override protected def factoryMethodWrapper(str: String255): DomainAlias = DomainAlias(str) +} + +/** Class representing a SequencerAlias. + * + * A SequencerAlias serves as a shorthand, or 'nickname', for a particular sequencer or + * group of Highly Available (HA) replicas of a sequencer within a specific node. + * + * Note: + * - SequencerAlias is a node-local concept. This means that two different participants + * may assign different aliases to the same sequencer or group of HA sequencer replicas. + * + * - The uniqueness of a SequencerAlias is only enforced within a given domain ID. This + * means a node can use the same sequencer alias for different sequencers as long as + * these sequencers belong to different domains. 
+ */ +final case class SequencerAlias private (protected val str: String255) + extends LengthLimitedStringWrapper + with PrettyPrinting { + require(str.nonEmpty, "Empty SequencerAlias is not supported") + + override def pretty: Pretty[SequencerAlias] = + prettyOfString(inst => show"Sequencer ${inst.unwrap.singleQuoted}") + + override def toProtoPrimitive: String = + if (this == SequencerAlias.Default) "" else str.toProtoPrimitive +} + +object SequencerAlias extends LengthLimitedStringWrapperCompanion[String255, SequencerAlias] { + val Default = SequencerAlias.tryCreate("DefaultSequencer") + override protected def companion: String255.type = String255 + override def instanceName: String = "SequencerAlias" + override protected def factoryMethodWrapper(str: String255): SequencerAlias = SequencerAlias(str) + + override def create(str: String): Either[String, SequencerAlias] = + if (str.isEmpty) Left("Empty SequencerAlias is not supported") else super.create(str) + + override def fromProtoPrimitive(str: String): ParsingResult[SequencerAlias] = + if (str.isEmpty) { + Right(SequencerAlias.Default) + } else super.fromProtoPrimitive(str) +} + +/** Command identifier for tracking ledger commands + * @param id ledger string representing command + */ +final case class CommandId(private val id: LfLedgerString) extends PrettyPrinting { + def unwrap: LfLedgerString = id + def toProtoPrimitive: String = unwrap + def toLengthLimitedString: String255 = + checked(String255.tryCreate(id)) // LfLedgerString is limited to 255 chars + override def pretty: Pretty[CommandId] = prettyOfParam(_.unwrap) +} + +object CommandId { + def assertFromString(str: String) = CommandId(LfLedgerString.assertFromString(str)) + def fromProtoPrimitive(str: String): Either[String, CommandId] = + LfLedgerString.fromString(str).map(CommandId(_)) + + implicit val getResultCommandId: GetResult[CommandId] = GetResult(r => r.nextString()).andThen { + fromProtoPrimitive(_).valueOr(err => + throw new 
DbDeserializationException(s"Failed to deserialize command id: $err") + ) + } + + implicit val setParameterCommandId: SetParameter[CommandId] = (v, pp) => + pp >> v.toLengthLimitedString +} + +/** Application identifier for identifying customer applications in the ledger api + * @param id ledger string representing application + */ +final case class ApplicationId(private val id: LedgerApplicationId) extends PrettyPrinting { + def unwrap: LedgerApplicationId = id + def toProtoPrimitive: String = unwrap + def toLengthLimitedString: String255 = + checked(String255.tryCreate(id)) // LedgerApplicationId is limited to 255 chars + override def pretty: Pretty[ApplicationId] = prettyOfParam(_.unwrap) +} + +object ApplicationId { + def assertFromString(str: String) = ApplicationId(LedgerApplicationId.assertFromString(str)) + def fromProtoPrimitive(str: String): Either[String, ApplicationId] = + LedgerApplicationId.fromString(str).map(ApplicationId(_)) + + implicit val getResultApplicationId: GetResult[ApplicationId] = + GetResult(r => r.nextString()).andThen { + fromProtoPrimitive(_).valueOr(err => + throw new DbDeserializationException(s"Failed to deserialize application id: $err") + ) + } + + implicit val setParameterApplicationId: SetParameter[ApplicationId] = (v, pp) => + pp >> v.toLengthLimitedString +} + +/** Workflow identifier for identifying customer workflows, i.e. 
individual requests, in the ledger api + * @param id ledger string representing workflow + */ +final case class WorkflowId(private val id: LfWorkflowId) extends PrettyPrinting { + def unwrap: LfWorkflowId = id + def toProtoPrimitive: String = unwrap + override def pretty: Pretty[WorkflowId] = prettyOfParam(_.unwrap) +} + +object WorkflowId { + def assertFromString(str: String) = WorkflowId(LfWorkflowId.assertFromString(str)) + def fromProtoPrimitive(str: String): Either[String, WorkflowId] = + LfWorkflowId.fromString(str).map(WorkflowId(_)) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/common/domain/ServiceAgreement.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/common/domain/ServiceAgreement.scala new file mode 100644 index 0000000000..3dae7f9767 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/common/domain/ServiceAgreement.scala @@ -0,0 +1,49 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.common.domain + +import cats.syntax.either.* +import com.digitalasset.canton.ProtoDeserializationError +import com.digitalasset.canton.config.CantonRequireTypes.{ + LengthLimitedStringWrapper, + LengthLimitedStringWrapperCompanion, + String255, + String256M, +} +import com.digitalasset.canton.protocol.v0 +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import slick.jdbc.GetResult + +final case class ServiceAgreementId(override protected val str: String255) + extends LengthLimitedStringWrapper + +object ServiceAgreementId + extends LengthLimitedStringWrapperCompanion[String255, ServiceAgreementId] { + override def instanceName: String = "ServiceAgreementId" + + override protected def companion: String255.type = String255 + + override protected def factoryMethodWrapper(str: String255): ServiceAgreementId = + ServiceAgreementId(str) +} + +final case class ServiceAgreement(id: ServiceAgreementId, text: String256M) { + def toProtoV0: v0.ServiceAgreement = + v0.ServiceAgreement(id.unwrap, text.toProtoPrimitive) +} + +object ServiceAgreement { + implicit val serviceAgreementGetResult: GetResult[ServiceAgreement] = + GetResult(r => ServiceAgreement(ServiceAgreementId.tryCreate(r.<<), r.<<)) + + def fromProtoV0( + agreement: v0.ServiceAgreement + ): ParsingResult[ServiceAgreement] = + for { + id <- ServiceAgreementId.fromProtoPrimitive(agreement.id) + legalText <- String256M + .create(agreement.legalText) + .leftMap(ProtoDeserializationError.ValueDeserializationError("legal_text", _)) + } yield ServiceAgreement(id, legalText) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/concurrent/DirectExecutionContext.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/concurrent/DirectExecutionContext.scala new file mode 100644 index 0000000000..e0bcf8ea97 --- /dev/null +++ 
b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/concurrent/DirectExecutionContext.scala @@ -0,0 +1,35 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.concurrent + +import com.digitalasset.canton.logging.{NamedLogging, TracedLogger} +import com.typesafe.scalalogging.Logger + +import scala.concurrent.{BatchingExecutorCanton, ExecutionContextExecutor} + +/** A light-weight execution context that runs tasks on the thread calling `execute`. + * Only use this for running tasks that will terminate very quickly. + */ +final case class DirectExecutionContext(logger: Logger) + extends ExecutionContextExecutor + with BatchingExecutorCanton { + + private val reporter: Throwable => Unit = + Threading.createReporter(getClass.getSimpleName, logger, exitOnFatal = true) + + override def submitForExecution(runnable: Runnable): Unit = runnable.run() + + override def execute(runnable: Runnable): Unit = submitSyncBatched(runnable) + + override def reportFailure(cause: Throwable): Unit = { + reporter(cause) + // Do not rethrow cause. + // If this method throws an exception, the exception would ultimately be reported by a different EC, + // but this leads to a messy repetition of error messages in the log file. 
+ } +} +object DirectExecutionContext { + def apply(tracedLogger: TracedLogger): DirectExecutionContext = + DirectExecutionContext(NamedLogging.loggerWithoutTracing(tracedLogger)) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/concurrent/ExecutionContextMonitor.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/concurrent/ExecutionContextMonitor.scala new file mode 100644 index 0000000000..d1bb6ef1b1 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/concurrent/ExecutionContextMonitor.scala @@ -0,0 +1,118 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.concurrent + +import com.digitalasset.canton.DiscardOps +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.lifecycle.FlagCloseable +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.time.NonNegativeFiniteDuration +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.{FutureUtil, LoggerUtil, StackTraceUtil} + +import java.time.Instant +import java.util.concurrent.* +import java.util.concurrent.atomic.{AtomicInteger, AtomicReference} +import scala.concurrent.duration.Duration +import scala.concurrent.{ExecutionContext, Future} + +/** Debugging utility used to write at regular intervals the executor service queue size into the logfile + * + * Useful to debug starvation issues. 
+ */ +class ExecutionContextMonitor( + val loggerFactory: NamedLoggerFactory, + interval: NonNegativeFiniteDuration, + warnInterval: NonNegativeFiniteDuration, + override val timeouts: ProcessingTimeout, +)(implicit scheduler: ScheduledExecutorService) + extends NamedLogging + with FlagCloseable { + + private def schedule(runnable: Runnable): Unit = { + val ms = interval.toScala.toMillis + val _ = scheduler.scheduleAtFixedRate(runnable, ms, ms, TimeUnit.MILLISECONDS) + } + + private val warnIntervalMs = warnInterval.duration.toMillis + + // indicates when the last pending task has been scheduled + private val scheduled = new AtomicReference[Option[Long]](None) + // indicates how many times the current deadlock (if any) has been reported + private val reported = new AtomicInteger(0) + + import TraceContext.Implicits.Empty.* + + def monitor(ec: ExecutionContextIdlenessExecutorService): Unit = { + logger.debug(s"Monitoring ${ec.name}") + val runnable = new Runnable { + override def run(): Unit = { + if (!isClosing) { + // if we are still scheduled, complain! + val started = scheduled.getAndUpdate { + case None => Some(Instant.now().toEpochMilli) + case x => x + } + started match { + // if we are still scheduled, complain + case Some(started) => + reportIssue(ec, started) + // if we aren't scheduled yet, schedule a future! + case None => + implicit val myEc: ExecutionContext = ec + FutureUtil.doNotAwait( + Future { + if (scheduled.getAndSet(None).isEmpty) { + logger.error(s"Are we monitoring the EC ${ec.name} twice?") + } + // reset the reporting + if (reported.getAndSet(0) > 0) { + emit( + s"Task runner ${ec.name} is just overloaded, but operating correctly. Task got executed in the meantime." 
+ ) + } + }, + "Monitoring future failed despite being trivial ...", + ) + } + } + } + } + schedule(runnable) + } + + private def emit(message: => String): Unit = { + logger.warn(message) + } + + private def reportIssue( + ec: ExecutionContextIdlenessExecutorService, + startedEpochMs: Long, + ): Unit = { + val delta = Instant.now().toEpochMilli - startedEpochMs + val current = reported.getAndIncrement() + val warn = delta > (warnIntervalMs * current) + if (warn) { + val deltaTs = LoggerUtil.roundDurationForHumans(Duration(delta, TimeUnit.MILLISECONDS)) + if (current > 0) { + emit( + s"Task runner ${ec.name} is still stuck or overloaded for ${deltaTs}. (queue-size=${ec.queueSize}).\n$ec" + ) + } else { + emit( + s"Task runner ${ec.name} is stuck or overloaded for ${deltaTs}. (queue-size=${ec.queueSize}).\n$ec" + ) + } + val traces = StackTraceUtil.formatStackTrace(_.getName.startsWith(ec.name)) + val msg = s"Here is the stack-trace of threads for ${ec.name}:\n$traces" + if (current == 0) + logger.info(msg) + else + logger.debug(msg) + } else { + reported.updateAndGet(x => Math.max(0, x - 1)).discard + } + } + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/concurrent/ExecutorServiceExtensions.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/concurrent/ExecutorServiceExtensions.scala new file mode 100644 index 0000000000..eea7a63fef --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/concurrent/ExecutorServiceExtensions.scala @@ -0,0 +1,47 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.concurrent + +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.lifecycle.Lifecycle +import com.digitalasset.canton.logging.TracedLogger + +import java.util.concurrent.{ExecutorService, TimeUnit} + +final case class ExecutorServiceExtensions[EC <: ExecutorService](executorService: EC)( + logger: TracedLogger, + timeouts: ProcessingTimeout, +) extends AutoCloseable { + import scala.concurrent.duration.* + + private val DefaultTerminationAwaitDuration: FiniteDuration = + timeouts.shutdownShort.asFiniteApproximation + + /** Cleanly shuts down an executor service as best we can. + * @param name Name of the component using the ExecutorService. Used in log messages if executor does not shutdown cleanly. + */ + def close(name: String): Unit = close(Some(name)) + + /** Cleanly shuts down an executor service as best we can. + */ + override def close(): Unit = close(None) + + private def close(name: Option[String]): Unit = { + def awaitIdleness(timeout: FiniteDuration): Boolean = executorService match { + case executor: IdlenessExecutorService => executor.awaitIdleness(timeout) + case _ => true + } + + Lifecycle.shutdownResource( + name.getOrElse(s"executor-${executorService.toString}"), + () => executorService.shutdown(), + () => { val _ = executorService.shutdownNow() }, + awaitIdleness, + timeout => executorService.awaitTermination(timeout.toMillis, TimeUnit.MILLISECONDS), + DefaultTerminationAwaitDuration, + DefaultTerminationAwaitDuration, + logger, + ) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/concurrent/FutureSupervisor.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/concurrent/FutureSupervisor.scala new file mode 100644 index 0000000000..fa663e7832 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/concurrent/FutureSupervisor.scala @@ -0,0 +1,166 @@ +// 
Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.concurrent + +import com.digitalasset.canton.DiscardOps +import com.digitalasset.canton.config.NonNegativeDuration +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.logging.ErrorLoggingContext +import com.digitalasset.canton.util.LoggerUtil +import com.digitalasset.canton.util.Thereafter.syntax.* +import org.slf4j.event.Level + +import java.util.concurrent.atomic.{AtomicInteger, AtomicReference} +import java.util.concurrent.{ScheduledExecutorService, TimeUnit} +import scala.concurrent.duration.* +import scala.concurrent.{ExecutionContext, Future} +import scala.util.{Failure, Success} + +/** Alert if a future does not complete within the prescribed duration + * + * We use future based synchronisation in some places, where we use a promise to only kick + * off an action once a promise is completed. This can lead to deadlocks where something + * does not start because we never complete the promise. + * This leads to hard to debug situations. We can support debugging by tracking such futures. + * As this is costly, we'll turn this off in production. 
+ * + * @see HasFutureSupervision for a mixin + */ +trait FutureSupervisor { + def supervised[T]( + description: => String, + warnAfter: Duration = 10.seconds, + logLevel: Level = Level.WARN, + )(fut: Future[T])(implicit + errorLoggingContext: ErrorLoggingContext, + executionContext: ExecutionContext, + ): Future[T] + def supervisedUS[T]( + description: => String, + warnAfter: Duration = 10.seconds, + logLevel: Level = Level.WARN, + )( + fut: FutureUnlessShutdown[T] + )(implicit + errorLoggingContext: ErrorLoggingContext, + executionContext: ExecutionContext, + ): FutureUnlessShutdown[T] = FutureUnlessShutdown( + supervised(description, warnAfter, logLevel)(fut.unwrap) + ) +} + +object FutureSupervisor { + + object Noop extends FutureSupervisor { + override def supervised[T]( + description: => String, + warnAfter: Duration, + logLevel: Level = Level.WARN, + )( + fut: Future[T] + )(implicit + errorLoggingContext: ErrorLoggingContext, + executionContext: ExecutionContext, + ): Future[T] = fut + } + + class Impl( + defaultWarningInterval: NonNegativeDuration + )(implicit + scheduler: ScheduledExecutorService + ) extends FutureSupervisor { + + private case class ScheduledFuture( + fut: Future[_], + description: () => String, + startNanos: Long, + warnNanos: Long, + errorLoggingContext: ErrorLoggingContext, + logLevel: Level, + ) { + val warnCounter = new AtomicInteger(1) + def alertNow(currentNanos: Long): Boolean = { + val cur = warnCounter.get() + if (currentNanos - startNanos > (warnNanos * cur) && !fut.isCompleted) { + warnCounter.incrementAndGet().discard + true + } else false + } + } + + private val scheduled = new AtomicReference[Seq[ScheduledFuture]](Seq()) + private val defaultCheckMs = 1000L + + // schedule regular background checks + scheduler.scheduleWithFixedDelay( + () => checkSlow(), + defaultCheckMs, + defaultCheckMs, + TimeUnit.MILLISECONDS, + ) + + private def log( + message: String, + level: Level, + elc: ErrorLoggingContext, + exception: 
Option[Throwable] = None, + ): Unit = { + exception + .map(LoggerUtil.logThrowableAtLevel(level, message, _)(elc)) + .getOrElse(LoggerUtil.logAtLevel(level, message)(elc)) + } + + private def checkSlow(): Unit = { + val now = System.nanoTime() + val cur = scheduled.updateAndGet(_.filterNot(_.fut.isCompleted)) + cur.filter(x => x.alertNow(now)).foreach { blocked => + val dur = Duration.fromNanos(now - blocked.startNanos) + val message = + s"${blocked.description()} has not completed after ${LoggerUtil.roundDurationForHumans(dur)}" + log(message, blocked.logLevel, blocked.errorLoggingContext) + } + } + + def supervised[T]( + description: => String, + warnAfter: Duration = defaultWarningInterval.duration, + logLevel: Level = Level.WARN, + )(fut: Future[T])(implicit + errorLoggingContext: ErrorLoggingContext, + executionContext: ExecutionContext, + ): Future[T] = { + val itm = + ScheduledFuture( + fut, + () => description, + startNanos = System.nanoTime(), + warnAfter.toNanos, + errorLoggingContext, + logLevel, + ) + scheduled.updateAndGet(x => x.filterNot(_.fut.isCompleted) :+ itm) + fut.thereafter { + case Failure(exception) => + log( + s"${description} failed with exception after ${elapsed(itm)}", + logLevel, + errorLoggingContext, + ) + case Success(_) => + val time = elapsed(itm) + if (time > warnAfter) { + errorLoggingContext.info( + s"${description} succeed successfully but slow after $time" + ) + } + } + } + + private def elapsed(item: ScheduledFuture): Duration = { + val dur = Duration.fromNanos(System.nanoTime() - item.startNanos) + LoggerUtil.roundDurationForHumans(dur) + } + + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/concurrent/HasFutureSupervision.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/concurrent/HasFutureSupervision.scala new file mode 100644 index 0000000000..2d2c926d8f --- /dev/null +++ 
b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/concurrent/HasFutureSupervision.scala @@ -0,0 +1,29 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.concurrent + +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.logging.NamedLogging +import com.digitalasset.canton.tracing.TraceContext + +import scala.concurrent.duration.* +import scala.concurrent.{ExecutionContext, Future} + +/** Mixin-trait for classes that want to run some futures with supervision using a [[FutureSupervisor]] strategy */ +trait HasFutureSupervision { this: NamedLogging => + + protected def futureSupervisor: FutureSupervisor + + protected def executionContext: ExecutionContext + + protected def supervised[T](description: => String, warnAfter: Duration = 10.seconds)( + fut: Future[T] + )(implicit traceContext: TraceContext): Future[T] = + futureSupervisor.supervised(description, warnAfter)(fut)(implicitly, executionContext) + + def supervisedUS[T](description: => String, warnAfter: Duration = 10.seconds)( + fut: FutureUnlessShutdown[T] + )(implicit traceContext: TraceContext): FutureUnlessShutdown[T] = + FutureUnlessShutdown(supervised(description, warnAfter)(fut.unwrap)) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/concurrent/IdlenessExecutorService.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/concurrent/IdlenessExecutorService.scala new file mode 100644 index 0000000000..75062382d8 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/concurrent/IdlenessExecutorService.scala @@ -0,0 +1,104 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.concurrent + +import com.daml.executors.QueueAwareExecutorService + +import java.util.concurrent.* +import scala.annotation.tailrec +import scala.concurrent.ExecutionContextExecutorService +import scala.concurrent.duration.FiniteDuration + +trait IdlenessExecutorService extends ExecutorService { + + /** Waits until all threads in the executor service are idle. + * The current thread may help in processing submitted tasks. + * The method may be conservative: it can return false even + * if all threads are idle at the end of the `duration`. + * + * @param timeout The maximum time to wait. + * This time may be exceeded up to the run-time of the longest running task in the pool. + * @return true if all threads are idle; false if the timeout elapsed + */ + @SuppressWarnings( + Array( + "org.wartremover.warts.Var", + "org.wartremover.warts.While", + "com.digitalasset.canton.RequireBlocking", + ) + ) + def awaitIdleness(timeout: FiniteDuration): Boolean = { + // Check whether this is idle for 5 consecutive times. + // We check several times, as awaitIdlenessOnce may incorrectly indicate idleness. 
+ val deadline = timeout.fromNow + var idleCount = 0 + var remainingTime = deadline.timeLeft + while (remainingTime.toMillis > 0 && idleCount < 5) { + // Do not use `blocking` because we do not want the execution context to spawn new threads now + Thread.sleep(1L) + if (awaitIdlenessOnce(remainingTime)) + idleCount += 1 + else + idleCount = 0 + remainingTime = deadline.timeLeft + } + idleCount == 5 + } + + protected[concurrent] def awaitIdlenessOnce(timeout: FiniteDuration): Boolean + +} + +abstract class ExecutionContextIdlenessExecutorService( + executorService: ExecutorService, + name: String, +) extends QueueAwareExecutorService(executorService, name) + with IdlenessExecutorService + with ExecutionContextExecutorService + +class ForkJoinIdlenessExecutorService( + pool: ForkJoinPool, + delegate: ExecutorService, + reporter: Throwable => Unit, + name: String, +) extends ExecutionContextIdlenessExecutorService(delegate, name) { + override def reportFailure(cause: Throwable): Unit = reporter(cause) + + override protected[concurrent] def awaitIdlenessOnce(timeout: FiniteDuration): Boolean = { + pool.awaitQuiescence(timeout.toMillis, TimeUnit.MILLISECONDS) + } + + override def toString: String = s"ForkJoinIdlenessExecutorService-$name: $pool" + +} + +class ThreadPoolIdlenessExecutorService( + pool: ThreadPoolExecutor, + reporter: Throwable => Unit, + override val name: String, +) extends ExecutionContextIdlenessExecutorService(pool, name) { + + override def reportFailure(cause: Throwable): Unit = reporter(cause) + + override protected[concurrent] def awaitIdlenessOnce(timeout: FiniteDuration): Boolean = { + val deadline = timeout.fromNow + val minSleep = 1L + val maxSleep = Math.max(timeout.toMillis >> 2, minSleep) + + @SuppressWarnings(Array("com.digitalasset.canton.RequireBlocking")) + @tailrec def go(sleep: Long): Boolean = { + if (deadline.isOverdue()) + false + else if (pool.getQueue.isEmpty && pool.getActiveCount == 0) + true + else { + // Do not use `blocking` 
because we do not want the execution context to spawn new threads now + Thread.sleep(sleep) + go(Math.min(sleep * 2, maxSleep)) + } + } + + go(minSleep) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/concurrent/SupervisedPromise.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/concurrent/SupervisedPromise.scala new file mode 100644 index 0000000000..bc82c68f8c --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/concurrent/SupervisedPromise.scala @@ -0,0 +1,32 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.concurrent + +import com.digitalasset.canton.logging.ErrorLoggingContext +import org.slf4j.event.Level + +import scala.concurrent.duration.* +import scala.concurrent.{ExecutionContext, Future, Promise} +import scala.util.Try + +/** Promise that will log a message after logAfter if it has not been completed. 
+ */ +class SupervisedPromise[T]( + description: String, + futureSupervisor: FutureSupervisor, + logAfter: Duration = 10.seconds, + // TODO(i11704): lift to a higher level once known un-completed promises have been fixed + logLevel: Level = Level.DEBUG, +)(implicit + ecl: ErrorLoggingContext, + ec: ExecutionContext, +) extends Promise[T] { + private val promise: Promise[T] = Promise[T]() + override def future: Future[T] = { + futureSupervisor.supervised(description, logAfter, logLevel)(promise.future) + } + + override def isCompleted: Boolean = promise.isCompleted + override def tryComplete(result: Try[T]): Boolean = promise.tryComplete(result) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/concurrent/Threading.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/concurrent/Threading.scala new file mode 100644 index 0000000000..1aebf0dfb0 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/concurrent/Threading.scala @@ -0,0 +1,313 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.concurrent + +import cats.syntax.either.* +import com.daml.metrics.ExecutorServiceMetrics +import com.digitalasset.canton.lifecycle.ClosingException +import com.digitalasset.canton.util.ErrorUtil +import com.digitalasset.canton.util.ShowUtil.* +import com.google.common.util.concurrent.ThreadFactoryBuilder +import com.typesafe.scalalogging.Logger + +import java.util.concurrent.* +import java.util.function.Predicate +import scala.concurrent.{ExecutionContext, blocking} + +/** Factories and utilities for dealing with threading. + */ +object Threading { + + /** Creates a single threaded scheduled executor. + * @param name used for created threads. Prefer dash separated names. `-{n}` will be appended. 
 * @param logger where uncaught exceptions are logged + */ + def singleThreadScheduledExecutor( + name: String, + logger: Logger, + daemon: Boolean = false, + ): ScheduledExecutorService = { + val executor = new ScheduledThreadPoolExecutor( + 1, + threadFactory(name, logger, exitOnFatal = true, daemon = daemon), + ) + // we don't want tasks scheduled far in the future to prevent a clean shutdown + executor.setExecuteExistingDelayedTasksAfterShutdownPolicy(false) + executor + } + + /** Creates a single threaded executor with maximum thread pool size = 1. + * @param name used for created threads. Prefer dash separated names. + * @param logger where uncaught exceptions are logged + */ + def singleThreadedExecutor( + name: String, + logger: Logger, + ): ExecutionContextIdlenessExecutorService = { + val executor = new ThreadPoolExecutor( + 1, + 1, + 0L, + TimeUnit.MILLISECONDS, + new LinkedBlockingQueue[Runnable](), + threadFactory(name, logger, exitOnFatal = true), + ) + new ThreadPoolIdlenessExecutorService( + executor, + createReporter(name, logger, exitOnFatal = true)(_), + name, + ) + } + + /** @param exitOnFatal terminate the JVM on fatal errors. Enable this in production to prevent data corruption by + * termination of specific threads. + */ + private def threadFactory( + name: String, + logger: Logger, + exitOnFatal: Boolean, + daemon: Boolean = false, + ): ThreadFactory = + new ThreadFactoryBuilder() + .setUncaughtExceptionHandler(createUncaughtExceptionHandler(logger, exitOnFatal)) + .setNameFormat(s"$name-%d") + .setDaemon(daemon) + .build() + + /** @param exitOnFatal terminate the JVM on fatal errors. Enable this in production to prevent data corruption by + * termination of specific threads. 
+ */ + private def createUncaughtExceptionHandler( + logger: Logger, + exitOnFatal: Boolean, + ): Thread.UncaughtExceptionHandler = + (t: Thread, e: Throwable) => createReporter(t.getName, logger, exitOnFatal)(e) + + /** @param exitOnFatal terminate the JVM on fatal errors. Enable this in production to prevent data corruption by + * termination of specific threads. + */ + def createReporter(name: String, logger: Logger, exitOnFatal: Boolean)( + throwable: Throwable + ): Unit = { + if (exitOnFatal) doExitOnFatal(name, logger)(throwable) + throwable match { + case ex: io.grpc.StatusRuntimeException + if ex.getStatus.getCode == io.grpc.Status.Code.CANCELLED => + logger.info(s"Grpc channel cancelled in $name.", ex) + case ClosingException(_) => + logger.info(s"Unclean shutdown due to cancellation in $name.", throwable) + case _: Throwable => + logger.error(s"A fatal error has occurred in $name. Terminating thread.", throwable) + } + } + + private def doExitOnFatal(name: String, logger: Logger)(throwable: Throwable): Unit = + throwable match { + case _: LinkageError | _: VirtualMachineError => + // Output the error reason both to stderr and the logger, + // because System.exit tends to terminate the JVM before everything has been output. + Console.err.println( + s"A fatal error has occurred in $name. Terminating immediately.\n${ErrorUtil.messageWithStacktrace(throwable)}" + ) + Console.err.flush() + logger.error(s"A fatal error has occurred in $name. 
Terminating immediately.", throwable) + System.exit(-1) + case _: Throwable => // no fatal error, nothing to do + } + + def newExecutionContext( + name: String, + logger: Logger, + metrics: ExecutorServiceMetrics, + ): ExecutionContextIdlenessExecutorService = + newExecutionContext(name, logger, Some(metrics)) + + def newExecutionContext( + name: String, + logger: Logger, + maybeMetrics: Option[ExecutorServiceMetrics], + ): ExecutionContextIdlenessExecutorService = + newExecutionContext( + name, + logger, + maybeMetrics, + detectNumberOfThreads(logger), + ) + + /** Yields an `ExecutionContext` like `scala.concurrent.ExecutionContext.global`, + * except that it has its own thread pool. + * + * @param exitOnFatal terminate the JVM on fatal errors. Enable this in production to prevent data corruption by + * termination of specific threads. + */ + @SuppressWarnings(Array("org.wartremover.warts.Null", "org.wartremover.warts.AsInstanceOf")) + def newExecutionContext( + name: String, + logger: Logger, + maybeMetrics: Option[ExecutorServiceMetrics], + parallelism: Int, + maxExtraThreads: Int = 256, + exitOnFatal: Boolean = true, + ): ExecutionContextIdlenessExecutorService = { + val reporter = createReporter(name, logger, exitOnFatal)(_) + val handler = ((_, cause) => reporter(cause)): Thread.UncaughtExceptionHandler + + val threadFactoryConstructor = Class + .forName("scala.concurrent.impl.ExecutionContextImpl$DefaultThreadFactory") + .getDeclaredConstructor( + classOf[Boolean], + classOf[Int], + classOf[String], + classOf[Thread.UncaughtExceptionHandler], + ) + threadFactoryConstructor.setAccessible(true) + val threadFactory = threadFactoryConstructor + .newInstance(Boolean.box(true), Int.box(maxExtraThreads), name, handler) + .asInstanceOf[ForkJoinPool.ForkJoinWorkerThreadFactory] + + val forkJoinPool = createForkJoinPool(parallelism, threadFactory, handler, logger) + val executorService = + maybeMetrics.fold(forkJoinPool: ExecutorService)( + 
_.monitorExecutorService(name, forkJoinPool) + ) + + new ForkJoinIdlenessExecutorService(forkJoinPool, executorService, reporter, name) + } + + /** Minimum parallelism of ForkJoinPool. + * Currently greater than one to work around a bug that prevents creation of new threads to compensate blocking tasks. + */ + val minParallelism = 3 + + @SuppressWarnings(Array("org.wartremover.warts.Null")) + private def createForkJoinPool( + parallelism: Int, + threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory, + handler: Thread.UncaughtExceptionHandler, + logger: Logger, + ): ForkJoinPool = { + val tunedParallelism = + if (parallelism >= minParallelism) parallelism + else { + // The calculation of running threads in ForkJoinPool may overestimate the actual number. + // As a result, we need to request at least minParallelism threads to get at least 1 (with high probability). + // The pool may still run out of threads, but the probability is much lower. + logger.info( + s"Creating ForkJoinPool with parallelism = $minParallelism (instead of $parallelism) to avoid starvation." + ) + minParallelism + } + + try { + val java11ForkJoinPoolConstructor = classOf[ForkJoinPool].getConstructor( + classOf[Int], + classOf[ForkJoinPool.ForkJoinWorkerThreadFactory], + classOf[Thread.UncaughtExceptionHandler], + classOf[Boolean], + classOf[Int], + classOf[Int], + classOf[Int], + classOf[Predicate[_]], + classOf[Long], + classOf[TimeUnit], + ) + + java11ForkJoinPoolConstructor.newInstance( + Int.box(tunedParallelism), + threadFactory, + handler, + Boolean.box(true), + Int.box(tunedParallelism), + Int.box(Int.MaxValue), + // + // Choosing tunedParallelism here instead of the default of 1. + // With the default, we would get only 1 running thread in the presence of blocking calls. + Int.box(tunedParallelism), + null, + Long.box(60), + TimeUnit.SECONDS, + ) + } catch { + case _: NoSuchMethodException => + logger.warn( + "Unable to create ForkJoinPool of Java 11. 
" + + "Using fallback instead, which has been tested less than the default one. " + + "Do not use this setting in production." + ) + new ForkJoinPool(tunedParallelism, threadFactory, handler, true) + } + } + + def directExecutionContext(logger: Logger): ExecutionContext = DirectExecutionContext( + logger + ) + + /** Detects the number of threads the same way as `scala.concurrent.impl.ExecutionContextImpl`, + * except that system property values like 'x2' are not supported. + */ + @SuppressWarnings(Array("org.wartremover.warts.Var")) + def detectNumberOfThreads(logger: Logger): Int = { + def getIntProperty(name: String): Option[Int] = + for { + strProperty <- Option(System.getProperty(name)) + parsedValue <- Either + .catchOnly[NumberFormatException](strProperty.toInt) + .leftMap(_ => + logger.warn( + show"Unable to parse '-D${strProperty.singleQuoted}' as value of ${name.unquoted}. Ignoring value." + ) + ) + .toOption + value <- + if (parsedValue >= 1) Some(parsedValue) + else { + logger.warn( + show"The value $parsedValue of '-D${name.unquoted}' is less than 1. Ignoring value." + ) + None + } + } yield value + + var numThreads = getIntProperty(numThreadsProp) match { + case Some(n) => + logger.info(s"Deriving $n as number of threads from '-D$numThreadsProp'.") + n + case None => + val n = sys.runtime.availableProcessors() + logger.info( + s"Deriving $n as number of threads from 'sys.runtime.availableProcessors()'. " + + s"Please use '-D$numThreadsProp' to override." + ) + n + } + + getIntProperty(minThreadsProp).foreach { minThreads => + if (numThreads < minThreads) { + logger.info( + s"Applying '-D$minThreadsProp' to increase number of threads from $numThreads to $minThreads." + ) + numThreads = minThreads + } + } + + getIntProperty(maxThreadsProp).foreach { maxThreads => + if (numThreads > maxThreads) { + logger.info( + s"Applying '-D$maxThreadsProp' to decrease number of threads from $numThreads to $maxThreads." 
+ ) + numThreads = maxThreads + } + } + + numThreads + } + + val numThreadsProp = "scala.concurrent.context.numThreads" + val minThreadsProp = "scala.concurrent.context.minThreads" + val maxThreadsProp = "scala.concurrent.context.maxThreads" + val threadingProps = List(numThreadsProp, minThreadsProp, maxThreadsProp) + + @SuppressWarnings(Array("com.digitalasset.canton.RequireBlocking")) + def sleep(millis: Long, nanos: Int = 0): Unit = blocking { Thread.sleep(millis, nanos) } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/ApiLoggingConfig.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/ApiLoggingConfig.scala new file mode 100644 index 0000000000..f3339d22d4 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/ApiLoggingConfig.scala @@ -0,0 +1,46 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.config + +import com.digitalasset.canton.logging.pretty.CantonPrettyPrinter + +/** Control logging of the ApiRequestLogger + * + * Every GRPC service invocation is logged through the ApiRequestLogger. This allows + * to monitor all incoming traffic to a node (ledger API, sequencer API, admin API). + * + * @param messagePayloads Indicates whether to log message payloads. (To be disabled in production!) + * Also applies to metadata. None is equivalent to false. + * @param maxMethodLength indicates how much to abbreviate the name of the called method. + * E.g. "com.digitalasset.canton.MyMethod" may get abbreviated to "c.d.c.MyMethod". + * The last token will never get abbreviated. 
+ * @param maxMessageLines maximum number of lines to log for a message + * @param maxStringLength maximum number of characters to log for a string within a message + * @param maxMetadataSize maximum size of metadata + * @param warnBeyondLoad If API logging is turned on, emit a warning on each request if the load exceeds this threshold. + */ +final case class ApiLoggingConfig( + // TODO(#15221) change to boolean (breaking change) + messagePayloads: Option[Boolean] = None, + maxMethodLength: Int = ApiLoggingConfig.defaultMaxMethodLength, + maxMessageLines: Int = ApiLoggingConfig.defaultMaxMessageLines, + maxStringLength: Int = ApiLoggingConfig.defaultMaxStringLength, + maxMetadataSize: Int = ApiLoggingConfig.defaultMaxMetadataSize, + warnBeyondLoad: Option[Int] = ApiLoggingConfig.defaultWarnBeyondLoad, +) { + + lazy val logMessagePayloads: Boolean = messagePayloads.getOrElse(false) + + /** Pretty printer for logging event details */ + lazy val printer = new CantonPrettyPrinter(maxStringLength, maxMessageLines) + +} + +object ApiLoggingConfig { + val defaultMaxMethodLength: Int = 30 + val defaultMaxMessageLines: Int = 20 + val defaultMaxStringLength: Int = 250 + val defaultMaxMetadataSize: Int = 200 + val defaultWarnBeyondLoad: Option[Int] = Some(5) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/CacheConfig.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/CacheConfig.scala new file mode 100644 index 0000000000..6e289b0957 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/CacheConfig.scala @@ -0,0 +1,93 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.config

import com.digitalasset.canton.config.RequireTypes.PositiveNumeric
import com.github.blemale.scaffeine.Scaffeine
import com.google.common.annotations.VisibleForTesting

import scala.concurrent.ExecutionContext

/** Configurations settings for a single cache
  *
  * @param maximumSize the maximum size of the cache
  * @param expireAfterAccess how quickly after last access items should be expired from the cache
  */
final case class CacheConfig(
    maximumSize: PositiveNumeric[Long],
    expireAfterAccess: NonNegativeFiniteDuration = NonNegativeFiniteDuration.ofMinutes(10),
) {

  /** Builds a Scaffeine cache builder with this configuration applied; entries are evicted on the
    * given execution context.
    */
  def buildScaffeine()(implicit ec: ExecutionContext): Scaffeine[Any, Any] =
    Scaffeine()
      .maximumSize(maximumSize.value)
      .expireAfterAccess(expireAfterAccess.underlying)
      .executor(ec.execute(_))

}

/** Cache configuration whose entries expire a fixed time after being written (rather than after
  * last access, as in [[CacheConfig]]).
  *
  * @param maximumSize the maximum size of the cache
  * @param expireAfterTimeout how quickly after being written items should be expired from the cache
  */
final case class CacheConfigWithTimeout(
    maximumSize: PositiveNumeric[Long],
    expireAfterTimeout: NonNegativeFiniteDuration = NonNegativeFiniteDuration.ofMinutes(10),
) {

  def buildScaffeine(): Scaffeine[Any, Any] =
    Scaffeine().maximumSize(maximumSize.value).expireAfterWrite(expireAfterTimeout.underlying)

}

/** Configuration settings for various internal caches
  *
  * @param indexedStrings cache size configuration for the static string index cache
  * @param contractStore cache size configuration for the contract store
  * @param topologySnapshot cache size configuration for topology snapshots
  * @param partyCache cache size configuration for party lookups
  * @param participantCache cache size configuration for participant lookups
  * @param keyCache cache size configuration for key lookups
  * @param sessionKeyCache cache configuration (with write-expiry) for session keys
  * @param packageVettingCache cache size configuration for package vetting lookups
  * @param mySigningKeyCache cache size configuration for the node's own signing keys
  * @param trafficStatusCache cache size configuration for traffic status lookups
  * @param memberCache cache size configuration for member lookups
  * @param kmsMetadataCache cache size configuration for KMS key metadata
  * @param finalizedMediatorRequests cache size for the finalized mediator requests so that the mediator does not have to
  *                                  perform a db round-trip if we have slow responders.
  */
final case class CachingConfigs(
    indexedStrings: CacheConfig = CachingConfigs.defaultStaticStringCache,
    contractStore: CacheConfig = CachingConfigs.defaultContractStoreCache,
    topologySnapshot: CacheConfig = CachingConfigs.defaultTopologySnapshotCache,
    partyCache: CacheConfig = CachingConfigs.defaultPartyCache,
    participantCache: CacheConfig = CachingConfigs.defaultParticipantCache,
    keyCache: CacheConfig = CachingConfigs.defaultKeyCache,
    sessionKeyCache: CacheConfigWithTimeout = CachingConfigs.defaultSessionKeyCache,
    packageVettingCache: CacheConfig = CachingConfigs.defaultPackageVettingCache,
    mySigningKeyCache: CacheConfig = CachingConfigs.defaultMySigningKeyCache,
    trafficStatusCache: CacheConfig = CachingConfigs.defaultTrafficStatusCache,
    memberCache: CacheConfig = CachingConfigs.defaultMemberCache,
    kmsMetadataCache: CacheConfig = CachingConfigs.kmsMetadataCache,
    finalizedMediatorRequests: CacheConfig = CachingConfigs.defaultFinalizedMediatorRequestsCache,
)

object CachingConfigs {
  val defaultStaticStringCache: CacheConfig =
    CacheConfig(maximumSize = PositiveNumeric.tryCreate(10000))
  val defaultContractStoreCache: CacheConfig =
    CacheConfig(maximumSize = PositiveNumeric.tryCreate(10000))
  val defaultTopologySnapshotCache: CacheConfig =
    CacheConfig(maximumSize = PositiveNumeric.tryCreate(100))
  val defaultPartyCache: CacheConfig = CacheConfig(maximumSize = PositiveNumeric.tryCreate(10000))
  val defaultParticipantCache: CacheConfig =
    CacheConfig(maximumSize = PositiveNumeric.tryCreate(1000))
  val defaultKeyCache: CacheConfig = CacheConfig(maximumSize = PositiveNumeric.tryCreate(1000))
  val defaultSessionKeyCache: CacheConfigWithTimeout =
    CacheConfigWithTimeout(maximumSize = PositiveNumeric.tryCreate(10000))
  val defaultPackageVettingCache: CacheConfig =
    CacheConfig(maximumSize = PositiveNumeric.tryCreate(10000))
  val defaultMySigningKeyCache: CacheConfig =
    CacheConfig(maximumSize = PositiveNumeric.tryCreate(5))
  val defaultTrafficStatusCache: CacheConfig =
    CacheConfig(maximumSize = PositiveNumeric.tryCreate(10000))
  val defaultMemberCache: CacheConfig =
    CacheConfig(maximumSize = PositiveNumeric.tryCreate(1000))
  val kmsMetadataCache: CacheConfig =
    CacheConfig(maximumSize = PositiveNumeric.tryCreate(20))
  // Explicit result type added for consistency with the sibling public vals above.
  val defaultFinalizedMediatorRequestsCache: CacheConfig =
    CacheConfig(maximumSize = PositiveNumeric.tryCreate(1000))
  @VisibleForTesting
  val testing: CachingConfigs =
    CachingConfigs(contractStore = CacheConfig(maximumSize = PositiveNumeric.tryCreate(100)))

}
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.config

import cats.Order
import cats.syntax.either.*
import cats.syntax.option.*
import cats.syntax.traverse.*
import com.digitalasset.canton.ProtoDeserializationError.InvariantViolation as ProtoInvariantViolation
import com.digitalasset.canton.checked
import com.digitalasset.canton.config.CantonRequireTypes.InstanceName.InvalidInstanceName
import com.digitalasset.canton.config.CantonRequireTypes.LengthLimitedString.InvalidLengthString
import com.digitalasset.canton.config.RequireTypes.InvariantViolation
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.digitalasset.canton.store.db.DbDeserializationException
import com.digitalasset.canton.util.NoCopy
import com.digitalasset.canton.util.ShowUtil.*
import io.circe.{Encoder, KeyEncoder}
import pureconfig.error.FailureReason
import pureconfig.{ConfigReader, ConfigWriter}
import slick.jdbc.{GetResult, SetParameter}

import java.util.UUID

/** Encapsulates those classes and their utility methods which enforce a given invariant via the use of require.
  */
object CantonRequireTypes {

  /** A string that is guaranteed to be non-empty. Construct via [[NonEmptyString.create]]
    * (safe) or [[NonEmptyString.tryCreate]] (throws on violation).
    */
  final case class NonEmptyString(private val str: String) extends NoCopy {
    def unwrap: String = str
    require(str.nonEmpty, s"Unable to create a NonEmptyString as the empty string $str was given.")
  }

  object NonEmptyString {
    // Hide the synthetic case-class apply so that construction goes through create/tryCreate.
    private[this] def apply(str: String): NonEmptyString =
      throw new UnsupportedOperationException("Use create or tryCreate methods")

    /** Safe constructor: returns Left on the empty string instead of throwing. */
    def create(str: String): Either[InvariantViolation, NonEmptyString] = {
      Either.cond(
        str.nonEmpty,
        new NonEmptyString(str),
        InvariantViolation(s"Unable to create a NonEmptyString as the empty string $str was given."),
      )
    }

    /** Unsafe constructor: throws IllegalArgumentException (via require) on the empty string. */
    def tryCreate(str: String): NonEmptyString = {
      new NonEmptyString(str)
    }

    // pureconfig reader so NonEmptyString fields can appear directly in config files
    lazy implicit val nonEmptyStringReader: ConfigReader[NonEmptyString] = {
      ConfigReader.fromString[NonEmptyString] { str =>
        Either.cond(str.nonEmpty, new NonEmptyString(str), EmptyString(str))
      }
    }

    final case class EmptyString(str: String) extends FailureReason {
      override def description: String =
        s"The value you gave for this configuration setting ('$str') was the empty string, but we require a non-empty string for this configuration setting"
    }
  }

  /** This trait wraps a String that is limited to a certain maximum length.
    * The canonical use case is ensuring that we don't write too long strings into the database.
    *
    * You should normally implement [[LengthLimitedString]] or use its subclasses,
    * for strings to be stored in standard string columns.
    *
    * As this class implements fewer checks, this also serves as the basis for longer strings such as CLOBs.
    */
  sealed trait AbstractLengthLimitedString extends NoCopy {
    def str: String

    /** Maximum number of characters allowed.
      *
      * Must not be confused with storage space, which can be up to 4*[[maxLength]] in a UTF8 encoding
      */
    def maxLength: Int
    // optionally give a name for the type of String you are attempting to validate for nicer error messages
    def name: Option[String] = None

    // overwriting equals here to improve console UX - see e.g. issue i7071 for context
    // NOTE: equality against raw String is deliberately supported (and asymmetric to String.equals)
    @SuppressWarnings(Array("org.wartremover.warts.IsInstanceOf"))
    def canEqual(a: Any): Boolean =
      a.isInstanceOf[AbstractLengthLimitedString] || a.isInstanceOf[String]

    override def equals(that: Any): Boolean =
      that match {
        case that: AbstractLengthLimitedString =>
          // two length-limited strings are equal only if content AND limit match
          that.canEqual(this) && this.str == that.str && this.maxLength == that.maxLength
        case that: String => that.canEqual(this) && this.str == that
        case _ => false
      }

    override def hashCode(): Int = str.hashCode()

    // invariant enforced at construction time for every subclass
    require(
      str.length <= maxLength,
      s"The given ${name.getOrElse("string")} has a maximum length of $maxLength but a ${name
        .getOrElse("string")} of length ${str.length} ('$str') was given",
    )

    def unwrap: String = str
    def toProtoPrimitive: String = str

    override def toString: String = str

    def nonEmpty: Boolean = str.nonEmpty
  }

  /** This trait wraps a String that is limited to a certain maximum length.
    * Classes implementing this trait expose `create` and `tryCreate` methods to safely (and non-safely) construct
    * such a String.
    *
    * The canonical use case for [[LengthLimitedString]]s is ensuring that we don't write too long strings into the database:
    * Oracle has a length-limit of 1000 Unicode characters for the ordinary String type `NVARCHAR2` and we are trying to avoid
    * the use of CLOB (as it has pitfalls regarding implicits).
    * This validation generally occurs on the server side and not on the client side. Concretely, this means that the
    * Admin API and Ledger API gRPC services is the point where we validate that the received Protobuf Strings are not too long
    * (and convert them into [[LengthLimitedString]]s). On the client side, e.g. at the console, we generally take normal String types.
    * The console command `set_display_name` and service [[com.digitalasset.canton.participant.admin.grpc.GrpcPartyNameManagementService]]
    * validating `request.displayName` illustrate this.
    *
    * As a rule of thumb: whenever you want to create a column that uses a NVARCHAR2 in Oracle, the value you write to
    * it should use a LengthLimitedString.
    *
    * Some more background on the Oracle issues:
    * NVARCHAR and NVARCHAR2 have both by default a 4000 byte limit, but unicode uses 4-bytes per character (and nvarchar2 uses unicode)
    * Therefore, NVARCHAR has a limit of 4000 and NVARCHAR2 has a limit of 1000 characters
    * If need be, we can extend this to 32 KB by setting the Oracle database string size to 'extended mode' (ALTER SYSTEM SET MAX_STRING_SIZE=EXTENDED)
    *
    * For longer strings, directly inherit from [[AbstractLengthLimitedString]].
    */
  sealed trait LengthLimitedString extends AbstractLengthLimitedString {
    require(
      maxLength > 0 && maxLength <= LengthLimitedString.maxOracleStringLength,
      s"MaxLength needs to be positive and smaller equal than ${LengthLimitedString.maxOracleStringLength} but was $maxLength",
    )

    // concatenation widens the limit to the sum of both limits, so the result always satisfies its own invariant
    def tryConcatenate(that: LengthLimitedString): LengthLimitedStringVar =
      new LengthLimitedStringVar(this.unwrap + that.unwrap, this.maxLength + that.maxLength)()

    def tryConcatenate(that: String): LengthLimitedStringVar =
      new LengthLimitedStringVar(this.unwrap + that, this.maxLength + that.length)()
  }

  object LengthLimitedString {
    // Max length of unicode strings we can save as String types in Oracle columns - this can be increased to
    // 1000 for NVARCHAR2 but we set it to 300 for now since we don't need a higher limit and rather want to stay on the
    // conservative side
    val maxOracleStringLength = 300
    // In general, if you would create a case class that would simply wrap a `LengthLimitedString`, use a type alias instead
    // Some very frequently-used classes (like `Identifier` or `DomainAlias`) are however given their 'own' case class
    // despite essentially being a wrapper around `LengthLimitedString255` (because the documentation UX is nicer this way,
    // and one can e.g. write `Fingerprint.tryCreate` instead of `LengthLimitedString68.tryCreate`)
    type DisplayName = String255
    type TopologyRequestId = String255
    type DarName = String255

    // Shared error message; the offending string is truncated to maxLength + 50 characters to keep logs readable.
    def errorMsg(tooLongStr: String, maxLength: Int, name: Option[String] = None): String =
      s"The given ${name.getOrElse("string")} has a maximum length of $maxLength but a ${name
        .getOrElse("string")} of length ${tooLongStr.length} ('${tooLongStr.limit(maxLength + 50)}.') was given"

    val defaultMaxLength = 255

    def tryCreate(str: String, maxLength: Int, name: Option[String] = None): LengthLimitedString = {
      new LengthLimitedStringVar(str, maxLength)(name)
    }

    /** Returns a fresh random UUID as a String36 (a UUID serializes to exactly 36 characters). */
    def getUuid: String36 = String36.tryCreate(UUID.randomUUID().toString)

    def create(
        str: String,
        maxLength: Int,
        name: Option[String] = None,
    ): Either[String, LengthLimitedString] = {
      Either.cond(
        str.length <= maxLength,
        new LengthLimitedStringVar(str, maxLength)(name),
        errorMsg(str, maxLength, name),
      )
    }
    def fromProtoPrimitive(
        str: String,
        name: Option[String] = None,
    ): ParsingResult[LengthLimitedString] =
      LengthLimitedString
        .create(str, defaultMaxLength, name)
        .leftMap(e => ProtoInvariantViolation(e))

    // Should be used rarely - most of the time SetParameter[String255] etc.
    // (defined through LengthLimitedStringCompanion) should be used
    @SuppressWarnings(Array("com.digitalasset.canton.SlickString"))
    implicit val setParameterLengthLimitedString: SetParameter[LengthLimitedString] = (v, pp) =>
      pp.setString(v.unwrap)
    // Commented out so this function never accidentally throws
    // implicit def getResultLengthLimitedString: GetResult[LengthLimitedString] =
    //   throw new UnsupportedOperationException(
    //     "Avoid attempting to read a generic LengthLimitedString from the database, as this may lead to unexpected " +
    //       "equality-comparisons (since a LengthLimitedString comparison also includes the maximum length and not only the string-content). " +
    //       "Instead refactor your code to expect a specific LengthLimitedString when reading from the database (e.g. via GetResult[String255]). " +
    //       "If you really need this functionality, then you can add this method again. ")

    implicit val orderingLengthLimitedString: Ordering[LengthLimitedString] =
      Ordering.by[LengthLimitedString, String](_.str)
    implicit val lengthLimitedStringOrder: Order[LengthLimitedString] =
      Order.by[LengthLimitedString, String](_.str)

    // NOTE(review): the description always reports `defaultMaxLength` (255) even when this failure is
    // produced by a companion with a smaller limit (e.g. String3) — see
    // LengthLimitedStringCompanion.lengthLimitedStringReader; the reported limit can be misleading there.
    final case class InvalidLengthString(str: String) extends FailureReason {
      override def description: String =
        s"The string you gave for this configuration setting ('$str') had size ${str.length}, but we require a string with length <= $defaultMaxLength for this configuration setting"
    }
  }

  /** A single-character string. */
  final case class String1(str: String)(override val name: Option[String] = None)
      extends LengthLimitedString {
    override def maxLength: Int = String1.maxLength
  }
  object String1 extends LengthLimitedStringCompanion[String1] {
    // safe by construction: a single char always satisfies maxLength = 1
    def fromChar(c: Char): String1 = checked(new String1(c.toString)(None))

    override def maxLength: Int = 1

    override protected def factoryMethod(str: String)(name: Option[String]): String1 =
      new String1(str)(name)
  }

  /** Limit used for enum names. */
  final case class String3(str: String)(override val name: Option[String] = None)
      extends LengthLimitedString {
    override def maxLength: Int = String3.maxLength
  }

  object String3 extends LengthLimitedStringCompanion[String3] {
    override def maxLength: Int = 3

    override protected def factoryMethod(str: String)(name: Option[String]): String3 =
      new String3(str)(name)
  }

  /** Limit used by a UUID. */
  final case class String36(str: String)(override val name: Option[String] = None)
      extends LengthLimitedString {
    override def maxLength: Int = String36.maxLength

    // widening conversion: 36 <= 255, so this can never throw
    def asString255: String255 = String255.tryCreate(str, name)
  }

  object String36 extends LengthLimitedStringCompanion[String36] {
    override def maxLength: Int = 36

    override protected def factoryMethod(str: String)(name: Option[String]): String36 =
      new String36(str)(name)
  }

  /** Limit used by a hash (SHA256 in particular) in a [[com.digitalasset.canton.topology.UniqueIdentifier]].
    *
    * @see com.digitalasset.canton.topology.UniqueIdentifier for documentation on its origin
    */
  final case class String68(str: String)(override val name: Option[String] = None)
      extends LengthLimitedString {
    override def maxLength: Int = String68.maxLength
  }

  object String68 extends LengthLimitedStringCompanion[String68] {
    override def maxLength: Int = 68

    override def factoryMethod(str: String)(name: Option[String]): String68 =
      new String68(str)(name)
  }

  /** Limit used by a [[com.digitalasset.canton.sequencing.protocol.MessageId]]. */
  final case class String73(str: String)(override val name: Option[String] = None)
      extends LengthLimitedString {
    override def maxLength: Int = String73.maxLength
  }

  object String73 extends LengthLimitedStringCompanion[String73] {
    override def maxLength: Int = 73

    override protected def factoryMethod(str: String)(name: Option[String]): String73 =
      new String73(str)(name)
  }

  final case class String100(str: String)(override val name: Option[String] = None)
      extends LengthLimitedString {
    override def maxLength: Int = String100.maxLength
  }
  object String100 extends LengthLimitedStringCompanion[String100] {
    override def maxLength: Int = 100
    override protected def factoryMethod(str: String)(name: Option[String]): String100 =
      new String100(str)(name)
  }

  /** Limit used by [[com.digitalasset.canton.topology.Identifier]].
    *
    * @see com.digitalasset.canton.topology.Identifier for documentation on its origin
    */
  final case class String185(str: String)(override val name: Option[String] = None)
      extends LengthLimitedString {
    override def maxLength: Int = String185.maxLength
  }

  object String185 extends LengthLimitedStringCompanion[String185] {
    override def maxLength: Int = 185

    override def factoryMethod(str: String)(name: Option[String]): String185 =
      new String185(str)(name)
  }

  /** Default [[LengthLimitedString]] that should be used when in doubt.
    * 255 was chosen as it is also the limit used in the upstream code for, e.g., LedgerStrings in the upstream code
    *
    * @param name optionally set it to improve the error message. It is given as an extra argument, so the automatically generated `equals`-method doesn't use it for comparison
    */
  final case class String255(str: String)(override val name: Option[String] = None)
      extends LengthLimitedString {
    override def maxLength: Int = String255.maxLength

    // widening conversions; safe because 255 <= 300 and 255 << 2^28
    def asString300: String300 = new String300(str)(name)
    def asString1GB: String256M = new String256M(str)(name)
  }

  object String255 extends LengthLimitedStringCompanion[String255] {
    override def maxLength = 255

    override def factoryMethod(str: String)(name: Option[String]): String255 =
      new String255(str)(name)
  }

  /** Longest limited-length strings that have been needed so far.
    * Typical use case: when a 255-length identifier is combined
    * with other short suffixes or prefixes to further specialize them.
    *
    * @see com.digitalasset.canton.store.db.SequencerClientDiscriminator
    * @see com.digitalasset.canton.crypto.KeyName
    */
  final case class String300(str: String)(override val name: Option[String] = None)
      extends LengthLimitedString {
    override def maxLength: Int = String300.maxLength
  }

  object String300 extends LengthLimitedStringCompanion[String300] {
    override def maxLength = 300

    override def factoryMethod(str: String)(name: Option[String]): String300 =
      new String300(str)(name)
  }

  /** Length limitation for an [[com.digitalasset.canton.protocol.LfTemplateId]].
    * A [[com.digitalasset.canton.protocol.LfTemplateId]] consists of
    * - The module name ([[com.daml.lf.data.Ref.DottedName]])
    * - The template name ([[com.daml.lf.data.Ref.DottedName]])
    * - The package ID
    * - Two separating dots
    * Each [[com.daml.lf.data.Ref.DottedName]] can have 1000 chars ([[com.daml.lf.data.Ref.DottedName.maxLength]]).
    * So a [[com.digitalasset.canton.protocol.LfTemplateId]] serializes to 1000 + 1000 + 64 + 2 = 2066 chars.
    *
    * 2066 is beyond the string size for Oracle's `NVARCHAR2` column type unless `max_string_size` is set to `extended`.
    * Such strings may therefore be written into `VARCHAR2` columns using an encoding that does not exceed the 4000 bytes limit.
    * UTF8 is such an encoding for ASCII-only strings, but we do not yet test that `str` really contains only ASCII characters.
    */
  // NOTE: the limit is deliberately 4000 (the VARCHAR2 byte limit), not 2066 as the class name suggests.
  final case class String2066(str: String)(override val name: Option[String] = None)
      extends AbstractLengthLimitedString {
    override def maxLength: Int = String2066.maxLength
  }
  object String2066 extends LengthLimitedStringCompanion[String2066] {
    override def maxLength: Int = 4000

    override protected def factoryMethod(str: String)(name: Option[String]): String2066 =
      new String2066(str)(name)
  }

  /** Length limitation of a `TEXT` or unbounded `VARCHAR` field in postgres or `CLOB` in Oracle.
    * - Postgres `TEXT` or `VARCHAR` support up to 1GB storage. That is at least `2 ^ 28` characters
    *   in UTF8 encoding as each character needs at most 4 bytes.
    * - Oracle `CLOB` supports up to 4GB storage, i.e., at least `2 ^ 30` UTF8 characters
    *
    * `TEXT`/`VARCHAR`/`CLOB` are only used for the following values (none are indices):
    * - daml_packages.source_description
    * - service_agreements.agreement_text
    * - topology_transactions.ignore_reason
    */
  final case class String256M(str: String)(override val name: Option[String] = None)
      extends AbstractLengthLimitedString {
    override def maxLength: Int = String256M.maxLength
  }
  object String256M extends LengthLimitedStringCompanion[String256M] {
    override def maxLength: Int = 0x10000000

    override protected def factoryMethod(str: String)(name: Option[String]): String256M =
      new String256M(str)(name)
  }

  /** A length-limited string whose limit is chosen at runtime rather than fixed by type. */
  final case class LengthLimitedStringVar(override val str: String, maxLength: Int)(
      override val name: Option[String] = None
  ) extends LengthLimitedString
  object LengthLimitedStringVar {
    private[this] def apply(str: String): LengthLimitedStringVar =
      throw new UnsupportedOperationException("Use create or tryCreate methods")
  }

  /** Trait that implements method commonly needed in the companion object of an [[AbstractLengthLimitedString]] */
  trait LengthLimitedStringCompanion[A <: AbstractLengthLimitedString] {

    val empty: A = checked(factoryMethod("")(None))

    /** The maximum string length. Should not be overwritten with `val` to avoid initialization issues. */
    def maxLength: Int

    /** Factory method for creating a string.
      * @throws java.lang.IllegalArgumentException if `str` is longer than [[maxLength]]
      */
    protected def factoryMethod(str: String)(name: Option[String]): A

    def create(str: String, name: Option[String] = None): Either[String, A] =
      Either.cond(
        str.length <= maxLength,
        factoryMethod(str)(name),
        LengthLimitedString.errorMsg(str, maxLength, name),
      )

    // Hide the synthetic case-class apply of implementors routed through this companion.
    private[this] def apply(str: String): A =
      throw new UnsupportedOperationException("Use create or tryCreate methods")

    def tryCreate(str: String, name: Option[String] = None): A =
      factoryMethod(str)(name)

    def fromProtoPrimitive(str: String, name: String): ParsingResult[A] =
      create(str, Some(name)).leftMap(e => ProtoInvariantViolation(e))

    implicit val lengthLimitedStringOrder: Order[A] =
      Order.by[A, String](_.str)

    implicit val encodeLengthLimitedString: Encoder[A] =
      Encoder.encodeString.contramap[A](_.unwrap)

    @SuppressWarnings(Array("com.digitalasset.canton.SlickString"))
    implicit val setParameterLengthLimitedString: SetParameter[A] = (v, pp) =>
      pp.setString(v.unwrap)
    implicit val getResultLengthLimitedString: GetResult[A] =
      GetResult(r => tryCreate(r.nextString()))

    @SuppressWarnings(Array("com.digitalasset.canton.SlickString"))
    implicit val setParameterOptLengthLimitedString: SetParameter[Option[A]] = (v, pp) =>
      pp.setStringOption(v.map(_.unwrap))
    implicit val getResultOptLengthLimitedString: GetResult[Option[A]] =
      GetResult(r => r.nextStringOption().map(tryCreate(_)))

    // NOTE(review): this reader also rejects the empty string, even though `empty: A` is a valid
    // value of the type — presumably intentional for config values, but worth confirming. It also
    // reports InvalidLengthString, whose message hard-codes defaultMaxLength (255) regardless of
    // this companion's actual maxLength.
    implicit val lengthLimitedStringReader: ConfigReader[A] = {
      ConfigReader.fromString[A] { str =>
        Either.cond(
          str.nonEmpty && str.length <= maxLength,
          factoryMethod(str)(None),
          InvalidLengthString(str),
        )
      }
    }

    implicit val lengthLimitedStringWriter: ConfigWriter[A] = ConfigWriter.toString(_.unwrap)
  }

  /** Trait for case classes that are a wrapper around a [[LengthLimitedString]].
    * @see com.digitalasset.canton.crypto.CertificateId for an example
    */
  trait LengthLimitedStringWrapper {
    protected val str: LengthLimitedString
    def unwrap: String = str.unwrap
    def toProtoPrimitive: String = str.unwrap
    override def toString: String = unwrap
    // overwriting equals here to improve console UX - see e.g. issue i7071 for context
    @SuppressWarnings(Array("org.wartremover.warts.IsInstanceOf"))
    def canEqual(a: Any): Boolean =
      a.isInstanceOf[LengthLimitedStringWrapper] || a.isInstanceOf[String]

    override def equals(that: Any): Boolean =
      that match {
        case that: LengthLimitedStringWrapper =>
          // wrappers of different concrete classes are never equal, even with identical content
          that.canEqual(this) && this.getClass == that.getClass && this.str == that.str
        case that: String => that.canEqual(this) && this.unwrap == that
        case _ => false
      }

    override def hashCode(): Int = unwrap.hashCode()
  }

  /** Trait that implements utility methods to avoid boilerplate in the companion object of a case class that wraps a
    * [[LengthLimitedString]] type using [[LengthLimitedStringWrapper]].
    *
    * @see com.digitalasset.canton.crypto.CertificateId for an example
    */
  trait LengthLimitedStringWrapperCompanion[
      A <: LengthLimitedString,
      Wrapper <: LengthLimitedStringWrapper,
  ] {

    def instanceName: String
    protected def companion: LengthLimitedStringCompanion[A]
    protected def factoryMethodWrapper(str: A): Wrapper

    def create(str: String): Either[String, Wrapper] =
      companion.create(str, instanceName.some).map(factoryMethodWrapper)

    def tryCreate(str: String): Wrapper = factoryMethodWrapper(
      companion.tryCreate(str, instanceName.some)
    )

    def fromProtoPrimitive(str: String): ParsingResult[Wrapper] =
      companion.fromProtoPrimitive(str, instanceName).map(factoryMethodWrapper)

    implicit val wrapperOrder: Order[Wrapper] =
      Order.by[Wrapper, String](_.unwrap)

    implicit val encodeWrapper: Encoder[Wrapper] =
      Encoder.encodeString.contramap[Wrapper](_.unwrap)

    // Instances for slick (db) queries
    @SuppressWarnings(Array("com.digitalasset.canton.SlickString"))
    implicit val setParameterWrapper: SetParameter[Wrapper] = (v, pp) =>
      pp.setString(v.toProtoPrimitive)
    implicit val getResultWrapper: GetResult[Wrapper] = GetResult(r =>
      fromProtoPrimitive(r.nextString()).valueOr(err =>
        throw new DbDeserializationException(err.toString)
      )
    )

    @SuppressWarnings(Array("com.digitalasset.canton.SlickString"))
    implicit val setParameterOptionWrapper: SetParameter[Option[Wrapper]] = (v, pp) =>
      pp.setStringOption(v.map(_.toProtoPrimitive))
    implicit val getResultOptionWrapper: GetResult[Option[Wrapper]] = GetResult { r =>
      r.nextStringOption()
        .traverse(fromProtoPrimitive)
        .valueOr(err => throw new DbDeserializationException(err.toString))
    }
  }

  /** The name of a configured node instance. Restricted to `[a-zA-Z0-9_-]`, non-empty, and at most
    * [[InstanceName.maxLength]] characters; all three invariants are checked at construction and
    * violations raise [[InstanceName.InvalidInstanceName]].
    */
  final case class InstanceName private (unwrap: String) extends NoCopy with PrettyPrinting {

    // the regex alone admits the empty string (it uses `*`), hence the separate emptiness check below
    if (!unwrap.matches("^[a-zA-Z0-9_-]*$")) {
      throw InvalidInstanceName(
        show"Node name contains invalid characters (allowed: [a-zA-Z0-9_-]): " +
          show"${unwrap.limit(InstanceName.maxLength).toString.doubleQuoted}"
      )
    }

    if (unwrap.isEmpty) {
      throw InvalidInstanceName(
        "Empty node name."
      )
    }

    if (unwrap.length > InstanceName.maxLength) {
      throw InvalidInstanceName(
        show"Node name is too long. Max length: ${InstanceName.maxLength}. Length: ${unwrap.length}. " +
          show"Name: ${unwrap.limit(InstanceName.maxLength).toString.doubleQuoted}"
      )
    }

    def toProtoPrimitive: String = unwrap

    override def pretty: Pretty[InstanceName] = prettyOfParam(_.unwrap.unquoted)
  }

  object InstanceName {
    val maxLength: Int = 30

    /** Safe constructor: converts the construction-time exception into a Left. */
    def create(str: String): Either[InvalidInstanceName, InstanceName] = Either
      .catchOnly[InvalidInstanceName](tryCreate(str))

    /** Unsafe constructor: throws [[InvalidInstanceName]] on invalid input. */
    def tryCreate(str: String): InstanceName = InstanceName(str)

    def tryFromStringMap[A](map: Map[String, A]): Map[InstanceName, A] = map.map { case (n, c) =>
      tryCreate(n) -> c
    }

    final case class InvalidInstanceName(override val description: String)
        extends RuntimeException(description)
        with FailureReason

    // pureconfig readers/writers so InstanceName (and maps keyed by it) can appear in config files
    implicit val instanceNameReader: ConfigReader[InstanceName] = ConfigReader.fromString(create)
    implicit def instanceNameKeyReader[A: ConfigReader]: ConfigReader[Map[InstanceName, A]] =
      pureconfig.configurable.genericMapReader(create)

    implicit val instanceNameWriter: ConfigWriter[InstanceName] = ConfigWriter.toString(_.unwrap)
    implicit def instanceNameKeyWriter[A: ConfigWriter]: ConfigWriter[Map[InstanceName, A]] =
      pureconfig.configurable.genericMapWriter(_.unwrap)

    implicit val encodeInstanceName: Encoder[InstanceName] =
      Encoder.encodeString.contramap(_.unwrap)
    implicit val encodeKeyInstanceName: KeyEncoder[InstanceName] =
      KeyEncoder.encodeKeyString.contramap(_.unwrap)
  }

}
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.config

import cats.syntax.option.*
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.serialization.ProtoConverter
import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.digitalasset.canton.time.admin.v0

/** Configuration for the domain time tracker.
  * @param observationLatency Even if the host and domain clocks are perfectly synchronized there will always be some latency
  *                           for an event to be delivered (storage, transmission, processing).
  *                           If the current host time exceeds the next expected timestamp by this observation latency
  *                           then we will request a time proof (unless we have received a recent event within the
  *                           patience duration described below).
  * @param patienceDuration We will only request a time proof if this given duration has elapsed since we last received
  *                         an event (measured using the host clock). This prevents requesting timestamps when we
  *                         are observing events from the domain (particularly if the local node is catching up on
  *                         old activity).
  * @param minObservationDuration We will try to ensure that we receive a time at least once during this duration (measured
  *                               using the host clock). This is practically useful if there is no other activity on
  *                               the domain as the sequencer client will then have an event to acknowledge allowing
  *                               sequenced events to be pruned before this point. We may in the future use this to monitor
  *                               clock skews between the host and domain.
  * @param timeRequest configuration for how we ask for a time proof.
  */
final case class DomainTimeTrackerConfig(
    observationLatency: NonNegativeFiniteDuration =
      DomainTimeTrackerConfig.defaultObservationLatency,
    patienceDuration: NonNegativeFiniteDuration = DomainTimeTrackerConfig.defaultPatienceDuration,
    minObservationDuration: NonNegativeFiniteDuration =
      DomainTimeTrackerConfig.defaultMinObservationDuration,
    timeRequest: TimeProofRequestConfig = TimeProofRequestConfig(),
) extends PrettyPrinting {

  /** Serializes this configuration to its protobuf representation. */
  def toProtoV0: v0.DomainTimeTrackerConfig = v0.DomainTimeTrackerConfig(
    observationLatency.toProtoPrimitive.some,
    patienceDuration.toProtoPrimitive.some,
    minObservationDuration.toProtoPrimitive.some,
    timeRequest.toProtoV0.some,
  )

  // only show fields that deviate from their defaults to keep log output compact
  override def pretty: Pretty[DomainTimeTrackerConfig] = prettyOfClass(
    paramIfNotDefault(
      "observationLatency",
      _.observationLatency,
      DomainTimeTrackerConfig.defaultObservationLatency,
    ),
    paramIfNotDefault(
      "patienceDuration",
      _.patienceDuration,
      DomainTimeTrackerConfig.defaultPatienceDuration,
    ),
    paramIfNotDefault(
      "minObservationDuration",
      _.minObservationDuration,
      DomainTimeTrackerConfig.defaultMinObservationDuration,
    ),
    paramIfNotDefault("timeRequest", _.timeRequest, TimeProofRequestConfig()),
  )

}

object DomainTimeTrackerConfig {

  private val defaultObservationLatency: NonNegativeFiniteDuration =
    NonNegativeFiniteDuration.ofMillis(250)
  private val defaultPatienceDuration: NonNegativeFiniteDuration =
    NonNegativeFiniteDuration.ofMillis(500)
  private val defaultMinObservationDuration: NonNegativeFiniteDuration =
    NonNegativeFiniteDuration.ofHours(24)

  /** Deserializes the configuration from its protobuf representation, failing if any of the
    * required fields is missing or malformed.
    */
  def fromProto(
      configP: v0.DomainTimeTrackerConfig
  ): ParsingResult[DomainTimeTrackerConfig] =
    for {
      observationLatency <- ProtoConverter.parseRequired(
        NonNegativeFiniteDuration.fromProtoPrimitive("observation_latency"),
        "observation_latency",
        configP.observationLatency,
      )
      patienceDuration <- ProtoConverter.parseRequired(
        NonNegativeFiniteDuration.fromProtoPrimitive("patience_duration"),
        "patience_duration",
        configP.patienceDuration,
      )
      minObservationDuration <- ProtoConverter.parseRequired(
        // field label fixed to snake_case for consistency with the sibling labels above
        NonNegativeFiniteDuration.fromProtoPrimitive("min_observation_duration"),
        "min_observation_duration",
        configP.minObservationDuration,
      )
      timeProofRequestConfig <- ProtoConverter.parseRequired(
        TimeProofRequestConfig.fromProtoV0,
        "time_proof_request",
        configP.timeProofRequest,
      )
    } yield DomainTimeTrackerConfig(
      observationLatency,
      patienceDuration,
      minObservationDuration,
      timeProofRequestConfig,
    )
}
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.config

/** Detailed logging configurations
 *
 * This section allows to configure additional data such as transaction details to be logged to the standard logback system
 *
 * @param api Configuration settings for the ApiRequestLogger
 * @param eventDetails If set to true, we will log substantial details of internal messages being processed. To be disabled in production!
+ * @param logConfigOnStartup If set to true (default), it will log the config on startup (omitting sensitive details) + * @param logConfigWithDefaults If set to true (default false), the default values of the config will be included + */ +final case class LoggingConfig( + api: ApiLoggingConfig = ApiLoggingConfig(), + eventDetails: Boolean = false, + logConfigOnStartup: Boolean = true, + logConfigWithDefaults: Boolean = false, +) diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/ProcessingTimeouts.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/ProcessingTimeouts.scala new file mode 100644 index 0000000000..12cbad39a7 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/ProcessingTimeouts.scala @@ -0,0 +1,86 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.config + +import com.google.common.annotations.VisibleForTesting + +import scala.concurrent.duration.* + +/** Configuration for internal await timeouts + * + * @param unbounded timeout on how long "unbounded" operations can run. should be infinite in theory. 
+ * @param io timeout for disk based operations + * @param default default finite processing timeout + * @param network timeout for things related to networking + * @param shutdownProcessing timeout used for shutdown of some processing where we'd like to keep the result (long) + * @param shutdownNetwork timeout used for shutdown where we interact with some remote system + * @param shutdownShort everything else shutdown related (default) + * @param closing our closing time (which should be strictly larger than any of the shutdown values) + * @param verifyActive how long should we wait for the domain to tell us whether we are active or not + * @param inspection timeout for the storage inspection commands (can run a long long time) + * @param storageMaxRetryInterval max retry interval for storage + * @param activeInit how long a passive replica should wait for the initialization by the active replica + * @param slowFutureWarn when using future supervision, when should we start to warn about a slow future + * @param activeInitRetryDelay delay between attempts while waiting for initialization of the active replica + * @param sequencerInfo how long are we going to try to get the sequencer connection information. setting this high means that + * connect calls will take quite a while if one of the sequencers is offline.
+ */ +final case class ProcessingTimeout( + unbounded: NonNegativeDuration = DefaultProcessingTimeouts.unbounded, + io: NonNegativeDuration = DefaultProcessingTimeouts.io, + default: NonNegativeDuration = DefaultProcessingTimeouts.default, + network: NonNegativeDuration = DefaultProcessingTimeouts.network, + shutdownProcessing: NonNegativeDuration = DefaultProcessingTimeouts.shutdownProcessing, + shutdownNetwork: NonNegativeDuration = DefaultProcessingTimeouts.shutdownNetwork, + shutdownShort: NonNegativeDuration = DefaultProcessingTimeouts.shutdownShort, + closing: NonNegativeDuration = DefaultProcessingTimeouts.closing, + inspection: NonNegativeDuration = DefaultProcessingTimeouts.inspection, + storageMaxRetryInterval: NonNegativeDuration = DefaultProcessingTimeouts.maxRetryInterval, + verifyActive: NonNegativeDuration = DefaultProcessingTimeouts.verifyActive, + activeInit: NonNegativeDuration = DefaultProcessingTimeouts.activeInit, + slowFutureWarn: NonNegativeDuration = DefaultProcessingTimeouts.slowFutureWarn, + activeInitRetryDelay: NonNegativeDuration = DefaultProcessingTimeouts.activeInitRetryDelay, + sequencerInfo: NonNegativeDuration = DefaultProcessingTimeouts.sequencerInfo, +) + +/** Reasonable default timeouts */ +object DefaultProcessingTimeouts { + val unbounded: NonNegativeDuration = NonNegativeDuration.tryFromDuration(Duration.Inf) + + /** Allow unbounded processing for io operations. This is because we retry forever upon db outages. 
+ */ + val io: NonNegativeDuration = unbounded + + val default: NonNegativeDuration = NonNegativeDuration.tryFromDuration(1.minute) + + val network: NonNegativeDuration = NonNegativeDuration.tryFromDuration(2.minute) + + val shutdownNetwork: NonNegativeDuration = NonNegativeDuration.tryFromDuration(9.seconds) + + val shutdownProcessing: NonNegativeDuration = NonNegativeDuration.tryFromDuration(60.seconds) + + val shutdownShort: NonNegativeDuration = NonNegativeDuration.tryFromDuration(3.seconds) + + val closing: NonNegativeDuration = NonNegativeDuration.tryFromDuration(10.seconds) + + val inspection: NonNegativeDuration = NonNegativeDuration.tryFromDuration(Duration.Inf) + + val maxRetryInterval: NonNegativeDuration = NonNegativeDuration.tryFromDuration(10.seconds) + + val verifyActive: NonNegativeDuration = NonNegativeDuration.tryFromDuration(30.seconds) + + val activeInit: NonNegativeDuration = NonNegativeDuration.tryFromDuration(1.minute) + + val activeInitRetryDelay: NonNegativeDuration = NonNegativeDuration.tryFromDuration(50.millis) + + val warnUnbounded: NonNegativeDuration = NonNegativeDuration.tryFromDuration(30.seconds) + + val slowFutureWarn: NonNegativeDuration = NonNegativeDuration.tryFromDuration(5.seconds) + + val sequencerInfo: NonNegativeDuration = NonNegativeDuration.tryFromDuration(30.seconds) + + @VisibleForTesting + lazy val testing: ProcessingTimeout = ProcessingTimeout() + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/QueryCostMonitoringConfig.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/QueryCostMonitoringConfig.scala new file mode 100644 index 0000000000..e624e4ee53 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/QueryCostMonitoringConfig.scala @@ -0,0 +1,16 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.config + +/** Configuration for monitoring the cost of db queries. + * + * @param every determines the duration between reports + * @param resetOnOutput determines whether the statistics will be reset after creating a report + * @param logOperations if true (default false), log every query operation + */ +final case class QueryCostMonitoringConfig( + every: NonNegativeFiniteDuration, + resetOnOutput: Boolean = true, + logOperations: Boolean = false, +) diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/RefinedNonNegativeDuration.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/RefinedNonNegativeDuration.scala new file mode 100644 index 0000000000..80b4353709 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/RefinedNonNegativeDuration.scala @@ -0,0 +1,478 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.config + +import cats.syntax.either.* +import com.digitalasset.canton.ProtoDeserializationError.ValueConversionError +import com.digitalasset.canton.config.RefinedNonNegativeDuration.{ + noisyAwaitResult, + strToFiniteDuration, +} +import com.digitalasset.canton.logging.ErrorLoggingContext +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.{DurationConverter, ParsingResult} +import com.digitalasset.canton.time.{ + NonNegativeFiniteDuration as NonNegativeFiniteDurationInternal, + PositiveSeconds as PositiveSecondsInternal, +} +import com.digitalasset.canton.util.FutureUtil.defaultStackTraceFilter +import com.digitalasset.canton.util.ShowUtil.* +import com.digitalasset.canton.util.{FutureUtil, LoggerUtil, StackTraceUtil} +import com.digitalasset.canton.{DiscardOps, checked} +import com.google.common.annotations.VisibleForTesting +import com.google.protobuf.duration.Duration as PbDuration +import io.circe.Encoder +import io.scalaland.chimney.Transformer +import org.slf4j.event.Level +import pureconfig.error.FailureReason +import pureconfig.{ConfigReader, ConfigWriter} + +import java.time.Duration as JDuration +import java.util.concurrent.TimeUnit +import scala.annotation.tailrec +import scala.concurrent.duration.* +import scala.concurrent.{Await, Future, TimeoutException} +import scala.util.{Failure, Success, Try} + +trait RefinedNonNegativeDuration[D <: RefinedNonNegativeDuration[D]] extends PrettyPrinting { + this: { + def update(newDuration: Duration): D + } => + + override def pretty: Pretty[RefinedNonNegativeDuration[D]] = prettyOfParam(_.duration) + + def duration: Duration + + def unwrap: Duration = duration + + def asFiniteApproximation: FiniteDuration + + def asJavaApproximation: JDuration = JDuration.ofMillis(asFiniteApproximation.toMillis) + 
def minusSeconds(s: Int): D = update(duration.minus(s.seconds)) + + def +(other: D): D = update(duration.plus(other.duration)) + def plusSeconds(s: Int): D = update(duration.plus(s.seconds)) + + def *(d: Double): D = update(duration * d) + + def retries(interval: Duration): Int = { + if (interval.isFinite && duration.isFinite) + Math.max(0, duration.toMillis / Math.max(1, interval.toMillis)).toInt + else Int.MaxValue + } + + /** Same as Await.result, but with this timeout */ + def await[F]( + description: => String, + logFailing: Option[Level] = None, + stackTraceFilter: Thread => Boolean = defaultStackTraceFilter, + onTimeout: TimeoutException => Unit = _ => (), + )(fut: Future[F])(implicit loggingContext: ErrorLoggingContext): F = + noisyAwaitResult( + logFailing.fold(fut)(level => FutureUtil.logOnFailure(fut, description, level = level)), + description, + timeout = duration, + stackTraceFilter = stackTraceFilter, + onTimeout = onTimeout, + ) + + /** Same as await, but not returning a value */ + def await_( + description: => String, + logFailing: Option[Level] = None, + )(fut: Future[?])(implicit loggingContext: ErrorLoggingContext): Unit = + await(description, logFailing)(fut).discard + + def toProtoPrimitive: com.google.protobuf.duration.Duration = { + val d = asJavaApproximation + com.google.protobuf.duration.Duration(d.getSeconds, d.getNano) + } +} + +trait RefinedNonNegativeDurationCompanion[D <: RefinedNonNegativeDuration[D]] { + this: { + def apply(newDuration: Duration): D + } => + + implicit val timeoutDurationEncoder: Encoder[D] = + Encoder[String].contramap(_.unwrap.toString) + + implicit val orderingRefinedDuration: Ordering[D] = Ordering.by(_.duration) + + def fromDuration(duration: Duration): Either[String, D] + + def fromProtoPrimitive( + field: String + )(durationP: PbDuration): ParsingResult[D] = + for { + duration <- DurationConverter.fromProtoPrimitive(durationP) + refinedDuration <- fromJavaDuration(duration).leftMap(err => 
ValueConversionError(field, err)) + } yield refinedDuration + + def fromProtoPrimitiveO( + field: String + )(durationPO: Option[PbDuration]): ParsingResult[D] = + for { + durationP <- ProtoConverter.required(field, durationPO) + refinedDuration <- fromProtoPrimitive(field)(durationP) + } yield refinedDuration + + def tryFromDuration(duration: Duration): D = fromDuration(duration) match { + case Left(err) => throw new IllegalArgumentException(err) + case Right(x) => x + } + + def fromJavaDuration(duration: java.time.Duration): Either[String, D] = + fromDuration(Duration.fromNanos(duration.toNanos)) + + def tryFromJavaDuration(duration: java.time.Duration): D = + tryFromDuration(Duration.fromNanos(duration.toNanos)) + + def ofMillis(millis: Long): D = apply(Duration(millis, TimeUnit.MILLISECONDS)) + + def ofSeconds(secs: Long): D = apply(Duration(secs, TimeUnit.SECONDS)) + + def ofMinutes(minutes: Long): D = apply(Duration(minutes, TimeUnit.MINUTES)) + + def ofHours(hours: Long): D = apply(Duration(hours, TimeUnit.HOURS)) + + def ofDays(days: Long): D = apply(Duration(days, TimeUnit.DAYS)) +} + +object RefinedNonNegativeDuration { + + /** Await the result of a future, logging periodically if the future is taking "too long". + * + * @param future The future to await + * @param description A description of the future, for logging + * @param timeout The timeout for the future to complete within + * @param warnAfter The amount of time to wait for the future to complete before starting to complain. 
+ * @param killAwait A kill-switch for the noisy await + */ + @SuppressWarnings(Array("org.wartremover.warts.TryPartial")) + private def noisyAwaitResult[T]( + future: Future[T], + description: => String, + timeout: Duration = Duration.Inf, + warnAfter: Duration = 1.minute, + killAwait: Unit => Boolean = _ => false, + stackTraceFilter: Thread => Boolean = defaultStackTraceFilter, + onTimeout: TimeoutException => Unit = _ => (), + )(implicit loggingContext: ErrorLoggingContext): T = { + val warnAfterAdjusted = { + // if warnAfter is larger than timeout, make a sensible choice + if (timeout.isFinite && warnAfter.isFinite && warnAfter > timeout) { + timeout / 2 + } else warnAfter + } + + // Use Await.ready instead of Await.result to be able to tell the difference between the awaitable throwing a + // TimeoutException and a TimeoutException being thrown because the awaitable is not ready. + def ready(f: Future[T], d: Duration): Try[Future[T]] = Try(Await.ready(f, d)) + + def log(level: Level, message: String): Unit = LoggerUtil.logAtLevel(level, message) + + // TODO(i4008) increase the log level to WARN + val res = + noisyAwaitResultForTesting( + future, + description, + timeout, + log, + () => System.nanoTime(), + warnAfterAdjusted, + killAwait, + stackTraceFilter, + )(ready) + + res match { + case Failure(ex: TimeoutException) => onTimeout(ex) + case _ => () + } + + res.get + } + + @VisibleForTesting + private[config] def noisyAwaitResultForTesting[T]( + future: Future[T], + description: => String, + timeout: Duration, + log: (Level, String) => Unit, + nanoTime: () => Long, + warnAfter: Duration, + killAwait: Unit => Boolean = _ => false, + stackTraceFilter: Thread => Boolean, + )(ready: (Future[T], Duration) => Try[Future[T]]): Try[T] = { + + require(warnAfter >= Duration.Zero, show"warnAfter must not be negative: $warnAfter") + + val startTime = nanoTime() + + @tailrec def retry(remaining: Duration, interval: Duration): Try[T] = { + + if (killAwait(())) { + throw 
new TimeoutException(s"Noisy await result $description cancelled with kill-switch.") + } + + val toWait = remaining + .min(interval) + // never wait more than 10 seconds to prevent starving on excessively long awaits + .min(10.seconds) + + if (toWait > Duration.Zero) { + ready(future, toWait) match { + case Success(future) => + future.value.getOrElse( + Failure( + new RuntimeException( + s"Future $future not complete after successful Await.ready, this should never happen" + ) + ) + ) + + case Failure(_: TimeoutException) => + val now = nanoTime() + val waited = Duration(now - startTime, NANOSECONDS) + val waitedReadable = LoggerUtil.roundDurationForHumans(waited) + log( + if (waited >= warnAfter) Level.INFO else Level.DEBUG, + s"Task $description still not completed after $waitedReadable. Continue waiting...", + ) + val leftOver = timeout.minus(waited) + retry( + leftOver, + if (waited < warnAfter) + warnAfter - waited // this enables warning at the earliest time we are asked to warn + else warnAfter / 2, + ) + + case Failure(exn) => Failure(exn) + } + + } else { + val stackTraces = StackTraceUtil.formatStackTrace(stackTraceFilter) + val msg = s"Task $description did not complete within $timeout." + log(Level.WARN, s"$msg Stack traces:\n$stackTraces") + Failure(new TimeoutException(msg)) + } + } + + retry(timeout, warnAfter) + } + + def strToFiniteDuration(str: String): Either[String, FiniteDuration] = + Either + .catchOnly[NumberFormatException](Duration.apply(str)) + .leftMap(_.getMessage) + .flatMap(duration => + Some(duration) + .collect { case d: FiniteDuration => d } + .toRight("Duration is not a finite duration") + ) +} + +/** Duration class used for non-negative durations. 
+ * + * There are two options: either it's a non-negative duration or an infinite duration + */ +final case class NonNegativeDuration(duration: Duration) + extends RefinedNonNegativeDuration[NonNegativeDuration] { + require(duration >= Duration.Zero, s"Expecting non-negative duration, found: $duration") + + def update(newDuration: Duration): NonNegativeDuration = NonNegativeDuration(newDuration) + + def asFiniteApproximation: FiniteDuration = duration match { + case fd: FiniteDuration => fd + case _: Duration.Infinite => NonNegativeDuration.maxTimeout + } + + private[canton] def toInternal: NonNegativeFiniteDurationInternal = + checked(NonNegativeFiniteDurationInternal.tryCreate(asJavaApproximation)) +} + +object NonNegativeDuration extends RefinedNonNegativeDurationCompanion[NonNegativeDuration] { + val maxTimeout: FiniteDuration = 100000.days + val Zero: NonNegativeDuration = NonNegativeDuration(Duration.Zero) + + def fromDuration(duration: Duration): Either[String, NonNegativeDuration] = duration match { + case x: FiniteDuration => + Either.cond(x.length >= 0, NonNegativeDuration(x), s"Duration ${x} is negative!") + case Duration.Inf => Right(NonNegativeDuration(Duration.Inf)) + case x => Left(s"Duration ${x} is not a valid duration that can be used for timeouts.") + } +} + +/** Duration class used for non-negative finite durations. 
*/ +final case class NonNegativeFiniteDuration(underlying: FiniteDuration) + extends RefinedNonNegativeDuration[NonNegativeFiniteDuration] { + + require(underlying >= Duration.Zero, s"Duration ${duration} is negative") + + def duration: Duration = underlying + def asJava: JDuration = JDuration.ofNanos(duration.toNanos) + + def update(newDuration: Duration): NonNegativeFiniteDuration = newDuration match { + case _: Duration.Infinite => + throw new IllegalArgumentException(s"Duration must be finite, but is Duration.Inf") + case duration: FiniteDuration => NonNegativeFiniteDuration(duration) + } + + def asFiniteApproximation: FiniteDuration = underlying +} + +object NonNegativeFiniteDuration + extends RefinedNonNegativeDurationCompanion[NonNegativeFiniteDuration] { + val Zero: NonNegativeFiniteDuration = NonNegativeFiniteDuration(Duration.Zero) + + def apply(duration: Duration): NonNegativeFiniteDuration = NonNegativeFiniteDuration + .fromDuration(duration) + .fold(err => throw new IllegalArgumentException(err), identity) + + def apply(duration: JDuration): NonNegativeFiniteDuration = + NonNegativeFiniteDuration.tryFromJavaDuration(duration) + + implicit val forgetRefinementJDuration: Transformer[NonNegativeFiniteDuration, JDuration] = + _.asJava + implicit val forgetRefinementFDuration: Transformer[NonNegativeFiniteDuration, FiniteDuration] = + _.underlying + + def fromDuration(duration: Duration): Either[String, NonNegativeFiniteDuration] = duration match { + case x: FiniteDuration => + Either.cond(x.length >= 0, NonNegativeFiniteDuration(x), s"Duration $x is negative!") + case Duration.Inf => Left(s"Expecting finite duration but found Duration.Inf") + case x => Left(s"Duration $x is not a valid duration that can be used for timeouts.") + } + + private[canton] final case class NonNegativeFiniteDurationError(input: String, reason: String) + extends FailureReason { + override def description: String = + s"Cannot convert `$input` to a non-negative finite duration: 
$reason" + } + + private[canton] implicit val nonNegativeFiniteDurationReader + : ConfigReader[NonNegativeFiniteDuration] = + ConfigReader.fromString[NonNegativeFiniteDuration] { str => + (for { + duration <- strToFiniteDuration(str) + nonNegativeFiniteDuration <- fromDuration(duration) + } yield nonNegativeFiniteDuration).leftMap(NonNegativeFiniteDurationError(str, _)) + } + + private[canton] implicit val nonNegativeFiniteDurationWriter + : ConfigWriter[NonNegativeFiniteDuration] = + // avoid pretty printing by converting the underlying value to string + ConfigWriter.toString(_.underlying.toString) +} + +/** Duration class used for positive finite durations. */ +final case class PositiveFiniteDuration(underlying: FiniteDuration) + extends RefinedNonNegativeDuration[PositiveFiniteDuration] { + + require(underlying > Duration.Zero, s"Duration ${duration} is not positive") + + def duration: Duration = underlying + def asJava: JDuration = JDuration.ofNanos(duration.toNanos) + + def update(newDuration: Duration): PositiveFiniteDuration = newDuration match { + case _: Duration.Infinite => + throw new IllegalArgumentException(s"Duration must be finite, but is Duration.Inf") + case duration: FiniteDuration => PositiveFiniteDuration(duration) + } + + def asFiniteApproximation: FiniteDuration = underlying +} + +object PositiveFiniteDuration extends RefinedNonNegativeDurationCompanion[PositiveFiniteDuration] { + def apply(duration: Duration): PositiveFiniteDuration = PositiveFiniteDuration + .fromDuration(duration) + .fold(err => throw new IllegalArgumentException(err), identity) + + def fromDuration(duration: Duration): Either[String, PositiveFiniteDuration] = duration match { + case x: FiniteDuration => + Either.cond(x.length > 0, PositiveFiniteDuration(x), s"Duration $x is not positive!") + case Duration.Inf => Left(s"Expecting finite duration but found Duration.Inf") + case x => Left(s"Duration $x is not a valid duration that can be used for timeouts.") + } + + 
private[canton] final case class PositiveFiniteDurationError(input: String, reason: String) + extends FailureReason { + override def description: String = + s"Cannot convert `$input` to a positive finite duration: $reason" + } + + private[canton] implicit val positiveFiniteDurationReader + : ConfigReader[PositiveFiniteDuration] = { + ConfigReader.fromString[PositiveFiniteDuration] { str => + (for { + duration <- strToFiniteDuration(str) + positiveFiniteDuration <- PositiveFiniteDuration.fromDuration(duration) + } yield positiveFiniteDuration).leftMap(PositiveFiniteDurationError(str, _)) + } + } + + private[canton] implicit val positiveFiniteDurationWriter: ConfigWriter[PositiveFiniteDuration] = + // avoid pretty printing by converting the underlying value to string + ConfigWriter.toString(_.underlying.toString) +} + +/** Duration class used for positive durations that are rounded to the second. */ +final case class PositiveDurationSeconds(underlying: FiniteDuration) + extends RefinedNonNegativeDuration[PositiveDurationSeconds] { + + require(underlying > Duration.Zero, s"Duration ${duration} is not positive") + require( + PositiveDurationSeconds.isRoundedToTheSecond(underlying), + s"Duration ${duration} is not rounded to the second", + ) + + def duration: Duration = underlying + def asJava: JDuration = JDuration.ofNanos(duration.toNanos) + + def update(newDuration: Duration): PositiveDurationSeconds = newDuration match { + case _: Duration.Infinite => + throw new IllegalArgumentException(s"Duration must be finite, but is Duration.Inf") + case duration: FiniteDuration => PositiveDurationSeconds(duration) + } + + def asFiniteApproximation: FiniteDuration = underlying + + private[canton] def toInternal: PositiveSecondsInternal = checked( + PositiveSecondsInternal.tryCreate( + asJava + ) + ) +} + +object PositiveDurationSeconds + extends RefinedNonNegativeDurationCompanion[PositiveDurationSeconds] { + private def isRoundedToTheSecond(duration: FiniteDuration): Boolean = 
+ duration == Duration(duration.toSeconds, SECONDS) + + def apply(duration: Duration): PositiveDurationSeconds = PositiveDurationSeconds + .fromDuration(duration) + .fold(err => throw new IllegalArgumentException(err), identity) + + def apply(duration: JDuration): PositiveDurationSeconds = + PositiveDurationSeconds.tryFromJavaDuration(duration) + + def fromDuration(duration: Duration): Either[String, PositiveDurationSeconds] = + duration match { + case x: FiniteDuration => + for { + _ <- Either.cond(x.length > 0, (), s"Duration $x is not positive") + _ <- Either.cond( + isRoundedToTheSecond(x), + (), + s"Duration ${duration} is not rounded to the second", + ) + } yield PositiveDurationSeconds(x) + case Duration.Inf => Left(s"Expecting finite duration but found Duration.Inf") + case x => Left(s"Duration $x is not a valid duration that can be used for timeouts.") + } + + def fromProtoPrimitive(durationP: PbDuration): Either[String, PositiveDurationSeconds] = + fromJavaDuration(JDuration.of(durationP.seconds, java.time.temporal.ChronoUnit.SECONDS)) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/SequencerConnectionConfig.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/SequencerConnectionConfig.scala new file mode 100644 index 0000000000..9b5d9a4b17 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/SequencerConnectionConfig.scala @@ -0,0 +1,64 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.config + +import better.files.* +import cats.syntax.traverse.* +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.SequencerAlias +import com.digitalasset.canton.config.RequireTypes.{ExistingFile, Port} +import com.digitalasset.canton.crypto.X509CertificatePem +import com.digitalasset.canton.networking.Endpoint +import com.digitalasset.canton.sequencing.{GrpcSequencerConnection, SequencerConnection} +import com.google.protobuf.ByteString + +/** Definition provided by the domain node to members with details on how to connect to the domain sequencer. */ +sealed trait SequencerConnectionConfig { + def toConnection: Either[String, SequencerConnection] +} + +object SequencerConnectionConfig { + + // TODO(i3804) consolidate with TlsClientCertificate + sealed trait CertificateConfig extends Product with Serializable { + def pem: X509CertificatePem + } + + /** Throws an exception if the file does not exist or cannot be loaded. */ + final case class CertificateFile(pemFile: ExistingFile) extends CertificateConfig { + override val pem: X509CertificatePem = X509CertificatePem.tryFromFile(pemFile.unwrap.toScala) + } + + /** Throws an exception if the string containing the PEM certificate cannot be loaded. */ + final case class CertificateString(pemString: String) extends CertificateConfig { + override val pem: X509CertificatePem = X509CertificatePem.tryFromString(pemString) + } + + object CertificateConfig { + def apply(bytes: ByteString): CertificateConfig = + CertificateString(bytes.toStringUtf8) + } + + /** Grpc connection using a real grpc channel.
+ */ + final case class Grpc( + address: String, + port: Port, + transportSecurity: Boolean = false, + customTrustCertificates: Option[CertificateFile] = None, + ) extends SequencerConnectionConfig { + + def toConnection: Either[String, GrpcSequencerConnection] = + for { + pem <- customTrustCertificates.traverse(file => + X509CertificatePem.fromFile(file.pemFile.unwrap.toScala) + ) + } yield GrpcSequencerConnection( + NonEmpty(Seq, Endpoint(address, port)), + transportSecurity, + pem.map(_.unwrap), + SequencerAlias.Default, + ) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/ServerConfig.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/ServerConfig.scala new file mode 100644 index 0000000000..489b7ffd0e --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/ServerConfig.scala @@ -0,0 +1,384 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.config + +import com.daml.metrics.api.MetricHandle.MetricsFactory +import com.daml.metrics.api.MetricName +import com.daml.metrics.grpc.GrpcServerMetrics +import com.digitalasset.canton.ProtoDeserializationError +import com.digitalasset.canton.config.AdminServerConfig.defaultAddress +import com.digitalasset.canton.config.RequireTypes.{ExistingFile, NonNegativeInt, Port} +import com.digitalasset.canton.config.SequencerConnectionConfig.CertificateFile +import com.digitalasset.canton.domain.api.v0 +import com.digitalasset.canton.ledger.api.tls.TlsVersion +import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.networking.grpc.{ + CantonCommunityServerInterceptors, + CantonServerBuilder, + CantonServerInterceptors, +} +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.tracing.TracingConfig +import io.netty.handler.ssl.{ClientAuth, SslContext} +import org.slf4j.LoggerFactory + +import java.io.File +import scala.annotation.nowarn +import scala.math.Ordering.Implicits.infixOrderingOps + +/** Configuration for hosting a server api */ +trait ServerConfig extends Product with Serializable { + + /** The address of the interface to be listening on */ + val address: String + + /** Port to be listening on (must be greater than 0). If the port is None, a default port will be assigned on startup. + * + * NOTE: If you rename this field, adapt the corresponding product hint for config reading. In the configuration the + * field is still called `port` for usability reasons. + */ + protected val internalPort: Option[Port] + + /** Returns the configured or the default port that must be assigned after config loading and before config usage. 
+ * + * We split between `port` and `internalPort` to offer a clean API to users of the config in the form of `port`, + * which must always return a configured or default port, and the internal representation that may be None before + * being assigned a default port. + */ + def port: Port = + internalPort.getOrElse( + throw new IllegalStateException("Accessing server port before default was set") + ) + + /** If defined, dictates to use TLS when connecting to this node through the given `address` and `port`. + * Server authentication is always enabled. + * Subclasses may decide whether to support client authentication. + */ + def sslContext: Option[SslContext] + + /** server cert chain file if TLS is defined + * + * Used for domain internal GRPC sequencer connections + */ + def serverCertChainFile: Option[ExistingFile] + + /** server keep alive settings */ + def keepAliveServer: Option[KeepAliveServerConfig] + + /** maximum inbound message size in bytes on the ledger api and the admin api */ + def maxInboundMessageSize: NonNegativeInt + def toSequencerConnectionConfig: SequencerConnectionConfig.Grpc = + SequencerConnectionConfig.Grpc( + address, + port, + serverCertChainFile.isDefined, + serverCertChainFile.map(f => CertificateFile(f)), + ) + + /** Use the configuration to instantiate the interceptors for this server */ + def instantiateServerInterceptors( + tracingConfig: TracingConfig, + apiLoggingConfig: ApiLoggingConfig, + metricsPrefix: MetricName, + @nowarn("cat=deprecation") metrics: MetricsFactory, + loggerFactory: NamedLoggerFactory, + grpcMetrics: GrpcServerMetrics, + ): CantonServerInterceptors + +} + +trait CommunityServerConfig extends ServerConfig { + override def instantiateServerInterceptors( + tracingConfig: TracingConfig, + apiLoggingConfig: ApiLoggingConfig, + metricsPrefix: MetricName, + @nowarn("cat=deprecation") metrics: MetricsFactory, + loggerFactory: NamedLoggerFactory, + grpcMetrics: GrpcServerMetrics, + ) = new 
CantonCommunityServerInterceptors( + tracingConfig, + apiLoggingConfig, + loggerFactory, + grpcMetrics, + ) +} + +object ServerConfig { + val defaultMaxInboundMessageSize: NonNegativeInt = NonNegativeInt.tryCreate(10 * 1024 * 1024) +} + +/** A variant of [[ServerConfig]] that by default listens to connections only on the loopback interface. + */ +trait AdminServerConfig extends ServerConfig { + + override val address: String = defaultAddress + + def tls: Option[TlsServerConfig] + + def clientConfig: ClientConfig = + ClientConfig( + address, + port, + tls = tls.map(_.clientConfig), + keepAliveClient = keepAliveServer.map(_.clientConfigFor), + ) + + override def sslContext: Option[SslContext] = tls.map(CantonServerBuilder.sslContext) + + override def serverCertChainFile: Option[ExistingFile] = tls.map(_.certChainFile) +} +object AdminServerConfig { + val defaultAddress: String = "127.0.0.1" +} + +final case class CommunityAdminServerConfig( + override val address: String = defaultAddress, + internalPort: Option[Port] = None, + tls: Option[TlsServerConfig] = None, + keepAliveServer: Option[KeepAliveServerConfig] = Some(KeepAliveServerConfig()), + maxInboundMessageSize: NonNegativeInt = ServerConfig.defaultMaxInboundMessageSize, +) extends AdminServerConfig + with CommunityServerConfig + +/** GRPC keep alive server configuration + * + * @param time Sets the time without read activity before sending a keepalive ping. 
Do not set to small numbers (default is 40s) + * Corresponds to [[https://grpc.github.io/grpc-java/javadoc/io/grpc/netty/NettyServerBuilder.html#keepAliveTime-long-java.util.concurrent.TimeUnit-]] + * + * @param timeout Sets the time waiting for read activity after sending a keepalive ping (default is 20s) + * Corresponds to [[https://grpc.github.io/grpc-java/javadoc/io/grpc/netty/NettyServerBuilder.html#keepAliveTimeout-long-java.util.concurrent.TimeUnit-]] + * + * @param permitKeepAliveTime Sets the most aggressive keep-alive time that clients are permitted to configure (default is 20s) + * Corresponds to [[https://grpc.github.io/grpc-java/javadoc/io/grpc/netty/NettyServerBuilder.html#permitKeepAliveTime-long-java.util.concurrent.TimeUnit-]] + */ +final case class KeepAliveServerConfig( + time: NonNegativeFiniteDuration = NonNegativeFiniteDuration.ofSeconds(40), + timeout: NonNegativeFiniteDuration = NonNegativeFiniteDuration.ofSeconds(20), + permitKeepAliveTime: NonNegativeFiniteDuration = NonNegativeFiniteDuration.ofSeconds(20), +) { + + /** A sensible default choice of client config for the given server config */ + def clientConfigFor: KeepAliveClientConfig = { + val clientKeepAliveTime = permitKeepAliveTime.max(time) + KeepAliveClientConfig(clientKeepAliveTime, timeout) + } +} + +/** GRPC keep alive client configuration + * + * Settings according to [[https://grpc.github.io/grpc-java/javadoc/io/grpc/ManagedChannelBuilder.html#keepAliveTime-long-java.util.concurrent.TimeUnit-]] + * + * @param time Sets the time without read activity before sending a keepalive ping. 
Do not set to small numbers (default is 40s) + * @param timeout Sets the time waiting for read activity after sending a keepalive ping (default is 20s) + */ +final case class KeepAliveClientConfig( + time: NonNegativeFiniteDuration = NonNegativeFiniteDuration.ofSeconds(40), + timeout: NonNegativeFiniteDuration = NonNegativeFiniteDuration.ofSeconds(20), +) + +sealed trait ApiType extends PrettyPrinting { + def toProtoEnum: v0.SequencerApiType +} + +object ApiType { + case object Grpc extends ApiType { + def toProtoEnum: v0.SequencerApiType = v0.SequencerApiType.Grpc + override def pretty: Pretty[Grpc.type] = prettyOfObject[Grpc.type] + } + + def fromProtoEnum( + field: String, + apiTypeP: v0.SequencerApiType, + ): ParsingResult[ApiType] = + apiTypeP match { + case v0.SequencerApiType.Grpc => Right(Grpc) + case v0.SequencerApiType.Unrecognized(value) => + Left(ProtoDeserializationError.UnrecognizedEnum(field, value)) + } +} + +/** A client configuration to a corresponding server configuration */ +final case class ClientConfig( + address: String = "127.0.0.1", + port: Port, + tls: Option[TlsClientConfig] = None, + keepAliveClient: Option[KeepAliveClientConfig] = Some(KeepAliveClientConfig()), +) + +sealed trait BaseTlsArguments { + def certChainFile: ExistingFile + def privateKeyFile: ExistingFile + def minimumServerProtocolVersion: Option[String] + def ciphers: Option[Seq[String]] + + def protocols: Option[Seq[String]] = + minimumServerProtocolVersion.map { minVersion => + val knownTlsVersions = + Seq( + TlsVersion.V1.version, + TlsVersion.V1_1.version, + TlsVersion.V1_2.version, + TlsVersion.V1_3.version, + ) + knownTlsVersions + .find(_ == minVersion) + .fold[Seq[String]]( + throw new IllegalArgumentException(s"Unknown TLS protocol version ${minVersion}") + )(versionFound => knownTlsVersions.filter(_ >= versionFound)) + } +} + +/** A wrapper for TLS related server parameters supporting mutual authentication. 
+ * + * Certificates and keys must be provided in the PEM format. + * It is recommended to create them with OpenSSL. + * Other formats (such as GPG) may also work, but have not been tested. + * + * @param certChainFile a file containing a certificate chain, + * containing the certificate chain from the server to the root CA. + * The certificate chain is used to authenticate the server. + * The order of certificates in the chain matters, i.e., it must start with the + * server certificate and end with the root certificate. + * @param privateKeyFile a file containing the server's private key. + * The key must not use a password. + * @param trustCollectionFile a file containing certificates of all nodes the server trusts. + * Used for client authentication. + * It depends on the enclosing configuration whether client authentication is mandatory, + * optional or unsupported. + * If client authentication is enabled and this parameter is absent, + * the certificates in the JVM trust store will be used instead. + * @param secretsUrl URL of a secrets service that provides parameters needed to decrypt the private key. + * Required when private key is encrypted (indicated by '.enc' filename suffix). + * @param clientAuth indicates whether the server requires, requests, or does not request auth from clients. + * Normally the ledger api server requires client auth under TLS, but using this setting this + * requirement can be loosened. + * See https://github.com/digital-asset/daml/commit/edd73384c427d9afe63bae9d03baa2a26f7b7f54 + * @param minimumServerProtocolVersion minimum supported TLS protocol. Set None (or null in config file) to default to JVM settings. + * @param ciphers supported ciphers. Set to None (or null in config file) to default to JVM settings. 
+ * @param enableCertRevocationChecking whether to enable certificate revocation checking per + * https://tersesystems.com/blog/2014/03/22/fixing-certificate-revocation/ + * TODO(#4881): implement cert-revocation at the participant and domain admin endpoints + * Ledger api server reference PR: https://github.com/digital-asset/daml/pull/7965 + */ +// Information in this ScalaDoc comment has been taken from https://grpc.io/docs/guides/auth/. +final case class TlsServerConfig( + certChainFile: ExistingFile, + privateKeyFile: ExistingFile, + trustCollectionFile: Option[ExistingFile] = None, + secretsUrl: Option[String] = None, + clientAuth: ServerAuthRequirementConfig = ServerAuthRequirementConfig.Optional, + minimumServerProtocolVersion: Option[String] = Some( + TlsServerConfig.defaultMinimumServerProtocol + ), + ciphers: Option[Seq[String]] = TlsServerConfig.defaultCiphers, + enableCertRevocationChecking: Boolean = false, +) extends BaseTlsArguments { + lazy val clientConfig: TlsClientConfig = { + val clientCert = clientAuth match { + case ServerAuthRequirementConfig.Require(cert) => Some(cert) + case _ => None + } + TlsClientConfig(trustCollectionFile = Some(certChainFile), clientCert = clientCert) + } +} + +object TlsServerConfig { + // default OWASP strong cipher set with broad compatibility (B list) + // https://cheatsheetseries.owasp.org/cheatsheets/TLS_Cipher_String_Cheat_Sheet.html + lazy val defaultCiphers = { + val candidates = Seq( + "TLS_AES_256_GCM_SHA384", + "TLS_CHACHA20_POLY1305_SHA256", + "TLS_AES_128_GCM_SHA256", + "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384", + "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_DHE_RSA_WITH_AES_256_CBC_SHA256", + "TLS_DHE_RSA_WITH_AES_128_CBC_SHA256", + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", + ) + val logger = LoggerFactory.getLogger(TlsServerConfig.getClass) + val filtered = candidates.filter(x => 
{ + io.netty.handler.ssl.OpenSsl.availableOpenSslCipherSuites().contains(x) || + io.netty.handler.ssl.OpenSsl.availableJavaCipherSuites().contains(x) + }) + if (filtered.isEmpty) { + val len = io.netty.handler.ssl.OpenSsl + .availableOpenSslCipherSuites() + .size() + io.netty.handler.ssl.OpenSsl + .availableJavaCipherSuites() + .size() + logger.warn( + s"All of Canton's default TLS ciphers are unsupported by your JVM (netty reports $len ciphers). Defaulting to JVM settings." + ) + if (!io.netty.handler.ssl.OpenSsl.isAvailable) { + logger.info( + "Netty OpenSSL is not available because of an issue", + io.netty.handler.ssl.OpenSsl.unavailabilityCause(), + ) + } + None + } else { + logger.debug( + s"Using ${filtered.length} out of ${candidates.length} Canton's default TLS ciphers" + ) + Some(filtered) + } + } + + val defaultMinimumServerProtocol = "TLSv1.2" + +} + +/** A wrapper for TLS server parameters supporting only server side authentication + * + * Same parameters as the more complete `TlsServerConfig` + */ +final case class TlsBaseServerConfig( + certChainFile: ExistingFile, + privateKeyFile: ExistingFile, + minimumServerProtocolVersion: Option[String] = Some( + TlsServerConfig.defaultMinimumServerProtocol + ), + ciphers: Option[Seq[String]] = TlsServerConfig.defaultCiphers, +) extends BaseTlsArguments + +/** A wrapper for TLS related client configurations + * + * @param trustCollectionFile a file containing certificates of all nodes the client trusts. 
If none is specified, defaults to the JVM trust store + * @param clientCert the client certificate + */ +final case class TlsClientConfig( + trustCollectionFile: Option[ExistingFile], + clientCert: Option[TlsClientCertificate], +) + +/** + */ +final case class TlsClientCertificate(certChainFile: File, privateKeyFile: File) + +/** Configuration on whether server requires auth, requests auth, or no auth */ +sealed trait ServerAuthRequirementConfig { + def clientAuth: ClientAuth +} +object ServerAuthRequirementConfig { + + /** A variant of [[ServerAuthRequirementConfig]] by which the server requires auth from clients */ + final case class Require(adminClient: TlsClientCertificate) extends ServerAuthRequirementConfig { + val clientAuth = ClientAuth.REQUIRE + } + + /** A variant of [[ServerAuthRequirementConfig]] by which the server merely requests auth from clients */ + case object Optional extends ServerAuthRequirementConfig { + val clientAuth = ClientAuth.OPTIONAL + } + + /** A variant of [[ServerAuthRequirementConfig]] by which the server does not even request auth from clients */ + case object None extends ServerAuthRequirementConfig { + val clientAuth = ClientAuth.NONE + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/StorageConfig.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/StorageConfig.scala new file mode 100644 index 0000000000..90fef7ae2a --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/StorageConfig.scala @@ -0,0 +1,423 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.config + +import com.digitalasset.canton.concurrent.Threading +import com.digitalasset.canton.config.RequireTypes.{PositiveInt, PositiveNumeric} +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.logging.{NamedLogging, TracedLogger} +import com.digitalasset.canton.tracing.NoTracing +import com.typesafe.config.{Config, ConfigFactory, ConfigValueFactory} + +import scala.jdk.CollectionConverters.* + +/** Various database related settings + * + * @param maxConnections Allows for setting the maximum number of db connections used by Canton and the ledger API server. + * If None or non-positive, the value will be auto-detected from the number of processors. + * Has no effect, if the number of connections is already set via slick options + * (i.e., `config.numThreads`). + * @param connectionAllocation Overrides for the sizes of the connection pools managed by a canton node. + * @param failFastOnStartup If true, the node will fail-fast when the database cannot be connected to + * If false, the node will wait indefinitely for the database to come up + * @param migrationsPaths Where should database migrations be read from. Enables specialized DDL for different database servers (e.g. Postgres, Oracle). + * @param ledgerApiJdbcUrl Canton attempts to generate appropriate configuration for the daml ledger-api to persist the data it requires. + * In most circumstances this should be sufficient and there is no need to override this. + * However if this generation fails or an advanced configuration is required, the ledger-api jdbc url can be + * explicitly configured using this property. + * The jdbc url **must** specify the schema of `ledger_api` (using h2 parameter `schema` or postgres parameter `currentSchema`). 
+ * This property is not used by a domain node as it does not run a ledger-api instance, + * and will be ignored if the node is configured with in-memory persistence. + * @param connectionTimeout How long to wait for acquiring a database connection + * @param warnOnSlowQuery Optional time when we start logging a query as slow. + * @param warnOnSlowQueryInterval How often to repeat the logging statement for slow queries. + * @param unsafeCleanOnValidationError TO BE USED ONLY FOR TESTING! Clean the database if validation during DB migration fails. + * @param unsafeBaselineOnMigrate TO BE USED ONLY FOR TESTING! + *

Whether to automatically call baseline when migrate is executed against a non-empty schema with no schema history table. + * This schema will then be baselined with the {@code baselineVersion} before executing the migrations. + * Only migrations above {@code baselineVersion} will then be applied.

+ *

This is useful for database projects where the initial vendor schema is not empty

+ * {@code true} if baseline should be called on migrate for non-empty schemas, {@code false} if not. (default: {@code false}) + * @param migrateAndStart if true, db migrations will be applied to the database (default is to abort start if db migrations are pending to force an explicit upgrade) + */ +final case class DbParametersConfig( + maxConnections: Option[Int] = None, + connectionAllocation: ConnectionAllocation = ConnectionAllocation(), + failFastOnStartup: Boolean = true, + migrationsPaths: Seq[String] = Seq.empty, + ledgerApiJdbcUrl: Option[String] = None, + connectionTimeout: NonNegativeFiniteDuration = DbConfig.defaultConnectionTimeout, + warnOnSlowQuery: Option[PositiveFiniteDuration] = None, + warnOnSlowQueryInterval: PositiveFiniteDuration = + DbParametersConfig.defaultWarnOnSlowQueryInterval, + unsafeCleanOnValidationError: Boolean = false, + unsafeBaselineOnMigrate: Boolean = false, + migrateAndStart: Boolean = false, +) extends PrettyPrinting { + override def pretty: Pretty[DbParametersConfig] = + prettyOfClass( + paramIfDefined( + "migrationsPaths", + x => + if (x.migrationsPaths.nonEmpty) + Some(x.migrationsPaths.map(_.doubleQuoted)) + else None, + ), + paramIfDefined("ledgerApiJdbcUrl", _.ledgerApiJdbcUrl.map(_.doubleQuoted)), + paramIfDefined("maxConnections", _.maxConnections), + param("connectionAllocation", _.connectionAllocation), + param("failFast", _.failFastOnStartup), + paramIfDefined("warnOnSlowQuery", _.warnOnSlowQuery), + ) +} + +/** Various settings to control batching behaviour related to db queries + * + * @param maxItemsInSqlClause maximum number of items to place in sql "in clauses" + * @param parallelism number of parallel queries to the db. 
defaults to 8 + */ +final case class BatchingConfig( + maxItemsInSqlClause: PositiveNumeric[Int] = BatchingConfig.defaultMaxItemsInSqlClause, + parallelism: PositiveNumeric[Int] = BatchingConfig.defaultBatchingParallelism, +) + +object BatchingConfig { + private val defaultMaxItemsInSqlClause: PositiveNumeric[Int] = PositiveNumeric.tryCreate(100) + private val defaultBatchingParallelism: PositiveNumeric[Int] = PositiveNumeric.tryCreate(8) +} + +final case class ConnectionAllocation( + numReads: Option[PositiveInt] = None, + numWrites: Option[PositiveInt] = None, + numLedgerApi: Option[PositiveInt] = None, +) extends PrettyPrinting { + override def pretty: Pretty[ConnectionAllocation] = + prettyOfClass( + paramIfDefined("numReads", _.numReads), + paramIfDefined("numWrites", _.numWrites), + paramIfDefined("numLedgerApi", _.numLedgerApi), + ) +} + +object DbParametersConfig { + private val defaultWarnOnSlowQueryInterval: PositiveFiniteDuration = + PositiveFiniteDuration.ofSeconds(5) +} + +trait StorageConfig { + type Self <: StorageConfig + + /** Database specific configuration parameters used by Slick. + * Also available for in-memory storage to support easy switching between in-memory and database storage. + */ + def config: Config + + /** General database related parameters. */ + def parameters: DbParametersConfig + + private def maxConnectionsOrDefault: Int = { + // The following is an educated guess of a sane default for the number of DB connections. + // https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing + parameters.maxConnections match { + case Some(value) if value > 0 => value + case _ => Threading.detectNumberOfThreads(NamedLogging.noopNoTracingLogger) + } + } + + /** Returns the size of the Canton read connection pool for the given usage. + * + * @param forParticipant True if the connection pool is used by a participant, then we reserve connections for the ledger API server. 
+ * @param withWriteConnectionPool True for a replicated node's write connection pool, then we split the available connections between the read and write pools. + * @param withMainConnection True for accounting an additional connection (write connection, or main connection with lock) + */ + def numReadConnectionsCanton( + forParticipant: Boolean, + withWriteConnectionPool: Boolean, + withMainConnection: Boolean, + ): PositiveInt = + parameters.connectionAllocation.numReads.getOrElse( + numConnectionsCanton(forParticipant, withWriteConnectionPool, withMainConnection) + ) + + /** Returns the size of the Canton write connection pool for the given usage. + * + * @param forParticipant True if the connection pool is used by a participant, then we reserve connections for the ledger API server. + * @param withWriteConnectionPool True for a replicated node's write connection pool, then we split the available connections between the read and write pools. + * @param withMainConnection True for accounting an additional connection (write connection, or main connection with lock) + */ + def numWriteConnectionsCanton( + forParticipant: Boolean, + withWriteConnectionPool: Boolean, + withMainConnection: Boolean, + ): PositiveInt = + parameters.connectionAllocation.numWrites.getOrElse( + numConnectionsCanton(forParticipant, withWriteConnectionPool, withMainConnection) + ) + + /** Returns the size of the combined Canton read+write connection pool for the given usage. + * + * @param forParticipant True if the connection pool is used by a participant, then we reserve connections for the ledger API server. + * @param withWriteConnectionPool True for a replicated node's write connection pool, then we split the available connections between the read and write pools. 
+ * @param withMainConnection True for accounting an additional connection (write connection, or main connection with lock) + */ + def numCombinedConnectionsCanton( + forParticipant: Boolean, + withWriteConnectionPool: Boolean, + withMainConnection: Boolean, + ): PositiveInt = + (parameters.connectionAllocation.numWrites.toList ++ parameters.connectionAllocation.numReads.toList) + .reduceOption(_ + _) + .getOrElse( + numConnectionsCanton(forParticipant, withWriteConnectionPool, withMainConnection) + ) + + /** Returns the size of the Canton connection pool for the given usage. + * + * @param forParticipant True if the connection pool is used by a participant, then we reserve connections for the ledger API server. + * @param withWriteConnectionPool True for a replicated node's write connection pool, then we split the available connections between the read and write pools. + * @param withMainConnection True for accounting an additional connection (write connection, or main connection with lock) + */ + private def numConnectionsCanton( + forParticipant: Boolean, + withWriteConnectionPool: Boolean, + withMainConnection: Boolean, + ): PositiveInt = { + val c = maxConnectionsOrDefault + + // A participant evenly shares the max connections between the ledger API server (not indexer) and canton + val totalConnectionPoolSize = if (forParticipant) c / 2 else c + + // For replicated nodes we have an additional connection pool for writes. Split evenly between reads and writes. + val replicatedConnectionPoolSize = + if (withWriteConnectionPool) totalConnectionPoolSize / 2 else totalConnectionPoolSize + + val resultMaxConnections = if (withMainConnection) { + // The write connection pool for replicated nodes require an additional connection outside of the pool + (replicatedConnectionPoolSize - 1) + } else + replicatedConnectionPoolSize + + // Return at least one connection + PositiveInt.tryCreate(resultMaxConnections max 1) + } + + /** Max connections for the Ledger API server. 
The Ledger API indexer's max connections are configured separately. */ + def numConnectionsLedgerApiServer: PositiveInt = + parameters.connectionAllocation.numLedgerApi.getOrElse( + // The Ledger Api Server always gets half of the max connections allocated to canton + PositiveInt.tryCreate(maxConnectionsOrDefault / 2 max 1) + ) +} + +/** Determines how a node stores persistent data. + */ +sealed trait CommunityStorageConfig extends StorageConfig + +trait MemoryStorageConfig extends StorageConfig + +object CommunityStorageConfig { + + /** Dictates that persistent data is stored in memory. + * So in fact, the data is not persistent. It is deleted whenever the node is stopped. + * + * @param config IGNORED configuration option, used to allow users to use configuration mixins with postgres and h2 + */ + final case class Memory( + override val config: Config = ConfigFactory.empty(), + override val parameters: DbParametersConfig = DbParametersConfig(), + ) extends CommunityStorageConfig + with MemoryStorageConfig { + override type Self = Memory + + } +} + +/** Dictates that persistent data is stored in a database. 
+ */ +trait DbConfig extends StorageConfig with PrettyPrinting { + + /** Function to combine the defined migration path together with dev version changes */ + final def buildMigrationsPaths(devVersionSupport: Boolean): Seq[String] = { + if (parameters.migrationsPaths.nonEmpty) + parameters.migrationsPaths + else if (devVersionSupport) + Seq(stableMigrationPath, devMigrationPath) + else Seq(stableMigrationPath) + } + + protected def devMigrationPath: String + protected def stableMigrationPath: String + + override def pretty: Pretty[DbConfig] = + prettyOfClass( + param( + "config", + _.config.toString.replaceAll("\"password\":\".*?\"", "\"password\":\"???\"").unquoted, + ), + param("parameters", _.parameters), + ) +} + +trait H2DbConfig extends DbConfig { + def databaseName: Option[String] = { + if (config.hasPath("url")) { + val url = config.getString("url") + "(:mem:|:file:)([^:;]+)([:;])".r.findFirstMatchIn(url).map(_.group(2)) + } else None + } + private val defaultDriver: String = "org.h2.Driver" + val defaultConfig: Config = DbConfig.toConfig(Map("driver" -> defaultDriver)) +} + +trait PostgresDbConfig extends DbConfig + +sealed trait CommunityDbConfig extends CommunityStorageConfig with DbConfig + +object CommunityDbConfig { + final case class H2( + override val config: Config, + override val parameters: DbParametersConfig = DbParametersConfig(), + ) extends CommunityDbConfig + with H2DbConfig { + override type Self = H2 + + protected val devMigrationPath: String = DbConfig.h2MigrationsPathDev + protected val stableMigrationPath: String = DbConfig.h2MigrationsPathStable + + } + + final case class Postgres( + override val config: Config, + override val parameters: DbParametersConfig = DbParametersConfig(), + ) extends CommunityDbConfig + with PostgresDbConfig { + override type Self = Postgres + + protected def devMigrationPath: String = DbConfig.postgresMigrationsPathDev + protected val stableMigrationPath: String = DbConfig.postgresMigrationsPathStable + + } 
+} + +object DbConfig extends NoTracing { + + val defaultConnectionTimeout: NonNegativeFiniteDuration = NonNegativeFiniteDuration.ofSeconds(5) + + private val stableDir = "stable" + private val devDir = "dev" + private val basePostgresMigrationsPath: String = "classpath:db/migration/canton/postgres/" + private val baseH2MigrationsPath: String = "classpath:db/migration/canton/h2/" + private val baseOracleMigrationPath: String = "classpath:db/migration/canton/oracle/" + val postgresMigrationsPathStable: String = basePostgresMigrationsPath + stableDir + val h2MigrationsPathStable: String = baseH2MigrationsPath + stableDir + val oracleMigrationPathStable: String = baseOracleMigrationPath + stableDir + val postgresMigrationsPathDev: String = basePostgresMigrationsPath + devDir + val h2MigrationsPathDev: String = baseH2MigrationsPath + devDir + val oracleMigrationPathDev: String = baseOracleMigrationPath + devDir + + def postgresUrl(host: String, port: Int, dbName: String): String = + s"jdbc:postgresql://$host:$port/$dbName" + + def h2Url(dbName: String): String = + s"jdbc:h2:mem:$dbName;MODE=PostgreSQL;LOCK_TIMEOUT=10000;DB_CLOSE_DELAY=-1" + + def oracleUrl(host: String, port: Int, dbName: String): String = + s"jdbc:oracle:thin:@$host:$port/$dbName" + + def oracleUrl( + host: String, + port: Int, + dbName: String, + username: String, + password: String, + ): String = + s"jdbc:oracle:thin:$username/$password@$host:$port/$dbName" + + def toConfig(map: Map[String, Any]): Config = ConfigFactory.parseMap(map.asJava) + + /** Apply default values to the given db config + */ + def configWithFallback( + dbConfig: DbConfig + )( + numThreads: PositiveInt, + poolName: String, + logger: TracedLogger, + ): Config = { + val commonDefaults = toConfig( + Map( + "poolName" -> poolName, + "numThreads" -> numThreads.unwrap, + "connectionTimeout" -> dbConfig.parameters.connectionTimeout.unwrap.toMillis, + "initializationFailTimeout" -> 1, // Must be greater than 0 to force a connection 
validation on startup + ) + ) + (dbConfig match { + case h2: H2DbConfig => + def containsOption(c: Config, optionName: String, optionValue: String) = { + val propertiesPath = s"properties.$optionName" + val valueIsInProperties = + c.hasPath(propertiesPath) && c.getString(propertiesPath).contains(optionValue) + val valueIsInUrl = assertOnString(c, "url", _.contains(s"$optionName=$optionValue")) + valueIsInProperties || valueIsInUrl + } + def enforcePgMode(c: Config): Config = + if (!containsOption(c, "MODE", "PostgreSQL")) { + logger.warn( + "Given H2 config did not contain PostgreSQL compatibility mode. Automatically added it." + ) + c.withValue("properties.MODE", ConfigValueFactory.fromAnyRef("PostgreSQL")) + } else c + def enforceDelayClose(c: Config): Config = { + val isInMemory = + assertOnString(c, "url", _.contains(":mem:")) + if (isInMemory && !containsOption(c, "DB_CLOSE_DELAY", "-1")) { + logger.warn( + s"Given H2 config is in-memory and does not contain DB_CLOSE_DELAY=-1. Automatically added this to avoid accidentally losing all data. 
$c" + ) + c.withValue("properties.DB_CLOSE_DELAY", ConfigValueFactory.fromAnyRef("-1")) + } else c + } + def enforceSingleConnection(c: Config): Config = { + if (!c.hasPath("numThreads") || c.getInt("numThreads") != 1) { + logger.info("Overriding numThreads to 1 to avoid concurrency issues.") + } + c.withValue("numThreads", ConfigValueFactory.fromAnyRef(1)) + } + enforceDelayClose( + enforcePgMode(enforceSingleConnection(writeH2UrlIfNotSet(h2.config))) + ).withFallback(h2.defaultConfig) + case postgres: PostgresDbConfig => postgres.config + // TODO(i11009): this other is a workaround for supporting oracle without referencing the oracle config + case other => other.config + }).withFallback(commonDefaults) + } + + private def assertOnString(c: Config, path: String, check: String => Boolean): Boolean = + c.hasPath(path) && check(c.getString(path)) + + /** if the URL is not set, we build one here (assuming that config.properties.databaseName is set and should be used as the file name) */ + def writeH2UrlIfNotSet(c: Config): Config = { + val noUrlConfigured = !assertOnString(c, "url", _.nonEmpty) + if (noUrlConfigured && c.hasPath("properties.databaseName")) { + val url = "jdbc:h2:file:./" + c.getString( + "properties.databaseName" + ) + ";MODE=PostgreSQL;LOCK_TIMEOUT=10000;DB_CLOSE_DELAY=-1" + c.withValue("url", ConfigValueFactory.fromAnyRef(url)) + } else + c + } + + /** strip the password and the url out of the config object */ + def hideConfidential(config: Config): Config = { + val hidden = ConfigValueFactory.fromAnyRef("****") + val replace = Seq("password", "properties.password", "url", "properties.url") + replace.foldLeft(config) { case (acc, path) => + if (acc.hasPath(path)) + acc.withValue(path, hidden) + else acc + } + } + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/TestingConfigInternal.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/TestingConfigInternal.scala new file mode 100644 
index 0000000000..176e468b1e --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/TestingConfigInternal.scala @@ -0,0 +1,30 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.config + +import com.digitalasset.canton.metrics.MetricsFactoryType +import com.digitalasset.canton.metrics.MetricsFactoryType.External + +/** Used to set parameters for testing when these don't need to be exposed in a config file. + * + * @param testSequencerClientFor: members that should use a + * [[com.digitalasset.canton.sequencing.client.DelayedSequencerClient]] for testing + * @param initializeGlobalOpenTelemetry Determines whether the OpenTelemetry instance we build is set as the global OpenTelemetry instance. This is set to false during tests to + * prevent failures as the global OpenTelemetry instance can be initialized just once. + */ +final case class TestingConfigInternal( + testSequencerClientFor: Set[TestSequencerClientFor] = Set.empty, + metricsFactoryType: MetricsFactoryType = External, + initializeGlobalOpenTelemetry: Boolean = true, +) + +/** @param environmentId ID used to disambiguate tests running in parallel + * @param memberName The name of the member that should use a delayed sequencer client + * @param domainName The name of the domain for which the member should use a delayed sequencer client + */ +final case class TestSequencerClientFor( + environmentId: String, + memberName: String, + domainName: String, +) diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/TimeProofRequestConfig.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/TimeProofRequestConfig.scala new file mode 100644 index 0000000000..110084467d --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/config/TimeProofRequestConfig.scala @@ -0,0 +1,78 @@ 
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.config + +import cats.syntax.option.* +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.time.admin.v0 + +/** @param initialRetryDelay The initial retry delay if the request to send a sequenced event fails + * @param maxRetryDelay The max retry delay if the request to send a sequenced event fails + * @param maxSequencingDelay If our request for a sequenced event was successful, how long should we wait + * to observe it from the sequencer before starting a new request. + */ +final case class TimeProofRequestConfig( + initialRetryDelay: NonNegativeFiniteDuration = TimeProofRequestConfig.defaultInitialRetryDelay, + maxRetryDelay: NonNegativeFiniteDuration = TimeProofRequestConfig.defaultMaxRetryDelay, + maxSequencingDelay: NonNegativeFiniteDuration = TimeProofRequestConfig.defaultMaxSequencingDelay, +) extends PrettyPrinting { + private[config] def toProtoV0: v0.TimeProofRequestConfig = v0.TimeProofRequestConfig( + initialRetryDelay.toProtoPrimitive.some, + maxRetryDelay.toProtoPrimitive.some, + maxSequencingDelay.toProtoPrimitive.some, + ) + override def pretty: Pretty[TimeProofRequestConfig] = prettyOfClass( + paramIfNotDefault( + "initialRetryDelay", + _.initialRetryDelay, + TimeProofRequestConfig.defaultInitialRetryDelay, + ), + paramIfNotDefault( + "maxRetryDelay", + _.maxRetryDelay, + TimeProofRequestConfig.defaultMaxRetryDelay, + ), + paramIfNotDefault( + "maxSequencingDelay", + _.maxSequencingDelay, + TimeProofRequestConfig.defaultMaxSequencingDelay, + ), + ) + +} + +object TimeProofRequestConfig { + + private val defaultInitialRetryDelay: NonNegativeFiniteDuration = + 
NonNegativeFiniteDuration.ofMillis(200) + + private val defaultMaxRetryDelay: NonNegativeFiniteDuration = + NonNegativeFiniteDuration.ofSeconds(5) + + private val defaultMaxSequencingDelay: NonNegativeFiniteDuration = + NonNegativeFiniteDuration.ofSeconds(10) + + private[config] def fromProtoV0( + configP: v0.TimeProofRequestConfig + ): ParsingResult[TimeProofRequestConfig] = + for { + initialRetryDelay <- ProtoConverter.parseRequired( + NonNegativeFiniteDuration.fromProtoPrimitive("initialRetryDelay"), + "initialRetryDelay", + configP.initialRetryDelay, + ) + maxRetryDelay <- ProtoConverter.parseRequired( + NonNegativeFiniteDuration.fromProtoPrimitive("maxRetryDelay"), + "maxRetryDelay", + configP.maxRetryDelay, + ) + maxSequencingDelay <- ProtoConverter.parseRequired( + NonNegativeFiniteDuration.fromProtoPrimitive("maxSequencingDelay"), + "maxSequencingDelay", + configP.maxSequencingDelay, + ) + } yield TimeProofRequestConfig(initialRetryDelay, maxRetryDelay, maxSequencingDelay) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/CryptoApi.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/CryptoApi.scala new file mode 100644 index 0000000000..713aa4ec0f --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/CryptoApi.scala @@ -0,0 +1,187 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.crypto + +import cats.data.EitherT +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.crypto.store.{ + CryptoPrivateStore, + CryptoPrivateStoreError, + CryptoPublicStore, +} +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.lifecycle.{FlagCloseable, Lifecycle} +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.serialization.DeserializationError +import com.digitalasset.canton.topology.MediatorGroup.MediatorGroupIndex +import com.digitalasset.canton.topology.Member +import com.digitalasset.canton.topology.client.TopologySnapshot +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.version.{HasVersionedToByteString, ProtocolVersion} +import com.google.protobuf.ByteString + +import scala.concurrent.{ExecutionContext, Future} + +/** Wrapper class to simplify crypto dependency management */ +class Crypto( + val pureCrypto: CryptoPureApi, + val privateCrypto: CryptoPrivateApi, + val cryptoPrivateStore: CryptoPrivateStore, + val cryptoPublicStore: CryptoPublicStore, + val javaKeyConverter: JavaKeyConverter, + override protected val timeouts: ProcessingTimeout, + override protected val loggerFactory: NamedLoggerFactory, +)(implicit ec: ExecutionContext) + extends NamedLogging + with FlagCloseable { + + /** Helper method to generate a new signing key pair and store the public key in the public store as well. 
*/ + def generateSigningKey( + scheme: SigningKeyScheme = privateCrypto.defaultSigningKeyScheme, + name: Option[KeyName] = None, + )(implicit + traceContext: TraceContext + ): EitherT[Future, SigningKeyGenerationError, SigningPublicKey] = + for { + publicKey <- privateCrypto.generateSigningKey(scheme, name) + _ <- cryptoPublicStore + .storeSigningKey(publicKey, name) + .leftMap[SigningKeyGenerationError](SigningKeyGenerationError.SigningPublicStoreError) + } yield publicKey + + /** Helper method to generate a new encryption key pair and store the public key in the public store as well. */ + def generateEncryptionKey( + scheme: EncryptionKeyScheme = privateCrypto.defaultEncryptionKeyScheme, + name: Option[KeyName] = None, + )(implicit + traceContext: TraceContext + ): EitherT[Future, EncryptionKeyGenerationError, EncryptionPublicKey] = + for { + publicKey <- privateCrypto.generateEncryptionKey(scheme, name) + _ <- cryptoPublicStore + .storeEncryptionKey(publicKey, name) + .leftMap[EncryptionKeyGenerationError]( + EncryptionKeyGenerationError.EncryptionPublicStoreError + ) + } yield publicKey + + override def onClosed(): Unit = Lifecycle.close(cryptoPrivateStore, cryptoPublicStore)(logger) +} + +trait CryptoPureApi + extends EncryptionOps + with SigningOps + with HmacOps + with HkdfOps + with HashOps + with RandomOps +trait CryptoPrivateApi extends EncryptionPrivateOps with SigningPrivateOps +trait CryptoPrivateStoreApi + extends CryptoPrivateApi + with EncryptionPrivateStoreOps + with SigningPrivateStoreOps + +sealed trait SyncCryptoError extends Product with Serializable with PrettyPrinting +object SyncCryptoError { + + /** error thrown if there is no key available as per identity providing service */ + final case class KeyNotAvailable( + owner: Member, + keyPurpose: KeyPurpose, + timestamp: CantonTimestamp, + candidates: Seq[Fingerprint], + ) extends SyncCryptoError { + override def pretty: Pretty[KeyNotAvailable] = prettyOfClass( + param("owner", _.owner), + 
param("key purpose", _.keyPurpose), + param("timestamp", _.timestamp), + param("candidates", _.candidates), + ) + } + + final case class SyncCryptoSigningError(error: SigningError) extends SyncCryptoError { + override def pretty: Pretty[SyncCryptoSigningError] = prettyOfParam(_.error) + } + + final case class SyncCryptoDecryptionError(error: DecryptionError) extends SyncCryptoError { + override def pretty: Pretty[SyncCryptoDecryptionError] = prettyOfParam(_.error) + } + + final case class SyncCryptoEncryptionError(error: EncryptionError) extends SyncCryptoError { + override def pretty: Pretty[SyncCryptoEncryptionError] = prettyOfParam(_.error) + } + + final case class StoreError(error: CryptoPrivateStoreError) extends SyncCryptoError { + override def pretty: Pretty[StoreError] = + prettyOfClass(unnamedParam(_.error)) + } +} + +// TODO(i8808): consider changing `encryptFor` API to +// `def encryptFor(message: ByteString, member: Member): EitherT[Future, SyncCryptoError, ByteString]` +// architecture-handbook-entry-begin: SyncCryptoApi +/** impure part of the crypto api with access to private key store and knowledge about the current entity to key assoc */ +trait SyncCryptoApi { + + def pureCrypto: CryptoPureApi + + def ipsSnapshot: TopologySnapshot + + /** Signs the given hash using the private signing key. */ + def sign(hash: Hash)(implicit + traceContext: TraceContext + ): EitherT[Future, SyncCryptoError, Signature] + + /** Decrypts a message using the private key of the public key given as the fingerprint. 
*/ + def decrypt[M](encryptedMessage: AsymmetricEncrypted[M])( + deserialize: ByteString => Either[DeserializationError, M] + )(implicit traceContext: TraceContext): EitherT[Future, SyncCryptoError, M] + + @Deprecated + def decrypt[M](encryptedMessage: Encrypted[M])( + deserialize: ByteString => Either[DeserializationError, M] + )(implicit traceContext: TraceContext): EitherT[Future, SyncCryptoError, M] + + /** Verify signature of a given owner + * + * Convenience method to lookup a key of a given owner, domain and timestamp and verify the result. + */ + def verifySignature( + hash: Hash, + signer: Member, + signature: Signature, + ): EitherT[Future, SignatureCheckError, Unit] + + def verifySignatures( + hash: Hash, + signer: Member, + signatures: NonEmpty[Seq[Signature]], + ): EitherT[Future, SignatureCheckError, Unit] + + /** Verifies a list of `signatures` to be produced by active members of a `mediatorGroup`, + * counting each member's signature only once. + * Returns `Right` when the `mediatorGroup`'s threshold is met. + * Can be successful even if some signatures fail the check, logs the errors in that case. + * When the threshold is not met returns `Left` with all the signature check errors. + */ + def verifySignatures( + hash: Hash, + mediatorGroupIndex: MediatorGroupIndex, + signatures: NonEmpty[Seq[Signature]], + )(implicit traceContext: TraceContext): EitherT[Future, SignatureCheckError, Unit] + + /** Encrypts a message for the given member + * + * Utility method to lookup a key on an IPS snapshot and then encrypt the given message with the + * most suitable key for the respective key owner. 
+ */ + def encryptFor[M <: HasVersionedToByteString]( + message: M, + member: Member, + version: ProtocolVersion, + ): EitherT[Future, SyncCryptoError, AsymmetricEncrypted[M]] +} +// architecture-handbook-entry-end: SyncCryptoApi diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/CryptoKeys.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/CryptoKeys.scala new file mode 100644 index 0000000000..38b8d21c44 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/CryptoKeys.scala @@ -0,0 +1,372 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.crypto + +import cats.Order +import cats.syntax.either.* +import com.digitalasset.canton.ProtoDeserializationError +import com.digitalasset.canton.config.CantonRequireTypes.{ + LengthLimitedStringWrapper, + LengthLimitedStringWrapperCompanion, + String300, + String68, +} +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.store.db.DbDeserializationException +import com.digitalasset.canton.topology.SafeSimpleString +import com.digitalasset.canton.version.{ + HasVersionedMessageCompanion, + HasVersionedWrapper, + ProtoVersion, + ProtocolVersion, +} +import com.google.protobuf.ByteString +import io.circe.Encoder +import slick.jdbc.{GetResult, SetParameter} + +trait CryptoKey extends Product with Serializable { + def format: CryptoKeyFormat + protected[crypto] def key: ByteString +} + +/** a human readable fingerprint of a key that serves as a unique identifier */ +final case class Fingerprint(protected val str: String68) + extends LengthLimitedStringWrapper + with PrettyPrinting { + def toLengthLimitedString: String68 = str + 
+ override def pretty: Pretty[Fingerprint] = prettyOfParam(_.unwrap.readableHash) +} + +object Fingerprint { + + implicit val fingerprintOrder: Order[Fingerprint] = + Order.by[Fingerprint, String](_.unwrap) + + implicit val setParameterFingerprint: SetParameter[Fingerprint] = (f, pp) => + pp >> f.toLengthLimitedString + implicit val getResultFingerprint: GetResult[Fingerprint] = GetResult { r => + Fingerprint + .fromProtoPrimitive(r.nextString()) + .valueOr(err => + throw new DbDeserializationException(s"Failed to deserialize fingerprint: $err") + ) + } + + implicit val fingerprintEncoder: Encoder[Fingerprint] = + Encoder.encodeString.contramap[Fingerprint](_.unwrap) + + private[this] def apply(hash: Hash): Fingerprint = + throw new UnsupportedOperationException("Use create/deserialization methods instead.") + + /** create fingerprint from a human readable string */ + def fromProtoPrimitive(str: String): ParsingResult[Fingerprint] = + SafeSimpleString + .fromProtoPrimitive(str) + .leftMap(ProtoDeserializationError.StringConversionError) + .flatMap(String68.fromProtoPrimitive(_, "Fingerprint")) + .map(Fingerprint(_)) + + private[crypto] def create( + bytes: ByteString + ): Fingerprint = { + val hash = Hash.digest(HashPurpose.PublicKeyFingerprint, bytes, HashAlgorithm.Sha256) + new Fingerprint(hash.toLengthLimitedHexString) + } + + def tryCreate(str: String): Fingerprint = + fromProtoPrimitive(str).valueOr(err => + throw new IllegalArgumentException(s"Invalid fingerprint $str: $err") + ) +} + +trait CryptoKeyPairKey extends CryptoKey { + def id: Fingerprint + + def isPublicKey: Boolean +} + +trait CryptoKeyPair[+PK <: PublicKey, +SK <: PrivateKey] + extends HasVersionedWrapper[CryptoKeyPair[PublicKey, PrivateKey]] { + + require( + publicKey.id == privateKey.id, + "Public and private key of the same key pair must have the same ids.", + ) + + override protected def companionObj = CryptoKeyPair + + def publicKey: PK + def privateKey: SK + + // The keypair is identified 
by the public key's id + def id: Fingerprint = publicKey.id + + protected def toProtoCryptoKeyPairPairV0: v0.CryptoKeyPair.Pair + + def toProtoCryptoKeyPairV0: v0.CryptoKeyPair = v0.CryptoKeyPair(toProtoCryptoKeyPairPairV0) +} + +object CryptoKeyPair extends HasVersionedMessageCompanion[CryptoKeyPair[PublicKey, PrivateKey]] { + + override def name: String = "crypto key pair" + + val supportedProtoVersions: SupportedProtoVersions = SupportedProtoVersions( + ProtoVersion(0) -> ProtoCodec( + ProtocolVersion.v30, + supportedProtoVersion(v0.CryptoKeyPair)(fromProtoCryptoKeyPairV0), + _.toProtoCryptoKeyPairV0.toByteString, + ) + ) + + def fromProtoCryptoKeyPairV0( + keyPair: v0.CryptoKeyPair + ): ParsingResult[CryptoKeyPair[_ <: PublicKey, _ <: PrivateKey]] = + for { + pair <- keyPair.pair match { + case v0.CryptoKeyPair.Pair.EncryptionKeyPair(value) => + EncryptionKeyPair + .fromProtoV0(value): Either[ + ProtoDeserializationError, + CryptoKeyPair[EncryptionPublicKey, EncryptionPrivateKey], + ] + case v0.CryptoKeyPair.Pair.SigningKeyPair(value) => + SigningKeyPair + .fromProtoV0(value): Either[ + ProtoDeserializationError, + CryptoKeyPair[SigningPublicKey, SigningPrivateKey], + ] + case v0.CryptoKeyPair.Pair.Empty => + Left(ProtoDeserializationError.FieldNotSet("pair")) + } + } yield pair +} + +trait PublicKey extends CryptoKeyPairKey { + def toByteString(version: ProtocolVersion): ByteString + + def fingerprint: Fingerprint = id + + def purpose: KeyPurpose + + def isSigning: Boolean = purpose == KeyPurpose.Signing + + override def isPublicKey: Boolean = true + + protected def toProtoPublicKeyKeyV0: v0.PublicKey.Key + + /** With the v0.PublicKey message we model the class hierarchy of public keys in protobuf. + * Each child class that implements this trait can be serialized with `toProto` to their corresponding protobuf + * message. With the following method, it can be serialized to this trait's protobuf message. 
+ */ + def toProtoPublicKeyV0: v0.PublicKey = v0.PublicKey(key = toProtoPublicKeyKeyV0) +} + +object PublicKey { + def fromProtoPublicKeyV0(publicKeyP: v0.PublicKey): ParsingResult[PublicKey] = + publicKeyP.key match { + case v0.PublicKey.Key.Empty => Left(ProtoDeserializationError.FieldNotSet("key")) + case v0.PublicKey.Key.EncryptionPublicKey(encPubKeyP) => + EncryptionPublicKey.fromProtoV0(encPubKeyP) + case v0.PublicKey.Key.SigningPublicKey(signPubKeyP) => + SigningPublicKey.fromProtoV0(signPubKeyP) + } + +} + +final case class KeyName(protected val str: String300) + extends LengthLimitedStringWrapper + with PrettyPrinting { + def emptyStringAsNone: Option[KeyName] = if (str.unwrap.isEmpty) None else Some(this) + override def pretty: Pretty[KeyName] = prettyOfClass(unnamedParam(_.str.unwrap.unquoted)) +} +object KeyName extends LengthLimitedStringWrapperCompanion[String300, KeyName] { + override def instanceName: String = "KeyName" + override protected def companion: String300.type = String300 + override protected def factoryMethodWrapper(str: String300): KeyName = KeyName(str) +} + +trait PublicKeyWithName + extends Product + with Serializable + with HasVersionedWrapper[PublicKeyWithName] { + type K <: PublicKey + def publicKey: K + def name: Option[KeyName] + + override protected def companionObj = PublicKeyWithName + + def toProtoV0: v0.PublicKeyWithName = + v0.PublicKeyWithName( + publicKey = Some( + publicKey.toProtoPublicKeyV0 + ), + name = name.map(_.unwrap).getOrElse(""), + ) +} + +object PublicKeyWithName extends HasVersionedMessageCompanion[PublicKeyWithName] { + + override def name: String = "PublicKeyWithName" + + val supportedProtoVersions: SupportedProtoVersions = SupportedProtoVersions( + ProtoVersion(0) -> ProtoCodec( + ProtocolVersion.v30, + supportedProtoVersion(v0.PublicKeyWithName)(fromProtoV0), + _.toProtoV0.toByteString, + ) + ) + + def fromProtoV0(key: v0.PublicKeyWithName): ParsingResult[PublicKeyWithName] = + for { + publicKey <- 
ProtoConverter.parseRequired( + PublicKey.fromProtoPublicKeyV0, + "public_key", + key.publicKey, + ) + name <- KeyName.fromProtoPrimitive(key.name) + } yield { + (publicKey: @unchecked) match { + case k: SigningPublicKey => SigningPublicKeyWithName(k, name.emptyStringAsNone) + case k: EncryptionPublicKey => EncryptionPublicKeyWithName(k, name.emptyStringAsNone) + } + } +} + +// The private key id must match the corresponding public key's one +trait PrivateKey extends CryptoKeyPairKey { + def purpose: KeyPurpose + + override def isPublicKey: Boolean = false + + protected def toProtoPrivateKeyKeyV0: v0.PrivateKey.Key + + /** Same representation of the class hierarchy in protobuf messages, see [[PublicKey]]. */ + def toProtoPrivateKey: v0.PrivateKey = v0.PrivateKey(key = toProtoPrivateKeyKeyV0) +} + +object PrivateKey { + + def fromProtoPrivateKey( + privateKeyP: v0.PrivateKey + ): ParsingResult[PrivateKey] = + privateKeyP.key match { + case v0.PrivateKey.Key.Empty => Left(ProtoDeserializationError.FieldNotSet("key")) + case v0.PrivateKey.Key.EncryptionPrivateKey(encPrivKeyP) => + EncryptionPrivateKey.fromProtoV0(encPrivKeyP) + case v0.PrivateKey.Key.SigningPrivateKey(signPrivKeyP) => + SigningPrivateKey.fromProtoV0(signPrivKeyP) + } + +} + +sealed trait CryptoKeyFormat extends Product with Serializable with PrettyPrinting { + def name: String + def toProtoEnum: v0.CryptoKeyFormat + override def pretty: Pretty[this.type] = prettyOfString(_.name) +} + +object CryptoKeyFormat { + + implicit val cryptoKeyFormatOrder: Order[CryptoKeyFormat] = + Order.by[CryptoKeyFormat, String](_.name) + + case object Tink extends CryptoKeyFormat { + override val name: String = "Tink" + override def toProtoEnum: v0.CryptoKeyFormat = v0.CryptoKeyFormat.Tink + } + + case object Der extends CryptoKeyFormat { + override val name: String = "DER" + override def toProtoEnum: v0.CryptoKeyFormat = v0.CryptoKeyFormat.Der + } + + case object Raw extends CryptoKeyFormat { + override val name: String 
= "Raw" + override def toProtoEnum: v0.CryptoKeyFormat = v0.CryptoKeyFormat.Raw + } + + case object Symbolic extends CryptoKeyFormat { + override val name: String = "Symbolic" + override def toProtoEnum: v0.CryptoKeyFormat = v0.CryptoKeyFormat.Symbolic + } + + def fromProtoEnum( + field: String, + formatP: v0.CryptoKeyFormat, + ): ParsingResult[CryptoKeyFormat] = + formatP match { + case v0.CryptoKeyFormat.MissingCryptoKeyFormat => + Left(ProtoDeserializationError.FieldNotSet(field)) + case v0.CryptoKeyFormat.Unrecognized(value) => + Left(ProtoDeserializationError.UnrecognizedEnum(field, value)) + case v0.CryptoKeyFormat.Tink => Right(CryptoKeyFormat.Tink) + case v0.CryptoKeyFormat.Der => Right(CryptoKeyFormat.Der) + case v0.CryptoKeyFormat.Raw => Right(CryptoKeyFormat.Raw) + case v0.CryptoKeyFormat.Symbolic => Right(CryptoKeyFormat.Symbolic) + } +} + +sealed trait KeyPurpose extends Product with Serializable with PrettyPrinting { + + def name: String + + // An identifier for a key purpose that is used for serialization + def id: Byte + + def toProtoEnum: v0.KeyPurpose + + override def pretty: Pretty[KeyPurpose.this.type] = prettyOfString(_.name) +} + +object KeyPurpose { + + val all = Set(Signing, Encryption) + + implicit val setParameterKeyPurpose: SetParameter[KeyPurpose] = (k, pp) => pp.setByte(k.id) + implicit val getResultKeyPurpose: GetResult[KeyPurpose] = GetResult { r => + r.nextByte() match { + case Signing.id => Signing + case Encryption.id => Encryption + case unknown => throw new DbDeserializationException(s"Unknown key purpose id: $unknown") + } + } + + case object Signing extends KeyPurpose { + override val name: String = "signing" + override val id: Byte = 0 + override def toProtoEnum: v0.KeyPurpose = v0.KeyPurpose.SigningKeyPurpose + } + + case object Encryption extends KeyPurpose { + override val name: String = "encryption" + override val id: Byte = 1 + override def toProtoEnum: v0.KeyPurpose = v0.KeyPurpose.EncryptionKeyPurpose + } + + def 
fromProtoEnum( + field: String, + purposeP: v0.KeyPurpose, + ): ParsingResult[KeyPurpose] = + purposeP match { + case v0.KeyPurpose.UnknownKeyPurpose => Left(ProtoDeserializationError.FieldNotSet(field)) + case v0.KeyPurpose.Unrecognized(value) => + Left(ProtoDeserializationError.UnrecognizedEnum(field, value)) + case v0.KeyPurpose.SigningKeyPurpose => Right(Signing) + case v0.KeyPurpose.EncryptionKeyPurpose => Right(Encryption) + } + +} + +/** Information that is cached for each view and is to be re-used if another view has + * the same recipients and transparency can be respected. + * @param sessionKeyRandomness the randomness to create the session key that is then used to encrypt the randomness of the view. + * @param encryptedSessionKeys the randomness of the session key encrypted for each recipient. + */ +final case class SessionKeyInfo( + sessionKeyRandomness: SecureRandomness, + encryptedSessionKeys: Seq[AsymmetricEncrypted[SecureRandomness]], +) diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/Encryption.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/Encryption.scala new file mode 100644 index 0000000000..5014882dfe --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/Encryption.scala @@ -0,0 +1,590 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.crypto + +import cats.Order +import cats.data.EitherT +import cats.instances.future.* +import com.digitalasset.canton.ProtoDeserializationError +import com.digitalasset.canton.config.CantonRequireTypes.String68 +import com.digitalasset.canton.crypto.store.{ + CryptoPrivateStoreError, + CryptoPrivateStoreExtended, + CryptoPublicStoreError, +} +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.serialization.{DeserializationError, ProtoConverter} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.* +import com.digitalasset.canton.version.{ + HasVersionedMessageCompanion, + HasVersionedMessageCompanionDbHelpers, + HasVersionedToByteString, + HasVersionedWrapper, + ProtoVersion, + ProtocolVersion, +} +import com.google.protobuf.ByteString +import slick.jdbc.GetResult + +import scala.concurrent.{ExecutionContext, Future} + +/** Encryption operations that do not require access to a private key store but operates with provided keys. */ +trait EncryptionOps { + + protected def decryptWithInternal[M]( + encrypted: AsymmetricEncrypted[M], + privateKey: EncryptionPrivateKey, + )( + deserialize: ByteString => Either[DeserializationError, M] + ): Either[DecryptionError, M] + + def defaultSymmetricKeyScheme: SymmetricKeyScheme + + /** Generates and returns a random symmetric key using the specified scheme. */ + def generateSymmetricKey( + scheme: SymmetricKeyScheme = defaultSymmetricKeyScheme + ): Either[EncryptionKeyGenerationError, SymmetricKey] + + /** Creates a symmetric key with the specified scheme for the given randomness. 
*/ + def createSymmetricKey( + bytes: SecureRandomness, + scheme: SymmetricKeyScheme = defaultSymmetricKeyScheme, + ): Either[EncryptionKeyCreationError, SymmetricKey] + + /** Encrypts the given bytes using the given public key */ + def encryptWith[M <: HasVersionedToByteString]( + message: M, + publicKey: EncryptionPublicKey, + version: ProtocolVersion, + ): Either[EncryptionError, AsymmetricEncrypted[M]] + + /** Deterministically encrypts the given bytes using the given public key. + * This is unsafe for general use and it's only used to encrypt the decryption key of each view + */ + def encryptDeterministicWith[M <: HasVersionedToByteString]( + message: M, + publicKey: EncryptionPublicKey, + version: ProtocolVersion, + )(implicit traceContext: TraceContext): Either[EncryptionError, AsymmetricEncrypted[M]] + + /** Decrypts a message encrypted using `encryptWith` */ + def decryptWith[M](encrypted: AsymmetricEncrypted[M], privateKey: EncryptionPrivateKey)( + deserialize: ByteString => Either[DeserializationError, M] + ): Either[DecryptionError, M] = for { + _ <- Either.cond( + encrypted.encryptedFor == privateKey.id, + (), + DecryptionError.InvalidEncryptionKey( + s"Private key ${privateKey.id} does not match the used encryption key ${encrypted.encryptedFor}" + ), + ) + message <- decryptWithInternal(encrypted, privateKey)(deserialize) + } yield message + + /** Encrypts the given message with the given symmetric key */ + def encryptWith[M <: HasVersionedToByteString]( + message: M, + symmetricKey: SymmetricKey, + version: ProtocolVersion, + ): Either[EncryptionError, Encrypted[M]] + + /** Decrypts a message encrypted using `encryptWith` */ + def decryptWith[M](encrypted: Encrypted[M], symmetricKey: SymmetricKey)( + deserialize: ByteString => Either[DeserializationError, M] + ): Either[DecryptionError, M] + +} + +/** Encryption operations that require access to stored private keys. 
*/ +trait EncryptionPrivateOps { + + def defaultEncryptionKeyScheme: EncryptionKeyScheme + + /** Decrypts an encrypted message using the referenced private encryption key */ + def decrypt[M](encrypted: AsymmetricEncrypted[M])( + deserialize: ByteString => Either[DeserializationError, M] + )(implicit + traceContext: TraceContext + ): EitherT[Future, DecryptionError, M] + + /** Generates a new encryption key pair with the given scheme and optional name, stores the private key and returns the public key. */ + def generateEncryptionKey( + scheme: EncryptionKeyScheme = defaultEncryptionKeyScheme, + name: Option[KeyName] = None, + )(implicit + traceContext: TraceContext + ): EitherT[Future, EncryptionKeyGenerationError, EncryptionPublicKey] +} + +/** A default implementation with a private key store */ +trait EncryptionPrivateStoreOps extends EncryptionPrivateOps { + + implicit val ec: ExecutionContext + + protected def store: CryptoPrivateStoreExtended + + protected val encryptionOps: EncryptionOps + + /** Decrypts an encrypted message using the referenced private encryption key */ + def decrypt[M](encryptedMessage: AsymmetricEncrypted[M])( + deserialize: ByteString => Either[DeserializationError, M] + )(implicit tc: TraceContext): EitherT[Future, DecryptionError, M] = + store + .decryptionKey(encryptedMessage.encryptedFor)(TraceContext.todo) + .leftMap(storeError => DecryptionError.KeyStoreError(storeError.show)) + .subflatMap(_.toRight(DecryptionError.UnknownEncryptionKey(encryptedMessage.encryptedFor))) + .subflatMap(encryptionKey => + encryptionOps.decryptWith(encryptedMessage, encryptionKey)(deserialize) + ) + + /** Internal method to generate and return the entire encryption key pair */ + protected[crypto] def generateEncryptionKeypair(scheme: EncryptionKeyScheme)(implicit + traceContext: TraceContext + ): EitherT[Future, EncryptionKeyGenerationError, EncryptionKeyPair] + + def generateEncryptionKey( + scheme: EncryptionKeyScheme = defaultEncryptionKeyScheme, + 
name: Option[KeyName] = None, + )(implicit + traceContext: TraceContext + ): EitherT[Future, EncryptionKeyGenerationError, EncryptionPublicKey] = + for { + keypair <- generateEncryptionKeypair(scheme) + _ <- store + .storeDecryptionKey(keypair.privateKey, name) + .leftMap[EncryptionKeyGenerationError]( + EncryptionKeyGenerationError.EncryptionPrivateStoreError + ) + } yield keypair.publicKey + +} + +/** A tag to denote encrypted data. */ +final case class Encrypted[+M] private[crypto] (ciphertext: ByteString) extends NoCopy + +object Encrypted { + private[this] def apply[M](ciphertext: ByteString): Encrypted[M] = + throw new UnsupportedOperationException("Use encryption methods instead") + + def fromByteString[M](byteString: ByteString): Either[DeserializationError, Encrypted[M]] = + Right(new Encrypted[M](byteString)) +} + +final case class AsymmetricEncrypted[+M]( + ciphertext: ByteString, + encryptedFor: Fingerprint, +) extends NoCopy { + def encrypted: Encrypted[M] = new Encrypted(ciphertext) +} + +object AsymmetricEncrypted { + val noEncryptionFingerprint = Fingerprint(String68.tryCreate("no-encryption")) +} + +/** Key schemes for asymmetric/hybrid encryption. 
*/ +sealed trait EncryptionKeyScheme extends Product with Serializable with PrettyPrinting { + def name: String + // TODO(#12757): once we decouple the key scheme from the actual encryption algorithm this will move to the algorithm + def supportDeterministicEncryption: Boolean + def toProtoEnum: v0.EncryptionKeyScheme + override val pretty: Pretty[this.type] = prettyOfString(_.name) +} + +object EncryptionKeyScheme { + + implicit val encryptionKeySchemeOrder: Order[EncryptionKeyScheme] = + Order.by[EncryptionKeyScheme, String](_.name) + + case object EciesP256HkdfHmacSha256Aes128Gcm extends EncryptionKeyScheme { + override val name: String = "ECIES-P256_HMAC256_AES128-GCM" + override val supportDeterministicEncryption: Boolean = false + override def toProtoEnum: v0.EncryptionKeyScheme = + v0.EncryptionKeyScheme.EciesP256HkdfHmacSha256Aes128Gcm + } + + /* This hybrid scheme from JCE/Bouncy Castle is intended to be used to encrypt the key for the view payload data + * and can be made deterministic (e.g. using the hash(message ++ public key) as our source of randomness). + * This way, every recipient of the view message can check that every other recipient can decrypt it + * (i.e. transparency). 
+ */ + case object EciesP256HmacSha256Aes128Cbc extends EncryptionKeyScheme { + override val name: String = "ECIES-P256_HMAC256_AES128-CBC" + override val supportDeterministicEncryption: Boolean = true + override def toProtoEnum: v0.EncryptionKeyScheme = + v0.EncryptionKeyScheme.EciesP256HmacSha256Aes128Cbc + } + + case object Rsa2048OaepSha256 extends EncryptionKeyScheme { + override val name: String = "RSA2048-OAEP-SHA256" + override val supportDeterministicEncryption: Boolean = true + override def toProtoEnum: v0.EncryptionKeyScheme = + v0.EncryptionKeyScheme.Rsa2048OaepSha256 + } + + def fromProtoEnum( + field: String, + schemeP: v0.EncryptionKeyScheme, + ): ParsingResult[EncryptionKeyScheme] = + schemeP match { + case v0.EncryptionKeyScheme.MissingEncryptionKeyScheme => + Left(ProtoDeserializationError.FieldNotSet(field)) + case v0.EncryptionKeyScheme.Unrecognized(value) => + Left(ProtoDeserializationError.UnrecognizedEnum(field, value)) + case v0.EncryptionKeyScheme.EciesP256HkdfHmacSha256Aes128Gcm => + Right(EncryptionKeyScheme.EciesP256HkdfHmacSha256Aes128Gcm) + case v0.EncryptionKeyScheme.EciesP256HmacSha256Aes128Cbc => + Right(EncryptionKeyScheme.EciesP256HmacSha256Aes128Cbc) + case v0.EncryptionKeyScheme.Rsa2048OaepSha256 => + Right(EncryptionKeyScheme.Rsa2048OaepSha256) + } +} + +/** Key schemes for symmetric encryption. 
*/ +sealed trait SymmetricKeyScheme extends Product with Serializable with PrettyPrinting { + def name: String + def toProtoEnum: v0.SymmetricKeyScheme + def keySizeInBytes: Int + override def pretty: Pretty[this.type] = prettyOfString(_.name) +} + +object SymmetricKeyScheme { + + implicit val symmetricKeySchemeOrder: Order[SymmetricKeyScheme] = + Order.by[SymmetricKeyScheme, String](_.name) + + /** AES with 128bit key in GCM */ + case object Aes128Gcm extends SymmetricKeyScheme { + override def name: String = "AES128-GCM" + override def toProtoEnum: v0.SymmetricKeyScheme = v0.SymmetricKeyScheme.Aes128Gcm + override def keySizeInBytes: Int = 16 + } + + def fromProtoEnum( + field: String, + schemeP: v0.SymmetricKeyScheme, + ): ParsingResult[SymmetricKeyScheme] = + schemeP match { + case v0.SymmetricKeyScheme.MissingSymmetricKeyScheme => + Left(ProtoDeserializationError.FieldNotSet(field)) + case v0.SymmetricKeyScheme.Unrecognized(value) => + Left(ProtoDeserializationError.UnrecognizedEnum(field, value)) + case v0.SymmetricKeyScheme.Aes128Gcm => Right(SymmetricKeyScheme.Aes128Gcm) + } +} + +final case class SymmetricKey( + format: CryptoKeyFormat, + protected[crypto] val key: ByteString, + scheme: SymmetricKeyScheme, +) extends CryptoKey + with HasVersionedWrapper[SymmetricKey] + with NoCopy { + override protected def companionObj = SymmetricKey + + protected def toProtoV0: v0.SymmetricKey = + v0.SymmetricKey(format = format.toProtoEnum, key = key, scheme = scheme.toProtoEnum) +} + +object SymmetricKey extends HasVersionedMessageCompanion[SymmetricKey] { + override val name: String = "SymmetricKey" + + val supportedProtoVersions: SupportedProtoVersions = SupportedProtoVersions( + ProtoVersion(0) -> ProtoCodec( + ProtocolVersion.v30, + supportedProtoVersion(v0.SymmetricKey)(fromProtoV0), + _.toProtoV0.toByteString, + ) + ) + + private def fromProtoV0(keyP: v0.SymmetricKey): ParsingResult[SymmetricKey] = + for { + format <- CryptoKeyFormat.fromProtoEnum("format", 
keyP.format) + scheme <- SymmetricKeyScheme.fromProtoEnum("scheme", keyP.scheme) + } yield new SymmetricKey(format, keyP.key, scheme) +} + +final case class EncryptionKeyPair(publicKey: EncryptionPublicKey, privateKey: EncryptionPrivateKey) + extends CryptoKeyPair[EncryptionPublicKey, EncryptionPrivateKey] + with NoCopy { + + def toProtoV0: v0.EncryptionKeyPair = + v0.EncryptionKeyPair(Some(publicKey.toProtoV0), Some(privateKey.toProtoV0)) + + protected def toProtoCryptoKeyPairPairV0: v0.CryptoKeyPair.Pair = + v0.CryptoKeyPair.Pair.EncryptionKeyPair(toProtoV0) +} + +object EncryptionKeyPair { + + private[this] def apply( + publicKey: EncryptionPublicKey, + privateKey: EncryptionPrivateKey, + ): EncryptionKeyPair = + throw new UnsupportedOperationException("Use generate or deserialization methods") + + private[crypto] def create( + id: Fingerprint, + format: CryptoKeyFormat, + publicKeyBytes: ByteString, + privateKeyBytes: ByteString, + scheme: EncryptionKeyScheme, + ): EncryptionKeyPair = { + val publicKey = new EncryptionPublicKey(id, format, publicKeyBytes, scheme) + val privateKey = new EncryptionPrivateKey(publicKey.id, format, privateKeyBytes, scheme) + new EncryptionKeyPair(publicKey, privateKey) + } + + def fromProtoV0( + encryptionKeyPairP: v0.EncryptionKeyPair + ): ParsingResult[EncryptionKeyPair] = + for { + publicKey <- ProtoConverter.parseRequired( + EncryptionPublicKey.fromProtoV0, + "public_key", + encryptionKeyPairP.publicKey, + ) + privateKey <- ProtoConverter.parseRequired( + EncryptionPrivateKey.fromProtoV0, + "private_key", + encryptionKeyPairP.privateKey, + ) + } yield new EncryptionKeyPair(publicKey, privateKey) +} + +final case class EncryptionPublicKey private[crypto] ( + id: Fingerprint, + format: CryptoKeyFormat, + protected[crypto] val key: ByteString, + scheme: EncryptionKeyScheme, +) extends PublicKey + with PrettyPrinting + with HasVersionedWrapper[EncryptionPublicKey] + with NoCopy { + + override protected def companionObj = 
EncryptionPublicKey + + val purpose: KeyPurpose = KeyPurpose.Encryption + + def toProtoV0: v0.EncryptionPublicKey = + v0.EncryptionPublicKey( + id = id.toProtoPrimitive, + format = format.toProtoEnum, + publicKey = key, + scheme = scheme.toProtoEnum, + ) + + override protected def toProtoPublicKeyKeyV0: v0.PublicKey.Key = + v0.PublicKey.Key.EncryptionPublicKey(toProtoV0) + + override val pretty: Pretty[EncryptionPublicKey] = + prettyOfClass(param("id", _.id), param("format", _.format), param("scheme", _.scheme)) +} + +object EncryptionPublicKey + extends HasVersionedMessageCompanion[EncryptionPublicKey] + with HasVersionedMessageCompanionDbHelpers[EncryptionPublicKey] { + val supportedProtoVersions: SupportedProtoVersions = SupportedProtoVersions( + ProtoVersion(0) -> ProtoCodec( + ProtocolVersion.v30, + supportedProtoVersion(v0.EncryptionPublicKey)(fromProtoV0), + _.toProtoV0.toByteString, + ) + ) + + override def name: String = "encryption public key" + + private[this] def apply( + id: Fingerprint, + format: CryptoKeyFormat, + key: ByteString, + scheme: EncryptionKeyScheme, + ): EncryptionPublicKey = + throw new UnsupportedOperationException("Use generate or deserialization methods") + + def fromProtoV0( + publicKeyP: v0.EncryptionPublicKey + ): ParsingResult[EncryptionPublicKey] = + for { + id <- Fingerprint.fromProtoPrimitive(publicKeyP.id) + format <- CryptoKeyFormat.fromProtoEnum("format", publicKeyP.format) + scheme <- EncryptionKeyScheme.fromProtoEnum("scheme", publicKeyP.scheme) + } yield new EncryptionPublicKey(id, format, publicKeyP.publicKey, scheme) +} + +final case class EncryptionPublicKeyWithName( + override val publicKey: EncryptionPublicKey, + override val name: Option[KeyName], +) extends PublicKeyWithName { + type K = EncryptionPublicKey +} + +object EncryptionPublicKeyWithName { + implicit def getResultEncryptionPublicKeyWithName(implicit + getResultByteArray: GetResult[Array[Byte]] + ): GetResult[EncryptionPublicKeyWithName] = + GetResult { r
=> + EncryptionPublicKeyWithName(r.<<, r.<<) + } +} + +final case class EncryptionPrivateKey private[crypto] ( + id: Fingerprint, + format: CryptoKeyFormat, + protected[crypto] val key: ByteString, + scheme: EncryptionKeyScheme, +) extends PrivateKey + with HasVersionedWrapper[EncryptionPrivateKey] + with NoCopy { + + override protected def companionObj = EncryptionPrivateKey + + override def purpose: KeyPurpose = KeyPurpose.Encryption + + def toProtoV0: v0.EncryptionPrivateKey = + v0.EncryptionPrivateKey( + id = id.toProtoPrimitive, + format = format.toProtoEnum, + privateKey = key, + scheme = scheme.toProtoEnum, + ) + + override protected def toProtoPrivateKeyKeyV0: v0.PrivateKey.Key = + v0.PrivateKey.Key.EncryptionPrivateKey(toProtoV0) +} + +object EncryptionPrivateKey extends HasVersionedMessageCompanion[EncryptionPrivateKey] { + val supportedProtoVersions: SupportedProtoVersions = SupportedProtoVersions( + ProtoVersion(0) -> ProtoCodec( + ProtocolVersion.v30, + supportedProtoVersion(v0.EncryptionPrivateKey)(fromProtoV0), + _.toProtoV0.toByteString, + ) + ) + + override def name: String = "encryption private key" + + private[this] def apply( + id: Fingerprint, + format: CryptoKeyFormat, + key: ByteString, + scheme: EncryptionKeyScheme, + ): EncryptionPrivateKey = + throw new UnsupportedOperationException("Use generate or deserialization methods") + + def fromProtoV0( + privateKeyP: v0.EncryptionPrivateKey + ): ParsingResult[EncryptionPrivateKey] = + for { + id <- Fingerprint.fromProtoPrimitive(privateKeyP.id) + format <- CryptoKeyFormat.fromProtoEnum("format", privateKeyP.format) + scheme <- EncryptionKeyScheme.fromProtoEnum("scheme", privateKeyP.scheme) + } yield new EncryptionPrivateKey(id, format, privateKeyP.privateKey, scheme) +} + +sealed trait EncryptionError extends Product with Serializable with PrettyPrinting +object EncryptionError { + final case class UnsupportedSchemeForDeterministicEncryption(error: String) + extends EncryptionError { + override 
def pretty: Pretty[UnsupportedSchemeForDeterministicEncryption] = prettyOfClass( + unnamedParam(_.error.unquoted) + ) + } + final case class FailedToEncrypt(error: String) extends EncryptionError { + override def pretty: Pretty[FailedToEncrypt] = prettyOfClass(unnamedParam(_.error.unquoted)) + } + final case class InvalidSymmetricKey(error: String) extends EncryptionError { + override def pretty: Pretty[InvalidSymmetricKey] = prettyOfClass(unnamedParam(_.error.unquoted)) + } + final case class InvalidEncryptionKey(error: String) extends EncryptionError { + override def pretty: Pretty[InvalidEncryptionKey] = prettyOfClass( + unnamedParam(_.error.unquoted) + ) + } +} + +sealed trait DecryptionError extends Product with Serializable with PrettyPrinting +object DecryptionError { + final case class FailedToDecrypt(error: String) extends DecryptionError { + override def pretty: Pretty[FailedToDecrypt] = prettyOfClass(unnamedParam(_.error.unquoted)) + } + final case class InvalidSymmetricKey(error: String) extends DecryptionError { + override def pretty: Pretty[InvalidSymmetricKey] = prettyOfClass(unnamedParam(_.error.unquoted)) + } + final case class InvariantViolation(error: String) extends DecryptionError { + override def pretty: Pretty[InvariantViolation] = prettyOfClass(unnamedParam(_.error.unquoted)) + } + final case class InvalidEncryptionKey(error: String) extends DecryptionError { + override def pretty: Pretty[InvalidEncryptionKey] = prettyOfClass( + unnamedParam(_.error.unquoted) + ) + } + final case class UnknownEncryptionKey(keyId: Fingerprint) extends DecryptionError { + override def pretty: Pretty[UnknownEncryptionKey] = prettyOfClass(param("keyId", _.keyId)) + } + final case class DecryptionKeyError(error: CryptoPrivateStoreError) extends DecryptionError { + override def pretty: Pretty[DecryptionKeyError] = prettyOfClass(unnamedParam(_.error)) + } + final case class FailedToDeserialize(error: DeserializationError) extends DecryptionError { + override def 
pretty: Pretty[FailedToDeserialize] = prettyOfClass(unnamedParam(_.error)) + } + final case class KeyStoreError(error: String) extends DecryptionError { + override def pretty: Pretty[KeyStoreError] = prettyOfClass(unnamedParam(_.error.unquoted)) + } +} + +sealed trait EncryptionKeyGenerationError extends Product with Serializable with PrettyPrinting +object EncryptionKeyGenerationError { + + final case class GeneralError(error: Exception) extends EncryptionKeyGenerationError { + override def pretty: Pretty[GeneralError] = prettyOfClass(unnamedParam(_.error)) + } + + final case class GeneralKmsError(error: String) extends EncryptionKeyGenerationError { + override def pretty: Pretty[GeneralKmsError] = prettyOfClass(unnamedParam(_.error.unquoted)) + } + + final case class NameInvalidError(error: String) extends EncryptionKeyGenerationError { + override def pretty: Pretty[NameInvalidError] = prettyOfClass(unnamedParam(_.error.unquoted)) + } + + final case class FingerprintError(error: String) extends EncryptionKeyGenerationError { + override def pretty: Pretty[FingerprintError] = prettyOfClass(unnamedParam(_.error.unquoted)) + } + + final case class UnsupportedKeyScheme(scheme: EncryptionKeyScheme) + extends EncryptionKeyGenerationError { + override def pretty: Pretty[UnsupportedKeyScheme] = prettyOfClass(param("scheme", _.scheme)) + } + + final case class EncryptionPrivateStoreError(error: CryptoPrivateStoreError) + extends EncryptionKeyGenerationError { + override def pretty: Pretty[EncryptionPrivateStoreError] = prettyOfClass(unnamedParam(_.error)) + } + + final case class EncryptionPublicStoreError(error: CryptoPublicStoreError) + extends EncryptionKeyGenerationError { + override def pretty: Pretty[EncryptionPublicStoreError] = prettyOfClass(unnamedParam(_.error)) + } +} + +sealed trait EncryptionKeyCreationError extends Product with Serializable with PrettyPrinting +object EncryptionKeyCreationError { + + final case class InvalidRandomnessLength(randomnessLength: 
Int, expectedKeyLength: Int) + extends EncryptionKeyCreationError { + override def pretty: Pretty[InvalidRandomnessLength] = prettyOfClass( + param("provided randomness length", _.randomnessLength), + param("expected key length", _.expectedKeyLength), + ) + } + + final case class InternalConversionError(error: String) extends EncryptionKeyCreationError { + override def pretty: Pretty[InternalConversionError] = prettyOfClass( + unnamedParam(_.error.unquoted) + ) + } + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/Hash.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/Hash.scala new file mode 100644 index 0000000000..e624b54f49 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/Hash.scala @@ -0,0 +1,224 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.crypto + +import cats.Order +import cats.syntax.either.* +import com.digitalasset.canton.ProtoDeserializationError +import com.digitalasset.canton.ProtoDeserializationError.CryptoDeserializationError +import com.digitalasset.canton.config.CantonRequireTypes.String68 +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.serialization.{ + DefaultDeserializationError, + DeserializationError, + DeterministicEncoding, + HasCryptographicEvidence, +} +import com.digitalasset.canton.util.HexString +import com.google.protobuf.ByteString +import slick.jdbc.{GetResult, SetParameter} + +/** A Multi-hash compatible description of a hash algorithm + * + * @param name Human readable name, must align with JCE hash algorithm names + * @param index Multi-hash index + * @param length Length of the hash in bytes + * + * NOTE: There exists an upper limit on the supported lengths due 
to serialization constraints on LF ledger strings. + * The total length of a multi-hash hash (including the var-int index and length encoding, plus the actual length of + * the hash) must not exceed 46 bytes. + * + * NOTE: there exists a similar, soft upper limit, on the supported lengths due to string length constraints in the + * database. The total amount of bytes produced by `digest` should not exceed 512 bytes (this will lead to a HexString of + * length 1024). If needed, this limit can be increased by increasing the allowed characters for varchar's in the DBs. + */ +sealed abstract class HashAlgorithm(val name: String, val index: Long, val length: Long) + extends PrettyPrinting { + def toProtoEnum: v0.HashAlgorithm + + override def pretty: Pretty[HashAlgorithm] = prettyOfString(_.name) +} + +object HashAlgorithm { + + implicit val hashAlgorithmOrder: Order[HashAlgorithm] = Order.by[HashAlgorithm, Long](_.index) + + val algorithms: Map[Long, HashAlgorithm] = Map { + 0x12L -> Sha256 + } + + case object Sha256 extends HashAlgorithm("SHA-256", 0x12, 32) { + override def toProtoEnum: v0.HashAlgorithm = v0.HashAlgorithm.Sha256 + } + + def lookup(index: Long, length: Long): Either[String, HashAlgorithm] = + for { + algo <- algorithms.get(index).toRight(s"Unknown hash algorithm for index: $index") + _ <- Either.cond( + algo.length == length, + (), + s"Mismatch of lengths for ${algo.name}: given $length, expected ${algo.length}", + ) + } yield algo + + def fromProtoEnum( + field: String, + hashAlgorithmP: v0.HashAlgorithm, + ): ParsingResult[HashAlgorithm] = + hashAlgorithmP match { + case v0.HashAlgorithm.MissingHashAlgorithm => + Left(ProtoDeserializationError.FieldNotSet(field)) + case v0.HashAlgorithm.Unrecognized(value) => + Left(ProtoDeserializationError.UnrecognizedEnum(field, value)) + case v0.HashAlgorithm.Sha256 => Right(Sha256) + } +} + +final case class Hash private (private val hash: ByteString, private val algorithm: HashAlgorithm) + extends 
HasCryptographicEvidence + with Ordered[Hash] + with PrettyPrinting { + + require(!hash.isEmpty, "Hash must not be empty") + require( + hash.size() == algorithm.length, + s"Hash size ${hash.size()} must match hash algorithm length ${algorithm.length}", + ) + + /** Multi-hash compatible serialization */ + override def getCryptographicEvidence: ByteString = + DeterministicEncoding + .encodeUVarInt(algorithm.index) + .concat(DeterministicEncoding.encodeUVarInt(algorithm.length)) + .concat(hash) + + // A serialization of the entire multi-hash to a hex string + val toHexString: String = HexString.toHexString(getCryptographicEvidence) + // We assume/require that the HexString of a hash has at most 68 characters (for reference: outputs of our SHA256 algorithm have + // exactly 68 HexString characters). If you want to increase this limit, please consult the team, see also documentation at `LengthLimitedString` for more details + val toLengthLimitedHexString: String68 = + String68.tryCreate(toHexString, Some("HexString of hash")) + + def compare(that: Hash): Int = { + this.toHexString.compare(that.toHexString) + } + + override val pretty: Pretty[Hash] = prettyOfString(hash => + s"${hash.algorithm.name}:${HexString.toHexString(hash.hash).readableHash}" + ) + + /** Access to the raw hash, should NOT be used for serialization. 
*/ + private[canton] def unwrap: ByteString = hash +} + +object Hash { + + implicit val setParameterHash: SetParameter[Hash] = (hash, pp) => { + import com.digitalasset.canton.resource.DbStorage.Implicits.setParameterByteString + pp.>>(hash.getCryptographicEvidence) + } + + implicit val getResultHash: GetResult[Hash] = GetResult { r => + import com.digitalasset.canton.resource.DbStorage.Implicits.getResultByteString + tryFromByteString(r.<<) + } + + implicit val setParameterOptionHash: SetParameter[Option[Hash]] = (hash, pp) => { + import com.digitalasset.canton.resource.DbStorage.Implicits.setParameterByteStringOption + pp.>>(hash.map(_.getCryptographicEvidence)) + } + + implicit val getResultOptionHash: GetResult[Option[Hash]] = GetResult { r => + import com.digitalasset.canton.resource.DbStorage.Implicits.getResultByteStringOption + (r.<<[Option[ByteString]]).map(bytes => tryFromByteString(bytes)) + + } + + private[crypto] def tryCreate(hash: ByteString, algorithm: HashAlgorithm): Hash = + create(hash, algorithm).valueOr(err => throw new IllegalArgumentException(err)) + + private[crypto] def create(hash: ByteString, algorithm: HashAlgorithm): Either[String, Hash] = + Either.cond( + hash.size() == algorithm.length, + new Hash(hash, algorithm), + s"Size of given hash ${hash.size()} does not match expected size ${algorithm.length} for ${algorithm.name}", + ) + + private def tryFromByteString(bytes: ByteString): Hash = + fromByteString(bytes).valueOr(err => + throw new IllegalArgumentException(s"Failed to deserialize hash from $bytes: $err") + ) + + def build(purpose: HashPurpose, algorithm: HashAlgorithm): HashBuilder = + new HashBuilderFromMessageDigest(algorithm, purpose) + + def digest(purpose: HashPurpose, bytes: ByteString, algorithm: HashAlgorithm): Hash = { + // It's safe to use `addWithoutLengthPrefix` because there cannot be hash collisions due to concatenation + // as we're immediately calling `finish`. 
+ build(purpose, algorithm).addWithoutLengthPrefix(bytes).finish() + } + + def fromProtoPrimitive(bytes: ByteString): ParsingResult[Hash] = + fromByteString(bytes).leftMap(CryptoDeserializationError) + + def fromProtoPrimitiveOption(bytes: ByteString): ParsingResult[Option[Hash]] = + fromByteStringOption(bytes).leftMap(CryptoDeserializationError) + + /** Decode a serialized [[Hash]] from a multi-hash format. */ + def fromByteString(bytes: ByteString): Either[DeserializationError, Hash] = + for { + indexAndBytes <- DeterministicEncoding.decodeUVarInt(bytes) + (index, lengthAndHashBytes) = indexAndBytes + lengthAndBytes <- DeterministicEncoding.decodeUVarInt(lengthAndHashBytes) + (length, hashBytes) = lengthAndBytes + algorithm <- HashAlgorithm + .lookup(index, length) + .leftMap(err => DefaultDeserializationError(s"Invalid hash algorithm: $err")) + hash <- create(hashBytes, algorithm).leftMap(err => DefaultDeserializationError(err)) + } yield hash + + /** Decode a serialized [[Hash]] using [[fromByteString]] except for the empty [[com.google.protobuf.ByteString]], + * which maps to [[scala.None$]]. + */ + def fromByteStringOption(bytes: ByteString): Either[DeserializationError, Option[Hash]] = + if (bytes.isEmpty) Right(None) else fromByteString(bytes).map(Some(_)) + + def fromHexString(hexString: String): Either[DeserializationError, Hash] = + HexString + .parse(hexString) + .toRight(DefaultDeserializationError(s"Failed to parse hex string: $hexString")) + .map(ByteString.copyFrom) + .flatMap(fromByteString) + + def tryFromHexString(hexString: String): Hash = + fromHexString(hexString).valueOr(err => + throw new IllegalArgumentException(s"Invalid hex string: $err") + ) +} + +/** Trait only needed if we want to make the default algorithm configurable + */ +trait HashOps { + + def defaultHashAlgorithm: HashAlgorithm + + /** Creates a [[HashBuilder]] for computing a hash with the given purpose. 
+ * For different purposes `purpose1` and `purpose2`, all implementations must ensure + * that it is computationally infeasible to find a sequence `bs` of [[com.google.protobuf.ByteString]]s + * such that `bs.foldLeft(hashBuilder(purpose1))((b, hb) => hb.add(b)).finish` + * and `bs.foldLeft(hashBuilder(purpose2))((b, hb) => hb.add(b)).finish` + * yield the same hash. + */ + def build(purpose: HashPurpose, algorithm: HashAlgorithm = defaultHashAlgorithm): HashBuilder = + Hash.build(purpose, algorithm) + + /** Convenience method for `build(purpose).addWithoutLengthPrefix(bytes).finish` */ + def digest( + purpose: HashPurpose, + bytes: ByteString, + algorithm: HashAlgorithm = defaultHashAlgorithm, + ): Hash = + Hash.digest(purpose, bytes, algorithm) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/HashBuilder.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/HashBuilder.scala new file mode 100644 index 0000000000..3dab79236a --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/HashBuilder.scala @@ -0,0 +1,110 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.crypto + +import com.digitalasset.canton.checked +import com.digitalasset.canton.serialization.DeterministicEncoding +import com.google.protobuf.ByteString + +import java.nio.ByteBuffer +import java.security.MessageDigest + +/** The methods of [[HashBuilder]] change its internal state and return `this` for convenience. 
+ * + * Requirements for all implementations: + * + * For any [[HashBuilder]] hb, + * it is computationally infeasible to find two sequences `as1` and `as2` of calls to `add` + * such that the concatenation of `as1` differs from the concatenation `as2`, + * yet their computed hashes are the same, i.e., + * `as1.foldLeft(hb)((hb, a) => hb.add(a)).finish` is the same as `as2.foldLeft(hb)((hb, a) => hb.add(a)).finish`. + */ +trait HashBuilder { + + /** Appends a [[com.google.protobuf.ByteString]] `a` to the sequence of bytes to be hashed. + * Use `add` for [[com.google.protobuf.ByteString]]s of variable length + * to prevent hash collisions due to concatenation of variable-length strings. + * + * Document at the call site in production code why it is not necessary to include a length prefix. + * + * @return the updated hash builder + * @throws java.lang.IllegalStateException if the [[finish]] method has already been called on this [[HashBuilder]] + */ + def addWithoutLengthPrefix(a: ByteString): this.type + + /** Appends the length of `a` (encoded as fixed length [[com.google.protobuf.ByteString]]) as well as `a` to this builder. + * + * @return the updated hash builder + * @throws java.lang.IllegalStateException if the [[finish]] method has already been called on this [[HashBuilder]] + */ + def add(a: ByteString): this.type = add(a.size).addWithoutLengthPrefix(a) + + /** Shorthand for `addWithoutLengthPrefix(ByteString.copyFrom(a))` */ + def addWithoutLengthPrefix(a: Array[Byte]): this.type = addWithoutLengthPrefix( + ByteString.copyFrom(a) + ) + + /** Shorthand for `addWithoutLengthPrefix(ByteString.copyFromUtf8(a))` + * Use `add` for strings of variable length + * to prevent hash collisions due to concatenation of variable-length strings. + * + * Document at the call site in production code why it is not necessary to include a length prefix. 
+ */ + def addWithoutLengthPrefix(a: String): this.type = addWithoutLengthPrefix( + ByteString.copyFromUtf8(a) + ) + + /** Shorthand for `add(ByteString.copyFromUtf8(a))` */ + def add(a: String): this.type = add(ByteString.copyFromUtf8(a)) + + /** Shorthand for `addWithoutLengthPrefix(DeterministicEncoding.encodeInt(a))` */ + def add(a: Int): this.type = addWithoutLengthPrefix(DeterministicEncoding.encodeInt(a)) + + /** Shorthand for `addWithoutLengthPrefix(DeterministicEncoding.encodeLong(a))` */ + def add(a: Long): this.type = addWithoutLengthPrefix(DeterministicEncoding.encodeLong(a)) + + /** Terminates the building of the hash. + * No more additions can be made using `HashBuilder.addWithoutLengthPrefix` after this method has been called. + * + * @return The hash of the array accumulated so far. + * @throws java.lang.IllegalStateException if [[finish]] had been called before on this [[HashBuilder]] + */ + def finish(): Hash +} + +/** Constructs a [[HashBuilder]] from the specified [[MessageDigest]] + */ +private[crypto] class HashBuilderFromMessageDigest(algorithm: HashAlgorithm, purpose: HashPurpose) + extends HashBuilder { + + @SuppressWarnings(Array("org.wartremover.warts.Var")) + private var finished: Boolean = false + + private val md: MessageDigest = MessageDigest.getInstance(algorithm.name) + + { + md.update(ByteBuffer.allocate(java.lang.Integer.BYTES).putInt(purpose.id).array()) + } + + override def addWithoutLengthPrefix(a: Array[Byte]): this.type = { + assertNotFinished() + md.update(a) + this + } + + override def addWithoutLengthPrefix(a: ByteString): this.type = addWithoutLengthPrefix( + a.toByteArray + ) + + override def finish(): Hash = { + assertNotFinished() + finished = true + val hash = ByteString.copyFrom(md.digest) + checked(Hash.tryCreate(hash, algorithm)) + } + + private def assertNotFinished(): Unit = + if (finished) + throw new IllegalStateException(s"HashBuilder for $purpose has already been finalized.") +} diff --git 
a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/HashPurpose.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/HashPurpose.scala new file mode 100644 index 0000000000..6074be4a32 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/HashPurpose.scala @@ -0,0 +1,89 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.crypto + +import scala.collection.mutable + +/** The purpose of a hash serves to avoid hash collisions due to equal encodings for different objects. + * It is in general not possible to derive the purpose of the hash from the hash alone. + * + * Whenever a hash is computed using [[HashOps]], a [[HashPurpose]] must be specified that gets included in the hash. + * To reliably prevent hash collisions, every [[HashPurpose]] object should be used only in a single place. + * + * All [[HashPurpose]] objects must be created through the [[HashPurpose$.apply]] method, which checks that the id is + * fresh. + * + * @param id The identifier for the [[HashPurpose]]. + * Every [[HashPurpose]] object must have a unique [[id]]. + */ +class HashPurpose private (val id: Int) extends AnyVal + +object HashPurpose { + private val ids: mutable.Map[Int, String] = mutable.TreeMap.empty[Int, String] + + /** Creates a new [[HashPurpose]] with a given description */ + def apply(id: Int, description: String): HashPurpose = { + ids.put(id, description).foreach { oldDescription => + throw new IllegalArgumentException( + s"requirement failed: HashPurpose with id=$id already exists for $oldDescription" + ) + } + + new HashPurpose(id) + } + + /** Returns the description that was given when the hash purpose was created. 
*/ + def description(hashPurpose: HashPurpose): String = + ids.getOrElse( + hashPurpose.id, + throw new IllegalStateException( + s"Hash purpose with id ${hashPurpose.id} has been created without going through apply" + ), + ) + + /* HashPurposes are listed as `val` rather than `case object`s such that they are initialized eagerly. + * This ensures that HashPurpose id clashes are detected eagerly. Otherwise, it may be there are two hash purposes + * with the same id, but they are never used in the same Java process and therefore the clash is not detected. + */ + val SequencedEventSignature = HashPurpose(1, "SequencedEventSignature") + val Hmac = HashPurpose(2, "Hmac") + val MerkleTreeInnerNode = HashPurpose(3, "MerkleTreeInnerNode") + val Discriminator = HashPurpose(4, "Discriminator") + val SubmitterMetadata = HashPurpose(5, "SubmitterMetadata") + val CommonMetadata = HashPurpose(6, "CommonMetadata") + val ParticipantMetadata = HashPurpose(7, "ParticipantMetadata") + val ViewCommonData = HashPurpose(8, "ViewCommonData") + val ViewParticipantData = HashPurpose(9, "ViewParticipantData") + val MalformedMediatorRequestResult = HashPurpose(10, "MalformedMediatorRequestResult") + val TopologyTransactionSignature = HashPurpose(11, "TopologyTransactionSignature") + val PublicKeyFingerprint = HashPurpose(12, "PublicKeyFingerprint") + val DarIdentifier = HashPurpose(13, "DarIdentifier") + val AuthenticationToken = HashPurpose(14, "AuthenticationToken") + val AgreementId = HashPurpose(15, "AgreementId") + val MediatorResponseSignature = HashPurpose(16, "MediatorResponseSignature") + val TransactionResultSignature = HashPurpose(17, "TransactionResultSignature") + val TransferResultSignature = HashPurpose(19, "TransferResultSignature") + val ParticipantStateSignature = HashPurpose(20, "ParticipantStateSignature") + val DomainTopologyTransactionMessageSignature = + HashPurpose(21, "DomainTopologyTransactionMessageSignature") + val AcsCommitment = HashPurpose(22, "AcsCommitment") 
+ val Stakeholders = HashPurpose(23, "Stakeholders") + val TransferOutCommonData = HashPurpose(24, "TransferOutCommonData") + val TransferOutView = HashPurpose(25, "TransferOutView") + val TransferInCommonData = HashPurpose(26, "TransferInCommonData") + val TransferInView = HashPurpose(27, "TransferInView") + val TransferViewTreeMessageSeed = HashPurpose(28, "TransferViewTreeMessageSeed") + val Unicum = HashPurpose(29, "Unicum") + val RepairTransactionId = HashPurpose(30, "RepairTransactionId") + val MediatorLeadershipEvent = HashPurpose(31, "MediatorLeadershipEvent") + val LegalIdentityClaim = HashPurpose(32, "LegalIdentityClaim") + val DbLockId = HashPurpose(33, "DbLockId") + val AcsCommitmentDb = HashPurpose(34, "AcsCommitmentDb") + val SubmissionRequestSignature = HashPurpose(35, "SubmissionRequestSignature") + val AcknowledgementSignature = HashPurpose(36, "AcknowledgementSignature") + val UnionspaceNamespace = HashPurpose(37, "UnionspaceNamespace") + val SignedProtocolMessageSignature = HashPurpose(38, "SignedProtocolMessageSignature") + val AggregationId = HashPurpose(39, "AggregationId") + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/Hkdf.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/Hkdf.scala new file mode 100644 index 0000000000..50ebdfa5bd --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/Hkdf.scala @@ -0,0 +1,126 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.crypto + +import com.digitalasset.canton.data.ViewPosition.MerklePathElement +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.google.common.annotations.VisibleForTesting +import com.google.protobuf.ByteString + +/** The expansion step of the HMAC-based key derivation function (HKDF) as defined in: + * https://tools.ietf.org/html/rfc5869 + */ +trait HkdfOps { + this: HmacOps => + + /** Sanity check the parameters to the HKDF before calling the internal implementations. */ + private def checkParameters( + outputBytes: Int, + algorithm: HmacAlgorithm = defaultHmacAlgorithm, + ): Either[HkdfError, Unit] = { + import HkdfError.* + + for { + _ <- Either.cond(outputBytes >= 0, (), HkdfOutputNegative(outputBytes)) + hashBytes = algorithm.hashAlgorithm.length + nrChunks = scala.math.ceil(outputBytes.toDouble / hashBytes).toInt + _ <- Either + .cond[HkdfError, Unit]( + nrChunks <= 255, + (), + HkdfOutputTooLong(length = outputBytes, maximum = hashBytes * 255), + ) + } yield () + } + + protected def hkdfExpandInternal( + keyMaterial: SecureRandomness, + outputBytes: Int, + info: HkdfInfo, + algorithm: HmacAlgorithm = defaultHmacAlgorithm, + ): Either[HkdfError, SecureRandomness] + + protected def computeHkdfInternal( + keyMaterial: ByteString, + outputBytes: Int, + info: HkdfInfo, + salt: ByteString = ByteString.EMPTY, + algorithm: HmacAlgorithm = defaultHmacAlgorithm, + ): Either[HkdfError, SecureRandomness] + + /** Produce a new secret from the given key material using the HKDF from RFC 5869 with both extract and expand phases. + * + * @param keyMaterial Input key material from which to derive another key. + * @param outputBytes The length of the produced secret. May be at most 255 times the size of the output of the + * selected hashing algorithm. If you need to derive multiple keys, set the `info` parameter + * to different values, for each key that you need. 
+ * @param info Specify the purpose of the derived key (optional). Note that you can derive multiple + * independent keys from the same key material by varying the purpose. + * @param salt Optional salt. Should be set if the input key material is not cryptographically secure, uniformly random. + * @param algorithm The hash algorithm to be used for the HKDF construction + */ + def computeHkdf( + keyMaterial: ByteString, + outputBytes: Int, + info: HkdfInfo, + salt: ByteString = ByteString.EMPTY, + algorithm: HmacAlgorithm = defaultHmacAlgorithm, + ): Either[HkdfError, SecureRandomness] = for { + _ <- checkParameters(outputBytes, algorithm) + expansion <- computeHkdfInternal(keyMaterial, outputBytes, info, salt, algorithm) + } yield expansion + +} + +/** Ensures unique values of "info" HKDF parameter for the different usages of the KDF. E.g., we may have + * one purpose for deriving the encryption key for a view from a random value, and another one for deriving the random + * values used for the subviews. + */ +class HkdfInfo private (val bytes: ByteString) extends AnyVal + +object HkdfInfo { + + /** Use when deriving a view encryption key from randomness */ + val ViewKey = new HkdfInfo(ByteString.copyFromUtf8("view-key")) + + /** Use when deriving subview-randomness from the randomness used for a view */ + def subview(position: MerklePathElement) = + new HkdfInfo(ByteString.copyFromUtf8("subview-").concat(position.encodeDeterministically)) + + /** Use when deriving a session encryption key from randomness */ + val SessionKey = new HkdfInfo(ByteString.copyFromUtf8("session-key")) + + /** Used to specify arbitrary randomness for golden tests. Don't use in production! 
*/ + @VisibleForTesting + def testOnly(bytes: ByteString) = new HkdfInfo(bytes) +} + +sealed trait HkdfError extends Product with Serializable with PrettyPrinting + +object HkdfError { + + final case class HkdfOutputNegative(length: Int) extends HkdfError { + override def pretty: Pretty[HkdfOutputNegative] = prettyOfClass( + param("length", _.length) + ) + } + + final case class HkdfOutputTooLong(length: Int, maximum: Long) extends HkdfError { + override def pretty: Pretty[HkdfOutputTooLong] = prettyOfClass( + param("length", _.length), + param("maximum", _.maximum), + ) + } + + final case class HkdfHmacError(error: HmacError) extends HkdfError { + override def pretty: Pretty[HkdfHmacError] = prettyOfClass( + unnamedParam(_.error) + ) + } + + final case class HkdfInternalError(error: String) extends HkdfError { + override def pretty: Pretty[HkdfInternalError] = prettyOfClass(unnamedParam(_.error.unquoted)) + } + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/Hmac.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/Hmac.scala new file mode 100644 index 0000000000..8042009514 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/Hmac.scala @@ -0,0 +1,210 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.crypto + +import cats.syntax.either.* +import com.digitalasset.canton.ProtoDeserializationError +import com.digitalasset.canton.crypto.store.CryptoPrivateStoreError +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyInstances, PrettyPrinting, PrettyUtil} +import com.digitalasset.canton.serialization.DefaultDeserializationError +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.store.db.DbSerializationException +import com.google.protobuf.ByteString +import slick.jdbc.{GetResult, SetParameter} + +import java.security.{InvalidKeyException, NoSuchAlgorithmException} +import javax.crypto.Mac +import javax.crypto.spec.SecretKeySpec + +sealed abstract class HmacAlgorithm(val name: String, val hashAlgorithm: HashAlgorithm) + extends PrettyPrinting { + + def toProtoEnum: v0.HmacAlgorithm + + override def pretty: Pretty[HmacAlgorithm] = prettyOfString(_.name) +} + +object HmacAlgorithm { + + val algorithms: Seq[HmacAlgorithm] = Seq(HmacSha256) + + case object HmacSha256 extends HmacAlgorithm("HMACSHA256", HashAlgorithm.Sha256) { + override def toProtoEnum: v0.HmacAlgorithm = v0.HmacAlgorithm.HmacSha256 + } + + def fromProtoEnum( + field: String, + hmacAlgorithmP: v0.HmacAlgorithm, + ): ParsingResult[HmacAlgorithm] = + hmacAlgorithmP match { + case v0.HmacAlgorithm.MissingHmacAlgorithm => + Left(ProtoDeserializationError.FieldNotSet(field)) + case v0.HmacAlgorithm.HmacSha256 => Right(HmacSha256) + case v0.HmacAlgorithm.Unrecognized(value) => + Left(ProtoDeserializationError.UnrecognizedEnum(field, value)) + } + +} + +final case class Hmac private (private val hmac: ByteString, private val algorithm: HmacAlgorithm) + extends PrettyPrinting { + + require(!hmac.isEmpty, "HMAC must not be empty") + require( + hmac.size() == algorithm.hashAlgorithm.length, + s"HMAC size ${hmac.size()} must match HMAC's hash algorithm length 
${algorithm.hashAlgorithm.length}", + ) + + def toProtoV0: v0.Hmac = + v0.Hmac(algorithm = algorithm.toProtoEnum, hmac = hmac) + + override def pretty: Pretty[Hmac] = { + implicit val ps = PrettyInstances.prettyString + PrettyUtil.prettyInfix[Hmac](_.algorithm.name, ":", _.hmac) + } + + /** Access to the raw HMAC, should NOT be used for serialization. */ + def unwrap: ByteString = hmac +} + +object Hmac { + + import HmacError.* + + private[crypto] def create( + hmac: ByteString, + algorithm: HmacAlgorithm, + ): Either[HmacError, Hmac] = { + Either.cond( + hmac.size() == algorithm.hashAlgorithm.length, + new Hmac(hmac, algorithm), + InvalidHmacLength(hmac.size(), algorithm.hashAlgorithm.length), + ) + } + + def fromProtoV0(hmacP: v0.Hmac): ParsingResult[Hmac] = + for { + hmacAlgorithm <- HmacAlgorithm.fromProtoEnum("algorithm", hmacP.algorithm) + hmac <- Hmac + .create(hmacP.hmac, hmacAlgorithm) + .leftMap(err => + ProtoDeserializationError.CryptoDeserializationError( + DefaultDeserializationError(s"Failed to deserialize HMAC: $err") + ) + ) + } yield hmac + + /** Computes the HMAC of the given message using an explicit secret. 
+ * See [[https://en.wikipedia.org/wiki/HMAC]] + */ + def compute( + secret: HmacSecret, + message: ByteString, + algorithm: HmacAlgorithm, + ): Either[HmacError, Hmac] = + for { + mac <- Either + .catchOnly[NoSuchAlgorithmException](Mac.getInstance(algorithm.name)) + .leftMap(ex => UnknownHmacAlgorithm(algorithm, ex)) + key = new SecretKeySpec(secret.unwrap.toByteArray, algorithm.name) + _ <- Either.catchOnly[InvalidKeyException](mac.init(key)).leftMap(ex => InvalidHmacSecret(ex)) + hmacBytes <- Either + .catchOnly[IllegalStateException](mac.doFinal(message.toByteArray)) + .leftMap(ex => FailedToComputeHmac(ex)) + hmac = new Hmac(ByteString.copyFrom(hmacBytes), algorithm) + } yield hmac +} + +final case class HmacSecret private (private val secret: ByteString) extends PrettyPrinting { + + require(!secret.isEmpty, "HMAC secret cannot be empty") + + private[crypto] def unwrap: ByteString = secret + + // intentionally removing the value from toString to avoid printing secret in logs + override def pretty: Pretty[HmacSecret] = + prettyOfString(secret => s"HmacSecret(length: ${secret.length})") + + val length: Int = secret.size() +} + +object HmacSecret { + + implicit val setHmacSecretParameter: SetParameter[HmacSecret] = (v, pp) => { + import com.digitalasset.canton.resource.DbStorage.Implicits.setParameterByteString + pp.>>(v.secret) + } + + implicit val getHmacSecretResult: GetResult[HmacSecret] = GetResult { r => + import com.digitalasset.canton.resource.DbStorage.Implicits.getResultByteString + HmacSecret + .create(r.<<) + .valueOr(err => + throw new DbSerializationException(s"Failed to deserialize HMAC secret: $err") + ) + } + + /** Recommended length for HMAC secret keys is 128 bits */ + val defaultLength = 16 + + private[crypto] def create(bytes: ByteString): Either[HmacError, HmacSecret] = + Either.cond(!bytes.isEmpty, new HmacSecret(bytes), HmacError.EmptyHmacSecret) + + /** Generates a new random HMAC secret key. 
A minimum secret key length of 128 bits is enforced. + * + * NOTE: The length of the HMAC secret should not exceed the internal _block_ size of the hash function, + * e.g., 512 bits for SHA256. + */ + def generate(randomOps: RandomOps, length: Int = defaultLength): HmacSecret = { + require(length >= defaultLength, s"Specified HMAC secret key length ${length} too small.") + new HmacSecret(randomOps.generateRandomByteString(length)) + } +} + +/** pure HMAC operations that do not require access to external keys. */ +trait HmacOps { + + def defaultHmacAlgorithm: HmacAlgorithm = HmacAlgorithm.HmacSha256 + + def hmacWithSecret( + secret: HmacSecret, + message: ByteString, + algorithm: HmacAlgorithm = defaultHmacAlgorithm, + ): Either[HmacError, Hmac] = + Hmac.compute(secret, message, algorithm) + +} + +sealed trait HmacError extends Product with Serializable with PrettyPrinting + +object HmacError { + final case class UnknownHmacAlgorithm(algorithm: HmacAlgorithm, cause: Exception) + extends HmacError { + override def pretty: Pretty[UnknownHmacAlgorithm] = prettyOfClass( + param("algorithm", _.algorithm.name.unquoted), + param("cause", _.cause), + ) + } + case object EmptyHmacSecret extends HmacError { + override def pretty: Pretty[EmptyHmacSecret.type] = prettyOfObject[EmptyHmacSecret.type] + } + final case class InvalidHmacSecret(cause: Exception) extends HmacError { + override def pretty: Pretty[InvalidHmacSecret] = prettyOfClass(unnamedParam(_.cause)) + } + final case class FailedToComputeHmac(cause: Exception) extends HmacError { + override def pretty: Pretty[FailedToComputeHmac] = prettyOfClass(unnamedParam(_.cause)) + } + final case class InvalidHmacLength(inputLength: Int, expectedLength: Long) extends HmacError { + override def pretty: Pretty[InvalidHmacLength] = prettyOfClass( + param("inputLength", _.inputLength), + param("expectedLength", _.expectedLength), + ) + } + case object MissingHmacSecret extends HmacError { + override def pretty: 
Pretty[MissingHmacSecret.type] = prettyOfObject[MissingHmacSecret.type] + } + final case class HmacPrivateStoreError(error: CryptoPrivateStoreError) extends HmacError { + override def pretty: Pretty[HmacPrivateStoreError] = prettyOfParam(_.error) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/JavaKeyConverter.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/JavaKeyConverter.scala new file mode 100644 index 0000000000..2ab4ef1d60 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/JavaKeyConverter.scala @@ -0,0 +1,152 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.crypto + +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.util.ErrorUtil +import com.google.protobuf.ByteString +import org.bouncycastle.asn1.edec.EdECObjectIdentifiers +import org.bouncycastle.asn1.pkcs.PKCSObjectIdentifiers +import org.bouncycastle.asn1.sec.SECObjectIdentifiers +import org.bouncycastle.asn1.x509.{AlgorithmIdentifier, SubjectPublicKeyInfo} +import org.bouncycastle.asn1.x9.X9ObjectIdentifiers +import org.bouncycastle.openssl.PEMParser + +import java.io.{IOException, StringReader} +import java.security.PublicKey as JPublicKey + +trait JavaKeyConverter { + + /** Convert to Java public key */ + def toJava( + publicKey: PublicKey + ): Either[JavaKeyConversionError, (AlgorithmIdentifier, JPublicKey)] + + /** Convert a Java public key into a Canton signing public key. + * + * We take the fingerprint as an argument instead of computing it again, because if we convert between public keys of + * different crypto providers their fingerprint computation is different and can result in the converted key not + * having the same fingerprint as the original key. 
+ */ + // TODO(i4612): Remove fingerprint once Tink provider is removed and we have a consistent fingerprint computation. + def fromJavaSigningKey( + publicKey: JPublicKey, + algorithmIdentifier: AlgorithmIdentifier, + fingerprint: Fingerprint, + ): Either[JavaKeyConversionError, SigningPublicKey] + + def fromJavaEncryptionKey( + publicKey: JPublicKey, + algorithmIdentifier: AlgorithmIdentifier, + fingerprint: Fingerprint, + ): Either[JavaKeyConversionError, EncryptionPublicKey] +} + +object JavaKeyConverter { + + def toSigningKeyScheme( + algoId: AlgorithmIdentifier + ): Either[JavaKeyConversionError, SigningKeyScheme] = + algoId.getAlgorithm match { + case X9ObjectIdentifiers.ecdsa_with_SHA256 | SECObjectIdentifiers.secp256r1 => + Right(SigningKeyScheme.EcDsaP256) + case X9ObjectIdentifiers.ecdsa_with_SHA384 | SECObjectIdentifiers.secp384r1 => + Right(SigningKeyScheme.EcDsaP384) + case EdECObjectIdentifiers.id_Ed25519 => Right(SigningKeyScheme.Ed25519) + case unsupportedIdentifier => + Left(JavaKeyConversionError.UnsupportedAlgorithm(algoId)) + } + + @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) + def convertPublicKeyFromPemToDer(pubKeyPEM: String): Either[String, ByteString] = { + val pemParser: PEMParser = new PEMParser(new StringReader(pubKeyPEM)) + try { + Option(pemParser.readObject) match { + case Some(spki: SubjectPublicKeyInfo) => + Right(ByteString.copyFrom(spki.getEncoded)) + case Some(_) => + Left("unexpected type conversion") + case None => + Left("could not parse public key info from PEM format") + } + } catch { + case e: IOException => + Left( + s"failed to convert public key from PEM to DER format: ${ErrorUtil.messageWithStacktrace(e)}" + ) + } finally { + pemParser.close() + } + } + + def toEncryptionKeyScheme( + algoId: AlgorithmIdentifier + ): Either[JavaKeyConversionError, EncryptionKeyScheme] = + algoId.getAlgorithm match { + case X9ObjectIdentifiers.ecdsa_with_SHA256 | SECObjectIdentifiers.secp256r1 => + // TODO(i12757): It 
maps to two ECIES schemes right now, need to disentangle key scheme from algorithm + Right(EncryptionKeyScheme.EciesP256HkdfHmacSha256Aes128Gcm) + case PKCSObjectIdentifiers.id_RSAES_OAEP => + Right(EncryptionKeyScheme.Rsa2048OaepSha256) + case unsupportedIdentifier => + Left(JavaKeyConversionError.UnsupportedAlgorithm(algoId)) + } + +} + +sealed trait JavaKeyConversionError extends Product with Serializable with PrettyPrinting + +object JavaKeyConversionError { + + final case class GeneralError(error: Exception) extends JavaKeyConversionError { + override def pretty: Pretty[GeneralError] = + prettyOfClass(unnamedParam(_.error)) + } + + final case class UnsupportedAlgorithm(algorithmIdentifier: AlgorithmIdentifier) + extends JavaKeyConversionError { + override def pretty: Pretty[UnsupportedAlgorithm] = + prettyOfClass(unnamedParam(_.algorithmIdentifier.toString.unquoted)) + } + + final case class UnsupportedKeyFormat(format: CryptoKeyFormat, expectedFormat: CryptoKeyFormat) + extends JavaKeyConversionError { + override def pretty: Pretty[UnsupportedKeyFormat] = + prettyOfClass(param("format", _.format), param("expected format", _.expectedFormat)) + } + + final case class UnsupportedSigningKeyScheme( + scheme: SigningKeyScheme, + supportedSchemes: NonEmpty[Set[SigningKeyScheme]], + ) extends JavaKeyConversionError { + override def pretty: Pretty[UnsupportedSigningKeyScheme] = + prettyOfClass( + param("scheme", _.scheme), + param("supported schemes", _.supportedSchemes), + ) + } + + final case class UnsupportedEncryptionKeyScheme( + scheme: EncryptionKeyScheme, + supportedSchemes: NonEmpty[Set[EncryptionKeyScheme]], + ) extends JavaKeyConversionError { + override def pretty: Pretty[UnsupportedEncryptionKeyScheme] = + prettyOfClass( + param("scheme", _.scheme), + param("supported schemes", _.supportedSchemes), + ) + } + + final case class KeyStoreError(error: String) extends JavaKeyConversionError { + override def pretty: Pretty[KeyStoreError] = + 
prettyOfClass(unnamedParam(_.error.unquoted)) + } + + final case class InvalidKey(error: String) extends JavaKeyConversionError { + override def pretty: Pretty[InvalidKey] = + prettyOfClass(unnamedParam(_.error.unquoted)) + } + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/Nonce.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/Nonce.scala new file mode 100644 index 0000000000..5e5658efbb --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/Nonce.scala @@ -0,0 +1,57 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.crypto + +import com.digitalasset.canton.ProtoDeserializationError.CryptoDeserializationError +import com.digitalasset.canton.config.CantonRequireTypes.String300 +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.serialization.{DefaultDeserializationError, HasCryptographicEvidence} +import com.digitalasset.canton.store.db.{DbDeserializationException, DbSerializationException} +import com.digitalasset.canton.util.HexString +import com.google.protobuf.ByteString +import slick.jdbc.{GetResult, SetParameter} + +final case class Nonce private (private val bytes: ByteString) extends HasCryptographicEvidence { + def toProtoPrimitive: ByteString = bytes + def toLengthLimitedHexString: String300 = + String300.tryCreate(HexString.toHexString(this.toProtoPrimitive)) + + override def getCryptographicEvidence: ByteString = bytes +} + +object Nonce { + + /** As of now, the database schemas can only handle nonces up to a length of 150 bytes. Thus the length of a [[Nonce]] should never exceed that. + * If we ever want to create a [[Nonce]] larger than that, we can increase it up to 500 bytes after which we are limited by Oracle length limits. 
+ * See the documentation at [[com.digitalasset.canton.config.CantonRequireTypes.LengthLimitedString]] for more details. + */ + val length: Int = 20 + + implicit val setNonceParameter: SetParameter[Nonce] = + (nonce, pp) => pp >> nonce.toLengthLimitedHexString + + implicit val getNonceResult: GetResult[Nonce] = GetResult { r => + val hexString = r.nextString() + if (hexString.length > String300.maxLength) + throw new DbDeserializationException( + s"Base16-encoded authentication token of length ${hexString.length} exceeds allowed limit of ${String300.maxLength}." + ) + HexString + .parseToByteString(r.nextString()) + .map(new Nonce(_)) + .getOrElse(throw new DbSerializationException(s"Could not deserialize nonce from db")) + } + + def generate(randomOps: RandomOps): Nonce = new Nonce(randomOps.generateRandomByteString(length)) + + def fromProtoPrimitive(bytes: ByteString): ParsingResult[Nonce] = + Either.cond( + bytes.size() == length, + new Nonce(bytes), + CryptoDeserializationError( + DefaultDeserializationError(s"Nonce of invalid length: ${bytes.size()}") + ), + ) + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/Random.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/Random.scala new file mode 100644 index 0000000000..7b9cee36f1 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/Random.scala @@ -0,0 +1,68 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.crypto + +import com.digitalasset.canton.serialization.{ + DefaultDeserializationError, + DeserializationError, + HasCryptographicEvidence, +} +import com.digitalasset.canton.version.{HasVersionedToByteString, ProtocolVersion} +import com.google.protobuf.ByteString + +import scala.util.Random + +trait RandomOps { + + protected def generateRandomBytes(length: Int): Array[Byte] + + def generateRandomByteString(length: Int): ByteString = + ByteString.copyFrom(generateRandomBytes(length)) + + def generateSecureRandomness(length: Int): SecureRandomness = SecureRandomness( + generateRandomByteString(length) + ) +} + +/** The class is a tag that denotes a byte string as a securely generated random value. + * + * Not an AnyVal as we also want it to be a serializable value such that we can encrypt it. + */ +final case class SecureRandomness private[crypto] (unwrap: ByteString) + extends HasCryptographicEvidence + with HasVersionedToByteString { + override def toByteString(version: ProtocolVersion): ByteString = getCryptographicEvidence + + override def getCryptographicEvidence: ByteString = unwrap +} + +/** Cryptographically-secure randomness */ +object SecureRandomness { + + /** Recover secure randomness from a byte string. Use for deserialization only. Fails if the provided byte string + * is not of the expected length. + */ + def fromByteString( + expectedLength: Int + )(bytes: ByteString): Either[DeserializationError, SecureRandomness] = { + if (bytes.size != expectedLength) + Left( + DefaultDeserializationError( + s"Expected $expectedLength bytes of serialized randomness, got ${bytes.size}" + ) + ) + else Right(SecureRandomness(bytes)) + } +} + +/** Pseudo randomness, MUST NOT be used for security-relevant operations. 
*/ +object PseudoRandom { + + private val rand = new Random(new java.security.SecureRandom()) + + def randomAlphaNumericString(length: Int): String = rand.alphanumeric.take(length).mkString + + def randomUnsigned(maxValue: Int): Int = rand.between(0, maxValue) + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/Salt.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/Salt.scala new file mode 100644 index 0000000000..75de5b8565 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/Salt.scala @@ -0,0 +1,167 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.crypto + +import cats.syntax.either.* +import com.digitalasset.canton.ProtoDeserializationError +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.protocol.CantonContractIdVersion +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.serialization.{DefaultDeserializationError, DeterministicEncoding} +import com.google.common.annotations.VisibleForTesting +import com.google.protobuf.ByteString + +/** A seed to derive further salts from. + * + * Unlike [[Salt]] this seed will not be shipped to another participant. + */ +abstract sealed case class SaltSeed(unwrap: ByteString) + +object SaltSeed { + + /** Default length for a salt seed is 128 bits */ + val defaultLength = 16 + + private[crypto] def apply(bytes: ByteString): SaltSeed = + new SaltSeed(bytes) {} + + def generate(length: Int = defaultLength)(randomOps: RandomOps): SaltSeed = + SaltSeed(randomOps.generateRandomByteString(length)) +} + +/** Indicates the algorithm used to generate and derive salts. 
*/ +sealed trait SaltAlgorithm extends Product with Serializable with PrettyPrinting { + def toProtoOneOf: v0.Salt.Algorithm + def length: Long +} + +object SaltAlgorithm { + + /** Uses an HMAC algorithm as a pseudo-random function to generate/derive salts. */ + final case class Hmac(hmacAlgorithm: HmacAlgorithm) extends SaltAlgorithm { + override def toProtoOneOf: v0.Salt.Algorithm = v0.Salt.Algorithm.Hmac(hmacAlgorithm.toProtoEnum) + override def length: Long = hmacAlgorithm.hashAlgorithm.length + override def pretty: Pretty[Hmac] = prettyOfClass( + param("hmacAlgorithm", _.hmacAlgorithm.name.unquoted) + ) + } + + def fromProtoOneOf( + field: String, + saltAlgorithmP: v0.Salt.Algorithm, + ): ParsingResult[SaltAlgorithm] = + saltAlgorithmP match { + case v0.Salt.Algorithm.Empty => Left(ProtoDeserializationError.FieldNotSet(field)) + case v0.Salt.Algorithm.Hmac(hmacAlgorithmP) => + HmacAlgorithm.fromProtoEnum("hmac", hmacAlgorithmP).map(Hmac) + } +} + +/** A (pseudo-)random salt used for hashing to prevent pre-computed hash attacks. + * + * The algorithm that was used to generate/derive the salt is kept to support the verification of the salt generation. + */ +final case class Salt private (private val salt: ByteString, private val algorithm: SaltAlgorithm) + extends PrettyPrinting { + + require(!salt.isEmpty, "Salt must not be empty") + require( + salt.size() == algorithm.length, + s"Salt size ${salt.size()} must match salt algorithm length ${algorithm.length}", + ) + + /** Returns the serialization used for networking/storing, must NOT be used for hashing. */ + def toProtoV0: v0.Salt = v0.Salt(salt = salt, algorithm = algorithm.toProtoOneOf) + + /** Returns the salt used for hashing, must NOT be used for networking/storing. 
*/ + def forHashing: ByteString = salt + + def size: Int = salt.size() + + @VisibleForTesting + private[crypto] def unwrap: ByteString = salt + + override val pretty: Pretty[Salt] = prettyOfParam(_.salt) +} + +object Salt { + + private[crypto] def create(bytes: ByteString, algorithm: SaltAlgorithm): Either[SaltError, Salt] = + Either.cond( + !bytes.isEmpty && bytes.size() == algorithm.length, + new Salt(bytes, algorithm), + SaltError.InvalidSaltCreation(bytes, algorithm), + ) + + private def deriveSalt( + seed: ByteString, + bytes: ByteString, + hmacOps: HmacOps, + ): Either[SaltError, Salt] = + for { + pseudoSecret <- HmacSecret + .create(seed) + .leftMap(SaltError.HmacGenerationError) + saltAlgorithm = SaltAlgorithm.Hmac(hmacOps.defaultHmacAlgorithm) + hmac <- hmacOps + .hmacWithSecret(pseudoSecret, bytes, saltAlgorithm.hmacAlgorithm) + .leftMap(SaltError.HmacGenerationError) + salt <- create(hmac.unwrap, saltAlgorithm) + } yield salt + + /** Derives a salt from a `seed` salt and an `index`. */ + def deriveSalt(seed: SaltSeed, index: Int, hmacOps: HmacOps): Either[SaltError, Salt] = { + deriveSalt(seed, DeterministicEncoding.encodeInt(index), hmacOps) + } + + def tryDeriveSalt(seed: SaltSeed, index: Int, hmacOps: HmacOps): Salt = { + deriveSalt(seed, index, hmacOps).valueOr(err => throw new IllegalStateException(err.toString)) + } + + /** Derives a salt from a `seed` salt and `bytes` using an HMAC as a pseudo-random function. 
*/ + def deriveSalt(seed: SaltSeed, bytes: ByteString, hmacOps: HmacOps): Either[SaltError, Salt] = + deriveSalt(seed.unwrap, bytes, hmacOps) + + def tryDeriveSalt(seed: SaltSeed, bytes: ByteString, hmacOps: HmacOps): Salt = + deriveSalt(seed, bytes, hmacOps).valueOr(err => throw new IllegalStateException(err.toString)) + + def tryDeriveSalt( + seed: Salt, + bytes: ByteString, + contractIdVersion: CantonContractIdVersion, + hmacOps: HmacOps, + ): Salt = + deriveSalt(seed.forHashing, bytes, hmacOps).valueOr(err => + throw new IllegalStateException(err.toString) + ) + + def fromProtoV0(saltP: v0.Salt): ParsingResult[Salt] = + for { + saltAlgorithm <- SaltAlgorithm.fromProtoOneOf("algorithm", saltP.algorithm) + salt <- create(saltP.salt, saltAlgorithm).leftMap(err => + ProtoDeserializationError.CryptoDeserializationError( + DefaultDeserializationError(err.toString) + ) + ) + } yield salt +} + +sealed trait SaltError extends Product with Serializable with PrettyPrinting + +object SaltError { + final case class InvalidSaltCreation(bytes: ByteString, algorithm: SaltAlgorithm) + extends SaltError { + override def pretty: Pretty[InvalidSaltCreation] = + prettyOfClass( + param("bytes", _.bytes), + param("algorithm", _.algorithm), + ) + } + + final case class HmacGenerationError(error: HmacError) extends SaltError { + override def pretty: Pretty[HmacGenerationError] = prettyOfClass( + param("error", _.error) + ) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/Signing.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/Signing.scala new file mode 100644 index 0000000000..2d89a0b0c3 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/Signing.scala @@ -0,0 +1,541 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.crypto

import cats.Order
import cats.data.EitherT
import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.ProtoDeserializationError
import com.digitalasset.canton.crypto.store.{
  CryptoPrivateStoreError,
  CryptoPrivateStoreExtended,
  CryptoPublicStoreError,
}
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.serialization.ProtoConverter
import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.digitalasset.canton.topology.Member
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.NoCopy
import com.digitalasset.canton.version.{
  HasVersionedMessageCompanion,
  HasVersionedMessageCompanionDbHelpers,
  HasVersionedWrapper,
  ProtoVersion,
  ProtocolVersion,
}
import com.google.protobuf.ByteString
import slick.jdbc.GetResult

import scala.concurrent.{ExecutionContext, Future}

/** Signing operations that do not require access to a private key store but operate with provided keys. */
trait SigningOps {

  /** Signs the given hash using the private signing key. */
  def sign(hash: Hash, signingKey: SigningPrivateKey): Either[SigningError, Signature] =
    sign(hash.getCryptographicEvidence, signingKey)

  /** Signs the raw bytes with the provided private key; callers should prefer the hash-based overload. */
  protected[crypto] def sign(
      bytes: ByteString,
      signingKey: SigningPrivateKey,
  ): Either[SigningError, Signature]

  /** Confirms if the provided signature is a valid signature of the payload using the public key */
  def verifySignature(
      hash: Hash,
      publicKey: SigningPublicKey,
      signature: Signature,
  ): Either[SignatureCheckError, Unit] =
    verifySignature(hash.getCryptographicEvidence, publicKey, signature)

  /** Verifies a signature over raw bytes; callers should prefer the hash-based overload. */
  protected[crypto] def verifySignature(
      bytes: ByteString,
      publicKey: SigningPublicKey,
      signature: Signature,
  ): Either[SignatureCheckError, Unit]
}

/** Signing operations that require access to stored private keys. */
trait SigningPrivateOps {

  /** The key scheme used when [[generateSigningKey]] is called without an explicit scheme. */
  def defaultSigningKeyScheme: SigningKeyScheme

  /** Signs the given hash using the referenced private signing key. */
  def sign(hash: Hash, signingKeyId: Fingerprint)(implicit
      tc: TraceContext
  ): EitherT[Future, SigningError, Signature] =
    sign(hash.getCryptographicEvidence, signingKeyId)

  /** Signs the byte string directly, however it is encouraged to sign a hash. */
  protected[crypto] def sign(
      bytes: ByteString,
      signingKeyId: Fingerprint,
  )(implicit tc: TraceContext): EitherT[Future, SigningError, Signature]

  /** Generates a new signing key pair with the given scheme and optional name, stores the private key and returns the public key.
    */
  def generateSigningKey(
      scheme: SigningKeyScheme = defaultSigningKeyScheme,
      name: Option[KeyName] = None,
  )(implicit
      traceContext: TraceContext
  ): EitherT[Future, SigningKeyGenerationError, SigningPublicKey]

}

/** A default implementation with a private key store */
trait SigningPrivateStoreOps extends SigningPrivateOps {

  implicit val ec: ExecutionContext

  /** Store from which private signing keys are looked up and into which generated keys are persisted. */
  protected val store: CryptoPrivateStoreExtended

  /** Pure signing operations applied once the key material has been resolved from the store. */
  protected val signingOps: SigningOps

  protected[crypto] def sign(
      bytes: ByteString,
      signingKeyId: Fingerprint,
  )(implicit tc: TraceContext): EitherT[Future, SigningError, Signature] =
    store
      // Fix: propagate the caller's trace context instead of discarding it via TraceContext.todo,
      // so the store lookup stays correlated with the signing request.
      .signingKey(signingKeyId)(tc)
      .leftMap(storeError => SigningError.KeyStoreError(storeError.show))
      .subflatMap(_.toRight(SigningError.UnknownSigningKey(signingKeyId)))
      .subflatMap(signingKey => signingOps.sign(bytes, signingKey))

  /** Internal method to generate and return the entire signing key pair */
  protected[crypto] def generateSigningKeypair(scheme: SigningKeyScheme)(implicit
      traceContext: TraceContext
  ): EitherT[Future, SigningKeyGenerationError, SigningKeyPair]

  def generateSigningKey(
      scheme: SigningKeyScheme,
      name: Option[KeyName],
  )(implicit
      traceContext: TraceContext
  ): EitherT[Future, SigningKeyGenerationError, SigningPublicKey] =
    for {
      keypair <- generateSigningKeypair(scheme)
      _ <- store
        .storeSigningKey(keypair.privateKey, name)
        .leftMap[SigningKeyGenerationError](SigningKeyGenerationError.SigningPrivateStoreError)
    } yield keypair.publicKey

}

/** A cryptographic signature over a payload, identified by the fingerprint of the signing key.
  *
  * The raw signature bytes are kept private; serialization must go through the versioned protobuf wrapper.
  */
final case class Signature private[crypto] (
    format: SignatureFormat,
    private val signature: ByteString,
    signedBy: Fingerprint,
) extends HasVersionedWrapper[Signature]
    with PrettyPrinting
    with NoCopy {

  override protected def companionObj = Signature

  def toProtoV0: v0.Signature =
    v0.Signature(
      format = format.toProtoEnum,
      signature = signature,
      signedBy = signedBy.toProtoPrimitive,
    )

  override def pretty: Pretty[Signature] =
    prettyOfClass(param("signature", _.signature), param("signedBy", _.signedBy))

  /** Access to the raw signature, must NOT be used for serialization */
  private[crypto] def unwrap: ByteString = signature
}

object Signature
    extends HasVersionedMessageCompanion[Signature]
    with HasVersionedMessageCompanionDbHelpers[Signature] {
  // Sentinel signature with empty bytes and a placeholder fingerprint.
  val noSignature =
    new Signature(
      SignatureFormat.Raw,
      ByteString.EMPTY,
      Fingerprint.tryCreate("no-fingerprint"),
    )
  val noSignatures = NonEmpty(Set, noSignature)

  val supportedProtoVersions: SupportedProtoVersions = SupportedProtoVersions(
    ProtoVersion(0) -> ProtoCodec(
      ProtocolVersion.v30,
      supportedProtoVersion(v0.Signature)(fromProtoV0),
      _.toProtoV0.toByteString,
    )
  )

  override def name: String = "signature"

  // Guard against direct construction via the synthesized case-class apply;
  // instances must come from deserialization.
  private[this] def apply(
      format: SignatureFormat,
      signature: ByteString,
      signedBy: Fingerprint,
  ): Signature =
    throw new UnsupportedOperationException("Use deserialization method instead")

  def fromProtoV0(signatureP: v0.Signature): ParsingResult[Signature] =
    for {
      format <- SignatureFormat.fromProtoEnum("format", signatureP.format)
      signature = signatureP.signature
      signedBy <- Fingerprint.fromProtoPrimitive(signatureP.signedBy)
    } yield new Signature(format, signature, signedBy)
}

/** Wire format of a [[Signature]]. Currently only the raw format exists. */
sealed trait SignatureFormat extends Product with Serializable {
  def toProtoEnum: v0.SignatureFormat
}

object SignatureFormat {
  case object Raw extends SignatureFormat {
    override def toProtoEnum: v0.SignatureFormat = v0.SignatureFormat.RawSignatureFormat
  }

  def fromProtoEnum(
      field: String,
      formatP: v0.SignatureFormat,
  ): ParsingResult[SignatureFormat] =
    formatP match {
      case v0.SignatureFormat.MissingSignatureFormat =>
        Left(ProtoDeserializationError.FieldNotSet(field))
      case v0.SignatureFormat.Unrecognized(value) =>
        Left(ProtoDeserializationError.UnrecognizedEnum(field, value))
      case v0.SignatureFormat.RawSignatureFormat => Right(SignatureFormat.Raw)
    }
}

sealed trait SigningKeyScheme extends Product with Serializable with PrettyPrinting {
  def name: String
  def toProtoEnum: v0.SigningKeyScheme
  def pretty: Pretty[this.type] = prettyOfString(_.name)
}

/** Schemes for signature keys.
  *
  * Ed25519 is the best performing curve and should be the default.
  * EC-DSA is slower than Ed25519 but has better compatibility with other systems (such as CCF).
  */
object SigningKeyScheme {
  implicit val signingKeySchemeOrder: Order[SigningKeyScheme] =
    Order.by[SigningKeyScheme, String](_.name)

  case object Ed25519 extends SigningKeyScheme {
    override val name: String = "Ed25519"
    override def toProtoEnum: v0.SigningKeyScheme = v0.SigningKeyScheme.Ed25519
  }

  case object EcDsaP256 extends SigningKeyScheme {
    override def name: String = "ECDSA-P256"
    override def toProtoEnum: v0.SigningKeyScheme = v0.SigningKeyScheme.EcDsaP256
  }

  case object EcDsaP384 extends SigningKeyScheme {
    override def name: String = "ECDSA-P384"
    override def toProtoEnum: v0.SigningKeyScheme = v0.SigningKeyScheme.EcDsaP384
  }

  val EdDsaSchemes: NonEmpty[Set[SigningKeyScheme]] = NonEmpty.mk(Set, Ed25519)
  val EcDsaSchemes: NonEmpty[Set[SigningKeyScheme]] = NonEmpty.mk(Set, EcDsaP256, EcDsaP384)

  def fromProtoEnum(
      field: String,
      schemeP: v0.SigningKeyScheme,
  ): ParsingResult[SigningKeyScheme] =
    schemeP match {
      case v0.SigningKeyScheme.MissingSigningKeyScheme =>
        Left(ProtoDeserializationError.FieldNotSet(field))
      case v0.SigningKeyScheme.Unrecognized(value) =>
        Left(ProtoDeserializationError.UnrecognizedEnum(field, value))
      case v0.SigningKeyScheme.Ed25519 => Right(SigningKeyScheme.Ed25519)
      case v0.SigningKeyScheme.EcDsaP256 => Right(SigningKeyScheme.EcDsaP256)
      case v0.SigningKeyScheme.EcDsaP384 => Right(SigningKeyScheme.EcDsaP384)
      case v0.SigningKeyScheme.Sm2 =>
        Left(ProtoDeserializationError.OtherError("Support for SM2 has been removed since v2.7"))
    }
}
/** A matched public/private signing key pair. */
final case class SigningKeyPair(publicKey: SigningPublicKey, privateKey: SigningPrivateKey)
    extends CryptoKeyPair[SigningPublicKey, SigningPrivateKey]
    with NoCopy {

  protected def toProtoV0: v0.SigningKeyPair =
    v0.SigningKeyPair(Some(publicKey.toProtoV0), Some(privateKey.toProtoV0))

  protected def toProtoCryptoKeyPairPairV0: v0.CryptoKeyPair.Pair =
    v0.CryptoKeyPair.Pair.SigningKeyPair(toProtoV0)
}

object SigningKeyPair {

  // Guard against direct construction via the synthesized case-class apply.
  private[this] def apply(
      publicKey: SigningPublicKey,
      privateKey: SigningPrivateKey,
  ): SigningKeyPair =
    throw new UnsupportedOperationException("Use generate or deserialization methods")

  /** Builds a key pair from raw key material; both halves share the fingerprint, format and scheme. */
  private[crypto] def create(
      id: Fingerprint,
      format: CryptoKeyFormat,
      publicKeyBytes: ByteString,
      privateKeyBytes: ByteString,
      scheme: SigningKeyScheme,
  ): SigningKeyPair = {
    val publicKey = new SigningPublicKey(id, format, publicKeyBytes, scheme)
    val privateKey = new SigningPrivateKey(publicKey.id, format, privateKeyBytes, scheme)
    new SigningKeyPair(publicKey, privateKey)
  }

  def fromProtoV0(
      signingKeyPairP: v0.SigningKeyPair
  ): ParsingResult[SigningKeyPair] =
    for {
      publicKey <- ProtoConverter.parseRequired(
        SigningPublicKey.fromProtoV0,
        "public_key",
        signingKeyPairP.publicKey,
      )
      privateKey <- ProtoConverter.parseRequired(
        SigningPrivateKey.fromProtoV0,
        "private_key",
        signingKeyPairP.privateKey,
      )
    } yield new SigningKeyPair(publicKey, privateKey)
}

@SuppressWarnings(Array("org.wartremover.warts.FinalCaseClass")) // This class is mocked in tests
case class SigningPublicKey private[crypto] (
    id: Fingerprint,
    format: CryptoKeyFormat,
    protected[crypto] val key: ByteString,
    scheme: SigningKeyScheme,
) extends PublicKey
    with PrettyPrinting
    with NoCopy
    with HasVersionedWrapper[SigningPublicKey] {
  override val purpose: KeyPurpose = KeyPurpose.Signing

  override protected def companionObj = SigningPublicKey

  def toProtoV0: v0.SigningPublicKey =
    v0.SigningPublicKey(
      id = id.toProtoPrimitive,
      format = format.toProtoEnum,
      publicKey = key,
      scheme = scheme.toProtoEnum,
    )

  override protected def toProtoPublicKeyKeyV0: v0.PublicKey.Key =
    v0.PublicKey.Key.SigningPublicKey(toProtoV0)

  // Deliberately omits the raw key bytes from the rendered output.
  override def pretty: Pretty[SigningPublicKey] =
    prettyOfClass(param("id", _.id), param("format", _.format), param("scheme", _.scheme))
}

object SigningPublicKey
    extends HasVersionedMessageCompanion[SigningPublicKey]
    with HasVersionedMessageCompanionDbHelpers[SigningPublicKey] {
  override def name: String = "signing public key"

  val supportedProtoVersions: SupportedProtoVersions = SupportedProtoVersions(
    ProtoVersion(0) -> ProtoCodec(
      ProtocolVersion.v30,
      supportedProtoVersion(v0.SigningPublicKey)(fromProtoV0),
      _.toProtoV0.toByteString,
    )
  )

  // Guard against direct construction via the synthesized case-class apply.
  // Fix: return type corrected from SigningPrivateKey (copy-paste error) to SigningPublicKey,
  // matching the case-class constructor and the parallel guard in object SigningPrivateKey.
  private[this] def apply(
      id: Fingerprint,
      format: CryptoKeyFormat,
      key: ByteString,
      scheme: SigningKeyScheme,
  ): SigningPublicKey =
    throw new UnsupportedOperationException("Use keypair generate or deserialization methods")

  def fromProtoV0(
      publicKeyP: v0.SigningPublicKey
  ): ParsingResult[SigningPublicKey] =
    for {
      id <- Fingerprint.fromProtoPrimitive(publicKeyP.id)
      format <- CryptoKeyFormat.fromProtoEnum("format", publicKeyP.format)
      scheme <- SigningKeyScheme.fromProtoEnum("scheme", publicKeyP.scheme)
    } yield new SigningPublicKey(id, format, publicKeyP.publicKey, scheme)

  /** Retains only the signing keys from each member's key list. */
  def collect(initialKeys: Map[Member, Seq[PublicKey]]): Map[Member, Seq[SigningPublicKey]] =
    initialKeys.map { case (k, v) =>
      (k, v.collect { case x: SigningPublicKey => x })
    }

}

/** A signing public key together with an optional human-readable name. */
final case class SigningPublicKeyWithName(
    override val publicKey: SigningPublicKey,
    override val name: Option[KeyName],
) extends PublicKeyWithName {
  type K = SigningPublicKey
}

object SigningPublicKeyWithName {
  implicit def getResultSigningPublicKeyWithName(implicit
      getResultByteArray: GetResult[Array[Byte]]
  ): GetResult[SigningPublicKeyWithName] = GetResult { r =>
    SigningPublicKeyWithName(r.<<, r.<<)
  }
}

final case class SigningPrivateKey private[crypto] (
    id: Fingerprint,
    format: CryptoKeyFormat,
    protected[crypto] val key: ByteString,
    scheme: SigningKeyScheme,
) extends PrivateKey
    with HasVersionedWrapper[SigningPrivateKey]
    with NoCopy {

  override protected def companionObj = SigningPrivateKey

  def toProtoV0: v0.SigningPrivateKey =
    v0.SigningPrivateKey(
      id = id.toProtoPrimitive,
      format = format.toProtoEnum,
      privateKey = key,
      scheme = scheme.toProtoEnum,
    )

  override def purpose: KeyPurpose = KeyPurpose.Signing

  override protected def toProtoPrivateKeyKeyV0: v0.PrivateKey.Key =
    v0.PrivateKey.Key.SigningPrivateKey(toProtoV0)
}

object SigningPrivateKey extends HasVersionedMessageCompanion[SigningPrivateKey] {
  val supportedProtoVersions: SupportedProtoVersions = SupportedProtoVersions(
    ProtoVersion(0) -> ProtoCodec(
      ProtocolVersion.v30,
      supportedProtoVersion(v0.SigningPrivateKey)(fromProtoV0),
      _.toProtoV0.toByteString,
    )
  )

  override def name: String = "signing private key"

  // Guard against direct construction via the synthesized case-class apply.
  private[this] def apply(
      id: Fingerprint,
      format: CryptoKeyFormat,
      key: ByteString,
      scheme: SigningKeyScheme,
  ): SigningPrivateKey =
    throw new UnsupportedOperationException("Use keypair generate or deserialization methods")

  def fromProtoV0(
      privateKeyP: v0.SigningPrivateKey
  ): ParsingResult[SigningPrivateKey] =
    for {
      id <- Fingerprint.fromProtoPrimitive(privateKeyP.id)
      format <- CryptoKeyFormat.fromProtoEnum("format", privateKeyP.format)
      scheme <- SigningKeyScheme.fromProtoEnum("scheme", privateKeyP.scheme)
    } yield new SigningPrivateKey(id, format, privateKeyP.privateKey, scheme)

}

/** Errors that can occur while producing a signature. */
sealed trait SigningError extends Product with Serializable with PrettyPrinting
object SigningError {

  final case class GeneralError(error: Exception) extends SigningError {
    override def pretty: Pretty[GeneralError] =
      prettyOfClass(unnamedParam(_.error))
  }

  final case class InvariantViolation(error: String) extends SigningError {
    override def pretty: Pretty[InvariantViolation] = prettyOfClass(unnamedParam(_.error.unquoted))
  }

  final case class InvalidSigningKey(error: String) extends SigningError {
    override def pretty: Pretty[InvalidSigningKey] = prettyOfClass(unnamedParam(_.error.unquoted))
  }

  final case class UnknownSigningKey(keyId: Fingerprint) extends SigningError {
    override def pretty: Pretty[UnknownSigningKey] = prettyOfClass(param("keyId", _.keyId))
  }

  final case class FailedToSign(error: String) extends SigningError {
    override def pretty: Pretty[FailedToSign] = prettyOfClass(unnamedParam(_.error.unquoted))
  }

  final case class KeyStoreError(error: String) extends SigningError {
    override def pretty: Pretty[KeyStoreError] = prettyOfClass(unnamedParam(_.error.unquoted))
  }
}

/** Errors that can occur while generating a signing key. */
sealed trait SigningKeyGenerationError extends Product with Serializable with PrettyPrinting
object SigningKeyGenerationError {

  final case class GeneralError(error: Exception) extends SigningKeyGenerationError {
    override def pretty: Pretty[GeneralError] = prettyOfClass(unnamedParam(_.error))
  }

  final case class GeneralKmsError(error: String) extends SigningKeyGenerationError {
    override def pretty: Pretty[GeneralKmsError] = prettyOfClass(unnamedParam(_.error.unquoted))
  }

  final case class NameInvalidError(error: String) extends SigningKeyGenerationError {
    override def pretty: Pretty[NameInvalidError] = prettyOfClass(unnamedParam(_.error.unquoted))
  }

  final case class FingerprintError(error: String) extends SigningKeyGenerationError {
    override def pretty: Pretty[FingerprintError] = prettyOfClass(unnamedParam(_.error.unquoted))
  }

  final case class UnsupportedKeyScheme(scheme: SigningKeyScheme)
      extends SigningKeyGenerationError {
    override def pretty: Pretty[UnsupportedKeyScheme] = prettyOfClass(param("scheme", _.scheme))
  }

  final case class SigningPrivateStoreError(error: CryptoPrivateStoreError)
      extends SigningKeyGenerationError {
    override def pretty: Pretty[SigningPrivateStoreError] = prettyOfClass(unnamedParam(_.error))
  }

  final case class SigningPublicStoreError(error: CryptoPublicStoreError)
      extends SigningKeyGenerationError {
    override def pretty: Pretty[SigningPublicStoreError] = prettyOfClass(unnamedParam(_.error))
  }
}

/** Errors that can occur while verifying a signature. */
sealed trait SignatureCheckError extends Product with Serializable with PrettyPrinting
object SignatureCheckError {

  final case class MultipleErrors(errors: Seq[SignatureCheckError], message: Option[String] = None)
      extends SignatureCheckError {
    override def pretty: Pretty[MultipleErrors] = prettyOfClass[MultipleErrors](
      paramIfDefined("message", _.message.map(_.unquoted)),
      param("errors", _.errors),
    )
  }

  final case class InvalidSignature(signature: Signature, bytes: ByteString, error: String)
      extends SignatureCheckError {
    override def pretty: Pretty[InvalidSignature] =
      prettyOfClass(
        param("signature", _.signature),
        param("bytes", _.bytes),
        param("error", _.error.doubleQuoted),
      )
  }
  final case class InvalidKeyError(message: String) extends SignatureCheckError {
    override def pretty: Pretty[InvalidKeyError] = prettyOfClass(unnamedParam(_.message.unquoted))
  }
  final case class GeneralError(error: Exception) extends SignatureCheckError {
    override def pretty: Pretty[GeneralError] = prettyOfClass(unnamedParam(_.error))
  }
  final case class SignatureWithWrongKey(message: String) extends SignatureCheckError {
    override def pretty: Pretty[SignatureWithWrongKey] = prettyOfClass(
      unnamedParam(_.message.unquoted)
    )
  }
  final case class SignerHasNoValidKeys(message: String) extends SignatureCheckError {
    override def pretty: Pretty[SignerHasNoValidKeys] = prettyOfClass(
      unnamedParam(_.message.unquoted)
    )
  }
}
a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/SyncCryptoApiProvider.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/SyncCryptoApiProvider.scala new file mode 100644 index 0000000000..ec8b3702eb --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/SyncCryptoApiProvider.scala @@ -0,0 +1,642 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.crypto + +import cats.Monad +import cats.data.EitherT +import cats.implicits.catsSyntaxValidatedId +import cats.syntax.alternative.* +import cats.syntax.either.* +import cats.syntax.flatMap.* +import cats.syntax.functor.* +import cats.syntax.parallel.* +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.concurrent.{FutureSupervisor, HasFutureSupervision} +import com.digitalasset.canton.config.{CacheConfig, CachingConfigs, ProcessingTimeout} +import com.digitalasset.canton.crypto.SignatureCheckError.{ + SignatureWithWrongKey, + SignerHasNoValidKeys, +} +import com.digitalasset.canton.crypto.SyncCryptoError.{KeyNotAvailable, SyncCryptoEncryptionError} +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.lifecycle.{ + CloseContext, + FlagCloseable, + FutureUnlessShutdown, + Lifecycle, +} +import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.protocol.DynamicDomainParameters +import com.digitalasset.canton.serialization.DeserializationError +import com.digitalasset.canton.topology.MediatorGroup.MediatorGroupIndex +import com.digitalasset.canton.topology.* +import com.digitalasset.canton.topology.client.{ + DomainTopologyClient, + IdentityProvidingServiceClient, + TopologyClientApi, + TopologySnapshot, +} +import com.digitalasset.canton.tracing.TraceContext +import 
com.digitalasset.canton.util.FutureInstances.* +import com.digitalasset.canton.util.LoggerUtil +import com.digitalasset.canton.version.{HasVersionedToByteString, ProtocolVersion} +import com.digitalasset.canton.{DomainAlias, checked} +import com.google.protobuf.ByteString +import org.slf4j.event.Level + +import scala.concurrent.duration.* +import scala.concurrent.{ExecutionContext, Future} + +/** Crypto API Provider class + * + * The utility class combines the information provided by the IPSclient, the pure crypto functions + * and the signing and decryption operations on a private key vault in order to automatically resolve + * the right keys to use for signing / decryption based on domain and timestamp. + */ +class SyncCryptoApiProvider( + val member: Member, + val ips: IdentityProvidingServiceClient, + val crypto: Crypto, + cachingConfigs: CachingConfigs, + timeouts: ProcessingTimeout, + futureSupervisor: FutureSupervisor, + loggerFactory: NamedLoggerFactory, +)(implicit ec: ExecutionContext) { + + require(ips != null) + + def pureCrypto: CryptoPureApi = crypto.pureCrypto + + def tryForDomain(domain: DomainId, alias: Option[DomainAlias] = None): DomainSyncCryptoClient = + new DomainSyncCryptoClient( + member, + domain, + ips.tryForDomain(domain), + crypto, + cachingConfigs, + timeouts, + futureSupervisor, + loggerFactory.append("domainId", domain.toString), + ) + + def forDomain(domain: DomainId): Option[DomainSyncCryptoClient] = + for { + dips <- ips.forDomain(domain) + } yield new DomainSyncCryptoClient( + member, + domain, + dips, + crypto, + cachingConfigs, + timeouts, + futureSupervisor, + loggerFactory, + ) +} + +trait SyncCryptoClient[+T <: SyncCryptoApi] extends TopologyClientApi[T] { + this: HasFutureSupervision => + + def pureCrypto: CryptoPureApi + + /** Returns a snapshot of the current member topology for the given domain. + * The future will log a warning and await the snapshot if the data is not there yet. 
+ * + * The snapshot returned by this method should be used for validating transaction and transfer requests (Phase 2 - 7). + * Use the request timestamp as parameter for this method. + * Do not use a response or result timestamp, because all validation steps must use the same topology snapshot. + */ + def ipsSnapshot(timestamp: CantonTimestamp)(implicit + traceContext: TraceContext + ): Future[TopologySnapshot] + + /** Returns a snapshot of the current member topology for the given domain + * + * The future will wait for the data if the data is not there yet. + * + * The snapshot returned by this method should be used for validating transaction and transfer requests (Phase 2 - 7). + * Use the request timestamp as parameter for this method. + * Do not use a response or result timestamp, because all validation steps must use the same topology snapshot. + */ + def awaitIpsSnapshot(timestamp: CantonTimestamp)(implicit + traceContext: TraceContext + ): Future[TopologySnapshot] + + def awaitIpsSnapshotUS(timestamp: CantonTimestamp)(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[TopologySnapshot] + + def awaitIpsSnapshotUSSupervised(description: => String, warnAfter: Duration = 10.seconds)( + timestamp: CantonTimestamp + )(implicit traceContext: TraceContext): FutureUnlessShutdown[TopologySnapshot] = + supervisedUS(description, warnAfter)(awaitIpsSnapshotUS(timestamp)) + +} + +object SyncCryptoClient { + + /** Computes the snapshot for the desired timestamp, assuming that the last (relevant) update to the + * topology state happened at or before `previousTimestamp`. + * If `previousTimestampO` is [[scala.None$]] and `desiredTimestamp` is currently not known + * [[com.digitalasset.canton.topology.client.TopologyClientApi.topologyKnownUntilTimestamp]], + * then the current approximation is returned and if `warnIfApproximate` is set a warning is logged. 
+ */ + def getSnapshotForTimestampUS( + client: SyncCryptoClient[SyncCryptoApi], + desiredTimestamp: CantonTimestamp, + previousTimestampO: Option[CantonTimestamp], + protocolVersion: ProtocolVersion, + warnIfApproximate: Boolean = true, + )(implicit + executionContext: ExecutionContext, + loggingContext: ErrorLoggingContext, + closeContext: CloseContext, + ): FutureUnlessShutdown[SyncCryptoApi] = getSnapshotForTimestampInternal[FutureUnlessShutdown]( + client, + desiredTimestamp, + previousTimestampO, + warnIfApproximate, + )( + (timestamp, traceContext) => client.snapshotUS(timestamp)(traceContext), + (description, timestamp, traceContext) => + client.awaitSnapshotUSSupervised(description)(timestamp)(traceContext), + { (snapshot, traceContext) => + { + closeContext.context.performUnlessClosingF( + "get-dynamic-domain-parameters" + ) { + snapshot + .findDynamicDomainParametersOrDefault( + protocolVersion = protocolVersion, + warnOnUsingDefault = false, + )(traceContext) + }(executionContext, traceContext) + } + }, + ) + + /** Computes the snapshot for the desired timestamp, assuming that the last (relevant) update to the + * topology state happened at or before `previousTimestamp`. + * If `previousTimestampO` is [[scala.None$]] and `desiredTimestamp` is currently not known + * [[com.digitalasset.canton.topology.client.TopologyClientApi.topologyKnownUntilTimestamp]], + * then the current approximation is returned and if `warnIfApproximate` is set a warning is logged. 
+ */ + def getSnapshotForTimestamp( + client: SyncCryptoClient[SyncCryptoApi], + desiredTimestamp: CantonTimestamp, + previousTimestampO: Option[CantonTimestamp], + protocolVersion: ProtocolVersion, + warnIfApproximate: Boolean = true, + )(implicit + executionContext: ExecutionContext, + loggingContext: ErrorLoggingContext, + ): Future[SyncCryptoApi] = { + getSnapshotForTimestampInternal[Future]( + client, + desiredTimestamp, + previousTimestampO, + warnIfApproximate, + )( + (timestamp, traceContext) => client.snapshot(timestamp)(traceContext), + (description, timestamp, traceContext) => + client.awaitSnapshotSupervised(description)(timestamp)(traceContext), + { (snapshot, traceContext) => + snapshot + .findDynamicDomainParametersOrDefault( + protocolVersion = protocolVersion, + warnOnUsingDefault = false, + )(traceContext) + }, + ) + } + + // Base version of getSnapshotForTimestamp abstracting over the effect type to allow for + // a `Future` and `FutureUnlessShutdown` version. Once we migrate all usages to the US version, this abstraction + // should not be needed anymore + private def getSnapshotForTimestampInternal[F[_]]( + client: SyncCryptoClient[SyncCryptoApi], + desiredTimestamp: CantonTimestamp, + previousTimestampO: Option[CantonTimestamp], + warnIfApproximate: Boolean = true, + )( + getSnapshot: (CantonTimestamp, TraceContext) => F[SyncCryptoApi], + awaitSnapshotSupervised: (String, CantonTimestamp, TraceContext) => F[SyncCryptoApi], + dynamicDomainParameters: (TopologySnapshot, TraceContext) => F[DynamicDomainParameters], + )(implicit + loggingContext: ErrorLoggingContext, + monad: Monad[F], + ): F[SyncCryptoApi] = { + implicit val traceContext: TraceContext = loggingContext.traceContext + val knownUntil = client.topologyKnownUntilTimestamp + if (desiredTimestamp <= knownUntil) { + getSnapshot(desiredTimestamp, traceContext) + } else { + loggingContext.logger.debug( + s"Waiting for topology snapshot at $desiredTimestamp; known until $knownUntil; 
previous $previousTimestampO" + ) + previousTimestampO match { + case None => + val approximateSnapshot = client.currentSnapshotApproximation + LoggerUtil.logAtLevel( + if (warnIfApproximate) Level.WARN else Level.INFO, + s"Using approximate topology snapshot at ${approximateSnapshot.ipsSnapshot.timestamp} for desired timestamp $desiredTimestamp", + ) + monad.pure(approximateSnapshot) + case Some(previousTimestamp) => + if (desiredTimestamp <= previousTimestamp.immediateSuccessor) + awaitSnapshotSupervised( + s"requesting topology snapshot at $desiredTimestamp with update timestamp $previousTimestamp and known until $knownUntil", + desiredTimestamp, + traceContext, + ) + else { + import scala.Ordered.orderingToOrdered + for { + previousSnapshot <- awaitSnapshotSupervised( + s"searching for topology change delay at $previousTimestamp for desired timestamp $desiredTimestamp and known until $knownUntil", + previousTimestamp, + traceContext, + ) + previousDomainParams <- dynamicDomainParameters( + previousSnapshot.ipsSnapshot, + traceContext, + ) + delay = previousDomainParams.topologyChangeDelay + diff = desiredTimestamp - previousTimestamp + snapshotTimestamp = + if (diff > delay.unwrap) { + // `desiredTimestamp` is larger than `previousTimestamp` plus the `delay`, + // so timestamps cannot overflow here + checked(previousTimestamp.plus(delay.unwrap).immediateSuccessor) + } else desiredTimestamp + desiredSnapshot <- awaitSnapshotSupervised( + s"requesting topology snapshot at $snapshotTimestamp for desired timestamp $desiredTimestamp given previous timestamp $previousTimestamp with topology change delay $delay", + snapshotTimestamp, + traceContext, + ) + } yield desiredSnapshot + } + } + } + } +} + +/** Crypto operations on a particular domain + */ +class DomainSyncCryptoClient( + val member: Member, + val domainId: DomainId, + val ips: DomainTopologyClient, + val crypto: Crypto, + cacheConfigs: CachingConfigs, + override val timeouts: ProcessingTimeout, + override 
protected val futureSupervisor: FutureSupervisor, + override val loggerFactory: NamedLoggerFactory, +)(implicit override protected val executionContext: ExecutionContext) + extends SyncCryptoClient[DomainSnapshotSyncCryptoApi] + with HasFutureSupervision + with NamedLogging + with FlagCloseable { + + override def pureCrypto: CryptoPureApi = crypto.pureCrypto + + override def snapshot(timestamp: CantonTimestamp)(implicit + traceContext: TraceContext + ): Future[DomainSnapshotSyncCryptoApi] = + ips.snapshot(timestamp).map(create) + + override def snapshotUS(timestamp: CantonTimestamp)(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[DomainSnapshotSyncCryptoApi] = + ips.snapshotUS(timestamp).map(create) + + override def trySnapshot(timestamp: CantonTimestamp)(implicit + traceContext: TraceContext + ): DomainSnapshotSyncCryptoApi = + create(ips.trySnapshot(timestamp)) + + override def headSnapshot(implicit traceContext: TraceContext): DomainSnapshotSyncCryptoApi = + create(ips.headSnapshot) + + override def awaitSnapshot(timestamp: CantonTimestamp)(implicit + traceContext: TraceContext + ): Future[DomainSnapshotSyncCryptoApi] = + ips.awaitSnapshot(timestamp).map(create) + + override def awaitSnapshotUS(timestamp: CantonTimestamp)(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[DomainSnapshotSyncCryptoApi] = + ips.awaitSnapshotUS(timestamp).map(create) + + private def create(snapshot: TopologySnapshot): DomainSnapshotSyncCryptoApi = { + new DomainSnapshotSyncCryptoApi( + member, + domainId, + snapshot, + crypto, + ts => EitherT(mySigningKeyCache.get(ts)), + cacheConfigs.keyCache, + loggerFactory, + ) + } + + private val mySigningKeyCache = cacheConfigs.mySigningKeyCache + .buildScaffeine() + .buildAsyncFuture[CantonTimestamp, Either[SyncCryptoError, Fingerprint]]( + findSigningKey(_).value + ) + + private def findSigningKey( + referenceTime: CantonTimestamp + ): EitherT[Future, SyncCryptoError, Fingerprint] = { + import 
TraceContext.Implicits.Empty.* + for { + snapshot <- EitherT.right(ipsSnapshot(referenceTime)) + signingKeys <- EitherT.right(snapshot.signingKeys(member)) + existingKeys <- signingKeys.toList + .parFilterA(pk => crypto.cryptoPrivateStore.existsSigningKey(pk.fingerprint)) + .leftMap[SyncCryptoError](SyncCryptoError.StoreError) + kk <- existingKeys.lastOption + .toRight[SyncCryptoError]( + SyncCryptoError + .KeyNotAvailable( + member, + KeyPurpose.Signing, + snapshot.timestamp, + signingKeys.map(_.fingerprint), + ) + ) + .toEitherT[Future] + } yield kk.fingerprint + + } + + override def ipsSnapshot(timestamp: CantonTimestamp)(implicit + traceContext: TraceContext + ): Future[TopologySnapshot] = + ips.snapshot(timestamp) + + override def awaitIpsSnapshot(timestamp: CantonTimestamp)(implicit + traceContext: TraceContext + ): Future[TopologySnapshot] = + ips.awaitSnapshot(timestamp) + + override def awaitIpsSnapshotUS(timestamp: CantonTimestamp)(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[TopologySnapshot] = + ips.awaitSnapshotUS(timestamp) + + override def snapshotAvailable(timestamp: CantonTimestamp): Boolean = + ips.snapshotAvailable(timestamp) + + override def awaitTimestamp( + timestamp: CantonTimestamp, + waitForEffectiveTime: Boolean, + )(implicit traceContext: TraceContext): Option[Future[Unit]] = + ips.awaitTimestamp(timestamp, waitForEffectiveTime) + + override def awaitTimestampUS(timestamp: CantonTimestamp, waitForEffectiveTime: Boolean)(implicit + traceContext: TraceContext + ): Option[FutureUnlessShutdown[Unit]] = + ips.awaitTimestampUS(timestamp, waitForEffectiveTime) + + override def currentSnapshotApproximation(implicit + traceContext: TraceContext + ): DomainSnapshotSyncCryptoApi = + create(ips.currentSnapshotApproximation) + + override def topologyKnownUntilTimestamp: CantonTimestamp = ips.topologyKnownUntilTimestamp + + override def approximateTimestamp: CantonTimestamp = ips.approximateTimestamp + + override def onClosed(): Unit 
= Lifecycle.close(ips)(logger) +} + +/** crypto operations for a (domain,timestamp) */ +class DomainSnapshotSyncCryptoApi( + val member: Member, + val domainId: DomainId, + override val ipsSnapshot: TopologySnapshot, + val crypto: Crypto, + fetchSigningKey: CantonTimestamp => EitherT[Future, SyncCryptoError, Fingerprint], + validKeysCacheConfig: CacheConfig, + override protected val loggerFactory: NamedLoggerFactory, +)(implicit ec: ExecutionContext) + extends SyncCryptoApi + with NamedLogging { + + override val pureCrypto: CryptoPureApi = crypto.pureCrypto + private val validKeysCache = + validKeysCacheConfig + .buildScaffeine() + .buildAsyncFuture[Member, Map[Fingerprint, SigningPublicKey]](loadSigningKeysForMember) + + /** Sign given hash with signing key for (member, domain, timestamp) + */ + override def sign( + hash: Hash + )(implicit traceContext: TraceContext): EitherT[Future, SyncCryptoError, Signature] = + for { + fingerprint <- fetchSigningKey(ipsSnapshot.referenceTime) + signature <- crypto.privateCrypto + .sign(hash, fingerprint) + .leftMap[SyncCryptoError](SyncCryptoError.SyncCryptoSigningError) + } yield signature + + private def loadSigningKeysForMember( + member: Member + ): Future[Map[Fingerprint, SigningPublicKey]] = + ipsSnapshot.signingKeys(member).map(_.map(x => (x.fingerprint, x)).toMap) + + private def verifySignature( + hash: Hash, + validKeys: Map[Fingerprint, SigningPublicKey], + signature: Signature, + signerStr: => String, + ): Either[SignatureCheckError, Unit] = { + lazy val signerStr_ = signerStr + def signatureCheckFailed(): Either[SignatureCheckError, Unit] = { + val error = + if (validKeys.isEmpty) + SignerHasNoValidKeys( + s"There are no valid keys for ${signerStr_} but received message signed with ${signature.signedBy}" + ) + else + SignatureWithWrongKey( + s"Key ${signature.signedBy} used to generate signature is not a valid key for ${signerStr_}. 
Valid keys are ${validKeys.values + .map(_.fingerprint.unwrap)}" + ) + Left(error) + } + validKeys.get(signature.signedBy) match { + case Some(key) => + crypto.pureCrypto.verifySignature(hash, key, signature) + case None => + signatureCheckFailed() + } + } + + override def verifySignature( + hash: Hash, + signer: Member, + signature: Signature, + ): EitherT[Future, SignatureCheckError, Unit] = { + for { + validKeys <- EitherT.right(validKeysCache.get(signer)) + res <- EitherT.fromEither[Future]( + verifySignature(hash, validKeys, signature, signer.toString) + ) + } yield res + } + + override def verifySignatures( + hash: Hash, + signer: Member, + signatures: NonEmpty[Seq[Signature]], + ): EitherT[Future, SignatureCheckError, Unit] = { + for { + validKeys <- EitherT.right(validKeysCache.get(signer)) + res <- signatures.forgetNE.parTraverse_ { signature => + EitherT.fromEither[Future](verifySignature(hash, validKeys, signature, signer.toString)) + } + } yield res + } + + override def verifySignatures( + hash: Hash, + mediatorGroupIndex: MediatorGroupIndex, + signatures: NonEmpty[Seq[Signature]], + )(implicit traceContext: TraceContext): EitherT[Future, SignatureCheckError, Unit] = { + for { + mediatorGroup <- EitherT( + ipsSnapshot.mediatorGroups().map { groups => + groups + .find(_.index == mediatorGroupIndex) + .toRight( + SignatureCheckError.GeneralError( + new RuntimeException( + s"Mediator request for unknown mediator group with index $mediatorGroupIndex" + ) + ) + ) + } + ) + validKeysWithMember <- EitherT.right( + mediatorGroup.active + .parFlatTraverse { mediatorId => + ipsSnapshot + .signingKeys(mediatorId) + .map(keys => keys.map(key => (key.id, (mediatorId, key)))) + } + .map(_.toMap) + ) + validKeys = validKeysWithMember.view.mapValues(_._2).toMap + keyMember = validKeysWithMember.view.mapValues(_._1).toMap + validated <- EitherT.right(signatures.forgetNE.parTraverse { signature => + EitherT + .fromEither[Future]( + verifySignature( + hash, + validKeys, + 
signature, + mediatorGroup.toString, + ) + ) + .fold( + x => x.invalid[MediatorId], + _ => keyMember(signature.signedBy).valid[SignatureCheckError], + ) + }) + _ <- { + val (signatureCheckErrors, validSigners) = validated.separate + EitherT.cond[Future]( + validSigners.distinct.sizeIs >= mediatorGroup.threshold.value, { + if (signatureCheckErrors.nonEmpty) { + val errors = SignatureCheckError.MultipleErrors(signatureCheckErrors) + // TODO(i13206): Replace with an Alarm + logger.warn( + s"Signature check passed for $mediatorGroup, although there were errors: $errors" + ) + } + () + }, + SignatureCheckError.MultipleErrors( + signatureCheckErrors, + Some("Mediator group signature threshold not reached"), + ): SignatureCheckError, + ) + } + } yield () + } + + private def ownerIsInitialized( + validKeys: Seq[SigningPublicKey] + ): EitherT[Future, SignatureCheckError, Boolean] = + member match { + case participant: ParticipantId => EitherT.right(ipsSnapshot.isParticipantActive(participant)) + case _ => // we assume that a member other than a participant is initialised if at least one valid key is known + EitherT.rightT(validKeys.nonEmpty) + } + + override def decrypt[M](encryptedMessage: Encrypted[M])( + deserialize: ByteString => Either[DeserializationError, M] + )(implicit traceContext: TraceContext): EitherT[Future, SyncCryptoError, M] = { + EitherT( + ipsSnapshot + .encryptionKey(member) + .map { keyO => + keyO + .toRight( + KeyNotAvailable( + member, + KeyPurpose.Encryption, + ipsSnapshot.timestamp, + Seq.empty, + ): SyncCryptoError + ) + } + ) + .flatMap(key => + crypto.privateCrypto + .decrypt(AsymmetricEncrypted(encryptedMessage.ciphertext, key.fingerprint))( + deserialize + ) + .leftMap(err => SyncCryptoError.SyncCryptoDecryptionError(err)) + ) + } + + override def decrypt[M](encryptedMessage: AsymmetricEncrypted[M])( + deserialize: ByteString => Either[DeserializationError, M] + )(implicit traceContext: TraceContext): EitherT[Future, SyncCryptoError, M] = { + 
crypto.privateCrypto + .decrypt(encryptedMessage)(deserialize) + .leftMap[SyncCryptoError](err => SyncCryptoError.SyncCryptoDecryptionError(err)) + } + + /** Encrypts a message for the given member + * + * Utility method to lookup a key on an IPS snapshot and then encrypt the given message with the + * most suitable key for the respective member. + */ + override def encryptFor[M <: HasVersionedToByteString]( + message: M, + member: Member, + version: ProtocolVersion, + ): EitherT[Future, SyncCryptoError, AsymmetricEncrypted[M]] = + EitherT( + ipsSnapshot + .encryptionKey(member) + .map { keyO => + keyO + .toRight( + KeyNotAvailable(member, KeyPurpose.Encryption, ipsSnapshot.timestamp, Seq.empty) + ) + .flatMap(k => + crypto.pureCrypto + .encryptWith(message, k, version) + .leftMap(SyncCryptoEncryptionError) + ) + } + ) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/X509CertificatePem.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/X509CertificatePem.scala new file mode 100644 index 0000000000..a06bc1cf59 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/X509CertificatePem.scala @@ -0,0 +1,56 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.crypto + +import better.files.File +import cats.syntax.either.* +import com.google.protobuf.ByteString + +sealed trait X509CertificateEncoder[Encoding] { + + def fromBytes(encoded: ByteString): Either[String, Encoding] + + protected def unwrap(value: Either[String, Encoding]): Encoding = + value.valueOr(err => throw new IllegalArgumentException(s"Failed to load certificate: $err")) +} + +/** A X509 Certificate serialized in PEM format. 
*/ +final case class X509CertificatePem private (private val bytes: ByteString) { + def unwrap: ByteString = bytes + + override def toString: String = bytes.toStringUtf8 +} + +object X509CertificatePem extends X509CertificateEncoder[X509CertificatePem] { + def fromString(pem: String): Either[String, X509CertificatePem] = + fromBytes(ByteString.copyFromUtf8(pem)) + + def tryFromString(pem: String): X509CertificatePem = unwrap(fromString(pem)) + + def fromFile(pemFile: File): Either[String, X509CertificatePem] = { + Either + .catchNonFatal(pemFile.loadBytes) + .leftMap(err => s"Failed to load PEM file: $err") + .map(ByteString.copyFrom) + .flatMap(X509CertificatePem.fromBytes) + } + + def tryFromFile(pemFile: File): X509CertificatePem = unwrap(fromFile(pemFile)) + + override def fromBytes(encoded: ByteString): Either[String, X509CertificatePem] = + Right(new X509CertificatePem(encoded)) +} + +/** A X509 Certificate serialized in DER format. */ +final case class X509CertificateDer private (private val bytes: ByteString) { + def unwrap: ByteString = bytes + + override def toString: String = bytes.toStringUtf8 +} + +object X509CertificateDer extends X509CertificateEncoder[X509CertificateDer] { + override def fromBytes(der: ByteString): Either[String, X509CertificateDer] = Right( + new X509CertificateDer(der) + ) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/store/CryptoPrivateStore.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/store/CryptoPrivateStore.scala new file mode 100644 index 0000000000..15fbeecba3 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/store/CryptoPrivateStore.scala @@ -0,0 +1,168 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.crypto.store + +import cats.data.EitherT +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.crypto.* +import com.digitalasset.canton.crypto.store.db.DbCryptoPrivateStore +import com.digitalasset.canton.crypto.store.memory.InMemoryCryptoPrivateStore +import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.resource.{DbStorage, MemoryStorage, Storage} +import com.digitalasset.canton.tracing.{TraceContext, TracerProvider} +import com.digitalasset.canton.version.ReleaseProtocolVersion + +import scala.concurrent.{ExecutionContext, Future} + +sealed trait PrivateKeyWithName extends Product with Serializable { + type K <: PrivateKey + def privateKey: K + def name: Option[KeyName] +} + +final case class SigningPrivateKeyWithName( + override val privateKey: SigningPrivateKey, + override val name: Option[KeyName], +) extends PrivateKeyWithName { + type K = SigningPrivateKey +} + +final case class EncryptionPrivateKeyWithName( + override val privateKey: EncryptionPrivateKey, + override val name: Option[KeyName], +) extends PrivateKeyWithName { + type K = EncryptionPrivateKey +} + +/** A store for cryptographic private material such as signing/encryption private keys and hmac secrets. + * + * It encapsulates only existence checks/delete operations so it can be extendable to an external + * crypto private store (e.g. an AWS KMS store). 
+ */ +trait CryptoPrivateStore extends AutoCloseable { + + def removePrivateKey( + keyId: Fingerprint + )(implicit traceContext: TraceContext): EitherT[Future, CryptoPrivateStoreError, Unit] + + def existsPrivateKey( + keyId: Fingerprint, + purpose: KeyPurpose, + )(implicit traceContext: TraceContext): EitherT[Future, CryptoPrivateStoreError, Boolean] + + def existsSigningKey(signingKeyId: Fingerprint)(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPrivateStoreError, Boolean] + + def existsDecryptionKey(decryptionKeyId: Fingerprint)(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPrivateStoreError, Boolean] + + def toExtended: Option[CryptoPrivateStoreExtended] = this match { + case extended: CryptoPrivateStoreExtended => Some(extended) + case _ => None + } +} + +object CryptoPrivateStore { + trait CryptoPrivateStoreFactory { + def create( + storage: Storage, + releaseProtocolVersion: ReleaseProtocolVersion, + timeouts: ProcessingTimeout, + loggerFactory: NamedLoggerFactory, + tracerProvider: TracerProvider, + )(implicit + ec: ExecutionContext, + traceContext: TraceContext, + ): EitherT[Future, CryptoPrivateStoreError, CryptoPrivateStore] + } + + class CommunityCryptoPrivateStoreFactory extends CryptoPrivateStoreFactory { + override def create( + storage: Storage, + releaseProtocolVersion: ReleaseProtocolVersion, + timeouts: ProcessingTimeout, + loggerFactory: NamedLoggerFactory, + tracerProvider: TracerProvider, + )(implicit + ec: ExecutionContext, + traceContext: TraceContext, + ): EitherT[Future, CryptoPrivateStoreError, CryptoPrivateStore] = + storage match { + case _: MemoryStorage => + EitherT.rightT[Future, CryptoPrivateStoreError]( + new InMemoryCryptoPrivateStore(releaseProtocolVersion, loggerFactory) + ) + case jdbc: DbStorage => + EitherT.rightT[Future, CryptoPrivateStoreError]( + new DbCryptoPrivateStore(jdbc, releaseProtocolVersion, timeouts, loggerFactory) + ) + } + } +} + +sealed trait CryptoPrivateStoreError 
extends Product with Serializable with PrettyPrinting +object CryptoPrivateStoreError { + + final case class FailedToListKeys(reason: String) extends CryptoPrivateStoreError { + override def pretty: Pretty[FailedToListKeys] = prettyOfClass(unnamedParam(_.reason.unquoted)) + } + + final case class FailedToGetWrapperKeyId(reason: String) extends CryptoPrivateStoreError { + override def pretty: Pretty[FailedToGetWrapperKeyId] = prettyOfClass( + unnamedParam(_.reason.unquoted) + ) + } + + final case class FailedToReadKey(keyId: Fingerprint, reason: String) + extends CryptoPrivateStoreError { + override def pretty: Pretty[FailedToReadKey] = prettyOfClass(unnamedParam(_.reason.unquoted)) + } + + final case class InvariantViolation(keyId: Fingerprint, reason: String) + extends CryptoPrivateStoreError { + override def pretty: Pretty[InvariantViolation] = prettyOfClass(unnamedParam(_.reason.unquoted)) + } + + final case class FailedToInsertKey(keyId: Fingerprint, reason: String) + extends CryptoPrivateStoreError { + override def pretty: Pretty[FailedToInsertKey] = + prettyOfClass(param("keyId", _.keyId), param("reason", _.reason.unquoted)) + } + + final case class KeyAlreadyExists(keyId: Fingerprint, existingKeyName: Option[String]) + extends CryptoPrivateStoreError { + override def pretty: Pretty[KeyAlreadyExists] = + prettyOfClass( + param("keyId", _.keyId), + param("existingKeyName", _.existingKeyName.getOrElse("").unquoted), + ) + } + + final case class FailedToDeleteKey(keyId: Fingerprint, reason: String) + extends CryptoPrivateStoreError { + override def pretty: Pretty[FailedToDeleteKey] = + prettyOfClass(param("keyId", _.keyId), param("reason", _.reason.unquoted)) + } + + final case class FailedToReplaceKeys(keyId: Seq[Fingerprint], reason: String) + extends CryptoPrivateStoreError { + override def pretty: Pretty[FailedToReplaceKeys] = + prettyOfClass(param("keyId", _.keyId), param("reason", _.reason.unquoted)) + } + + final case class 
EncryptedPrivateStoreError(reason: String) extends CryptoPrivateStoreError { + override def pretty: Pretty[EncryptedPrivateStoreError] = prettyOfClass( + unnamedParam(_.reason.unquoted) + ) + } + + final case class WrapperKeyAlreadyInUse(reason: String) extends CryptoPrivateStoreError { + override def pretty: Pretty[WrapperKeyAlreadyInUse] = prettyOfClass( + unnamedParam(_.reason.unquoted) + ) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/store/CryptoPrivateStoreExtended.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/store/CryptoPrivateStoreExtended.scala new file mode 100644 index 0000000000..f3afb122ed --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/store/CryptoPrivateStoreExtended.scala @@ -0,0 +1,247 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.crypto.store + +import cats.data.EitherT +import cats.syntax.functor.* +import com.digitalasset.canton.config.CantonRequireTypes.String300 +import com.digitalasset.canton.crypto.KeyPurpose.{Encryption, Signing} +import com.digitalasset.canton.crypto.store.db.StoredPrivateKey +import com.digitalasset.canton.crypto.{ + EncryptionPrivateKey, + Fingerprint, + KeyName, + KeyPurpose, + PrivateKey, + SigningPrivateKey, +} +import com.digitalasset.canton.logging.NamedLogging +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.version.ReleaseProtocolVersion +import com.google.common.annotations.VisibleForTesting + +import scala.collection.concurrent.TrieMap +import scala.concurrent.{ExecutionContext, Future} + +/** Extends a CryptoPrivateStore with the necessary store write/read operations and is intended to be used by canton + * internal private crypto stores (e.g. 
[[com.digitalasset.canton.crypto.store.memory.InMemoryCryptoPrivateStore]], + * [[com.digitalasset.canton.crypto.store.db.DbCryptoPrivateStore]]). + * + * The cache provides a write-through cache such that `get` operations can be served without reading from the async store. + * Async population of the cache is done at creation time. + */ +trait CryptoPrivateStoreExtended extends CryptoPrivateStore { this: NamedLogging => + + implicit val ec: ExecutionContext + + protected val releaseProtocolVersion: ReleaseProtocolVersion + + // Cached values for keys and secret + protected val signingKeyMap: TrieMap[Fingerprint, SigningPrivateKeyWithName] = TrieMap.empty + protected val decryptionKeyMap: TrieMap[Fingerprint, EncryptionPrivateKeyWithName] = TrieMap.empty + + // Write methods that the underlying store has to implement. + private[crypto] def writePrivateKey( + key: StoredPrivateKey + )(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPrivateStoreError, Unit] + + /** Replaces a set of keys transactionally to avoid an inconsistent state of the store. + * Key ids will remain the same while replacing these keys. 
+ * + * @param newKeys sequence of keys to replace + */ + private[crypto] def replaceStoredPrivateKeys(newKeys: Seq[StoredPrivateKey])(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPrivateStoreError, Unit] + + private[crypto] def readPrivateKey(keyId: Fingerprint, purpose: KeyPurpose)(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPrivateStoreError, Option[StoredPrivateKey]] + + @VisibleForTesting + private[canton] def listPrivateKeys(purpose: KeyPurpose, encrypted: Boolean)(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPrivateStoreError, Set[StoredPrivateKey]] + + private[crypto] def deletePrivateKey(keyId: Fingerprint)(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPrivateStoreError, Unit] + + def storePrivateKey(key: PrivateKey, name: Option[KeyName])(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPrivateStoreError, Unit] = { + (key: @unchecked) match { + case signingPrivateKey: SigningPrivateKey => storeSigningKey(signingPrivateKey, name) + case encryptionPrivateKey: EncryptionPrivateKey => + storeDecryptionKey(encryptionPrivateKey, name) + } + } + + def exportPrivateKey(keyId: Fingerprint)(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPrivateStoreError, Option[PrivateKey]] = { + logger.info(s"Exporting private key: $keyId") + for { + sigKey <- signingKey(keyId).widen[Option[PrivateKey]] + key <- sigKey.fold(decryptionKey(keyId).widen[Option[PrivateKey]])(key => + EitherT.rightT(Some(key)) + ) + } yield key + } + + def existsPrivateKey( + keyId: Fingerprint, + keyPurpose: KeyPurpose, + )(implicit traceContext: TraceContext): EitherT[Future, CryptoPrivateStoreError, Boolean] = + keyPurpose match { + case KeyPurpose.Signing => existsSigningKey(keyId) + case KeyPurpose.Encryption => existsDecryptionKey(keyId) + } + + def removePrivateKey( + keyId: Fingerprint + )(implicit traceContext: TraceContext): EitherT[Future, CryptoPrivateStoreError, Unit] = { + 
val deletedSigKey = signingKeyMap.remove(keyId) + val deletedDecKey = decryptionKeyMap.remove(keyId) + + deletePrivateKey(keyId).leftMap { err => + // In case the deletion in the persistence layer failed, we have to restore the cache. + deletedSigKey.foreach(signingKeyMap.put(keyId, _)) + deletedDecKey.foreach(decryptionKeyMap.put(keyId, _)) + err + } + } + + private def readAndParsePrivateKey[A <: PrivateKey, B <: PrivateKeyWithName]( + keyPurpose: KeyPurpose, + parsingFunc: StoredPrivateKey => ParsingResult[A], + buildKeyWithNameFunc: (A, Option[KeyName]) => B, + )(keyId: Fingerprint)(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPrivateStoreError, Option[B]] = + readPrivateKey(keyId, keyPurpose) + .flatMap { + case Some(storedPrivateKey) => + parsingFunc(storedPrivateKey) match { + case Left(parseErr) => + EitherT.leftT[Future, Option[B]]( + CryptoPrivateStoreError + .FailedToReadKey( + keyId, + s"could not parse stored key (it can either be corrupted or encrypted): ${parseErr.toString}", + ) + ) + case Right(privateKey) => + EitherT.rightT[Future, CryptoPrivateStoreError]( + Some(buildKeyWithNameFunc(privateKey, storedPrivateKey.name)) + ) + } + case None => EitherT.rightT[Future, CryptoPrivateStoreError](None) + } + + private[crypto] def signingKey(signingKeyId: Fingerprint)(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPrivateStoreError, Option[SigningPrivateKey]] = + retrieveAndUpdateCache( + signingKeyMap, + keyFingerprint => + readAndParsePrivateKey[SigningPrivateKey, SigningPrivateKeyWithName]( + Signing, + key => SigningPrivateKey.fromByteString(key.data), + (privateKey, name) => SigningPrivateKeyWithName(privateKey, name), + )(keyFingerprint), + )(signingKeyId) + + private[crypto] def storeSigningKey( + key: SigningPrivateKey, + name: Option[KeyName], + )(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPrivateStoreError, Unit] = + for { + _ <- writePrivateKey( + new StoredPrivateKey( + id = key.id, 
+ data = key.toByteString(releaseProtocolVersion.v), + purpose = key.purpose, + name = name, + wrapperKeyId = None, + ) + ) + .map { _ => + signingKeyMap.put(key.id, SigningPrivateKeyWithName(key, name)).discard + } + } yield () + + def existsSigningKey(signingKeyId: Fingerprint)(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPrivateStoreError, Boolean] = + signingKey(signingKeyId).map(_.nonEmpty) + + private[crypto] def decryptionKey(encryptionKeyId: Fingerprint)(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPrivateStoreError, Option[EncryptionPrivateKey]] = { + retrieveAndUpdateCache( + decryptionKeyMap, + keyFingerprint => + readAndParsePrivateKey[EncryptionPrivateKey, EncryptionPrivateKeyWithName]( + Encryption, + key => EncryptionPrivateKey.fromByteString(key.data), + (privateKey, name) => EncryptionPrivateKeyWithName(privateKey, name), + )(keyFingerprint), + )(encryptionKeyId) + } + + private[crypto] def storeDecryptionKey( + key: EncryptionPrivateKey, + name: Option[KeyName], + )(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPrivateStoreError, Unit] = + for { + _ <- writePrivateKey( + new StoredPrivateKey( + id = key.id, + data = key.toByteString(releaseProtocolVersion.v), + purpose = key.purpose, + name = name, + wrapperKeyId = None, + ) + ) + .map { _ => + decryptionKeyMap.put(key.id, EncryptionPrivateKeyWithName(key, name)).discard + } + } yield () + + def existsDecryptionKey(decryptionKeyId: Fingerprint)(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPrivateStoreError, Boolean] = + decryptionKey(decryptionKeyId).map(_.nonEmpty) + + private def retrieveAndUpdateCache[KN <: PrivateKeyWithName]( + cache: TrieMap[Fingerprint, KN], + readKey: Fingerprint => EitherT[Future, CryptoPrivateStoreError, Option[KN]], + )(keyId: Fingerprint): EitherT[Future, CryptoPrivateStoreError, Option[KN#K]] = + cache.get(keyId) match { + case Some(value) => EitherT.rightT(Some(value.privateKey)) + case 
None => + readKey(keyId).map { keyOption => + keyOption.foreach(key => cache.putIfAbsent(keyId, key)) + keyOption.map(_.privateKey) + } + } + + /** Returns the wrapper key used to encrypt the private key + * or None if private key is not encrypted. + * + * @param keyId private key fingerprint + * @return the wrapper key used for encryption or None if key is not encrypted + */ + private[crypto] def encrypted(keyId: Fingerprint)(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPrivateStoreError, Option[String300]] +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/store/CryptoPublicStore.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/store/CryptoPublicStore.scala new file mode 100644 index 0000000000..d58d8bade5 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/store/CryptoPublicStore.scala @@ -0,0 +1,223 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.crypto.store + +import cats.data.EitherT +import cats.syntax.functor.* +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.crypto.store.db.DbCryptoPublicStore +import com.digitalasset.canton.crypto.store.memory.InMemoryCryptoPublicStore +import com.digitalasset.canton.crypto.{KeyName, *} +import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.resource.{DbStorage, MemoryStorage, Storage} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.version.ReleaseProtocolVersion +import com.google.common.annotations.VisibleForTesting + +import scala.collection.concurrent.TrieMap +import scala.concurrent.{ExecutionContext, Future} + +/** Store for all public cryptographic material such as certificates or public keys. */ +trait CryptoPublicStore extends AutoCloseable { + + implicit val ec: ExecutionContext + + // Cached values for public keys with names + protected val signingKeyMap: TrieMap[Fingerprint, SigningPublicKeyWithName] = TrieMap.empty + protected val encryptionKeyMap: TrieMap[Fingerprint, EncryptionPublicKeyWithName] = TrieMap.empty + + // Write methods that the underlying store has to implement for the caching + protected def writeSigningKey(key: SigningPublicKey, name: Option[KeyName])(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPublicStoreError, Unit] + protected def writeEncryptionKey(key: EncryptionPublicKey, name: Option[KeyName])(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPublicStoreError, Unit] + + protected[crypto] def listAllKeyFingerprints(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPublicStoreError, Set[Fingerprint]] = + for { + signingKeys <- listSigningKeys + encryptionKeys <- listEncryptionKeys + } yield signingKeys.map(_.publicKey.id) ++ 
encryptionKeys.map(_.publicKey.id) + + @VisibleForTesting + private[store] def listSigningKeys(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPublicStoreError, Set[SigningPublicKeyWithName]] + @VisibleForTesting + private[store] def listEncryptionKeys(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPublicStoreError, Set[EncryptionPublicKeyWithName]] + + def storePublicKey(publicKey: PublicKey, name: Option[KeyName])(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPublicStoreError, Unit] = + (publicKey: @unchecked) match { + case sigKey: SigningPublicKey => storeSigningKey(sigKey, name) + case encKey: EncryptionPublicKey => storeEncryptionKey(encKey, name) + } + + def publicKey(keyId: Fingerprint)(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPublicStoreError, Option[PublicKey]] = + publicKeyWithName(keyId).map(_.map(_.publicKey)) + + def publicKeyWithName(keyId: Fingerprint)(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPublicStoreError, Option[PublicKeyWithName]] = + for { + sigKeyOption <- readSigningKey(keyId) + pubKeyOption <- sigKeyOption.fold(readEncryptionKey(keyId).widen[Option[PublicKeyWithName]])( + key => EitherT.rightT(Some(key)) + ) + } yield pubKeyOption + + def existsPublicKey(keyId: Fingerprint, purpose: KeyPurpose)(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPublicStoreError, Boolean] = + purpose match { + case KeyPurpose.Signing => signingKey(keyId).map(_.nonEmpty) + case KeyPurpose.Encryption => encryptionKey(keyId).map(_.nonEmpty) + } + + def findSigningKeyIdByName(keyName: KeyName)(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPublicStoreError, Option[SigningPublicKey]] = + listSigningKeys.map(_.find(_.name.contains(keyName)).map(_.publicKey)) + + def findSigningKeyIdByFingerprint(fingerprint: Fingerprint)(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPublicStoreError, Option[SigningPublicKey]] = + 
listSigningKeys.map(_.find(_.publicKey.fingerprint == fingerprint).map(_.publicKey)) + + def findEncryptionKeyIdByName(keyName: KeyName)(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPublicStoreError, Option[EncryptionPublicKey]] = + listEncryptionKeys.map(_.find(_.name.contains(keyName)).map(_.publicKey)) + + def publicKeysWithName(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPublicStoreError, Set[PublicKeyWithName]] = + for { + sigKeys <- listSigningKeys + encKeys <- listEncryptionKeys + } yield sigKeys.toSet[PublicKeyWithName] ++ encKeys.toSet[PublicKeyWithName] + + def signingKey(signingKeyId: Fingerprint)(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPublicStoreError, Option[SigningPublicKey]] = + retrieveKeyAndUpdateCache(signingKeyMap, readSigningKey(_))(signingKeyId) + + protected def readSigningKey(signingKeyId: Fingerprint)(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPublicStoreError, Option[SigningPublicKeyWithName]] + + def signingKeys(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPublicStoreError, Set[SigningPublicKey]] = + retrieveKeysAndUpdateCache(listSigningKeys, signingKeyMap) + + def storeSigningKey(key: SigningPublicKey, name: Option[KeyName] = None)(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPublicStoreError, Unit] = + writeSigningKey(key, name).map { _ => + val _ = signingKeyMap.put(key.id, SigningPublicKeyWithName(key, name)) + } + + def encryptionKey(encryptionKeyId: Fingerprint)(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPublicStoreError, Option[EncryptionPublicKey]] = + retrieveKeyAndUpdateCache(encryptionKeyMap, readEncryptionKey(_))(encryptionKeyId) + + protected def readEncryptionKey(encryptionKeyId: Fingerprint)(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPublicStoreError, Option[EncryptionPublicKeyWithName]] + + def encryptionKeys(implicit + traceContext: TraceContext + ): 
EitherT[Future, CryptoPublicStoreError, Set[EncryptionPublicKey]] = + retrieveKeysAndUpdateCache(listEncryptionKeys, encryptionKeyMap) + + def storeEncryptionKey(key: EncryptionPublicKey, name: Option[KeyName] = None)(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPublicStoreError, Unit] = + writeEncryptionKey(key, name) + .map { _ => + val _ = encryptionKeyMap.put(key.id, EncryptionPublicKeyWithName(key, name)) + } + + private def retrieveKeyAndUpdateCache[KN <: PublicKeyWithName]( + cache: TrieMap[Fingerprint, KN], + readKey: Fingerprint => EitherT[Future, CryptoPublicStoreError, Option[KN]], + )(keyId: Fingerprint): EitherT[Future, CryptoPublicStoreError, Option[KN#K]] = + cache.get(keyId) match { + case Some(value) => EitherT.rightT(Some(value.publicKey)) + case None => + readKey(keyId).map { keyOption => + keyOption.foreach(key => cache.putIfAbsent(keyId, key)) + keyOption.map(_.publicKey) + } + } + + private def retrieveKeysAndUpdateCache[KN <: PublicKeyWithName]( + keysFromDb: EitherT[Future, CryptoPublicStoreError, Set[KN]], + cache: TrieMap[Fingerprint, KN], + ): EitherT[Future, CryptoPublicStoreError, Set[KN#K]] = + for { + // we always rebuild the cache here just in case new keys have been added by another process + // this should not be a problem since these operations to get all keys are infrequent and typically + // typically the number of keys is not very large + storedKeys <- keysFromDb + _ = cache ++= storedKeys.map(k => k.publicKey.id -> k) + } yield storedKeys.map(_.publicKey) +} + +object CryptoPublicStore { + def create( + storage: Storage, + releaseProtocolVersion: ReleaseProtocolVersion, + timeouts: ProcessingTimeout, + loggerFactory: NamedLoggerFactory, + )(implicit + ec: ExecutionContext + ): CryptoPublicStore = { + storage match { + case _: MemoryStorage => new InMemoryCryptoPublicStore + case dbStorage: DbStorage => + new DbCryptoPublicStore(dbStorage, releaseProtocolVersion, timeouts, loggerFactory) + } + } +} + +sealed 
trait CryptoPublicStoreError extends Product with Serializable with PrettyPrinting +object CryptoPublicStoreError { + + final case class FailedToListKeys(reason: String) extends CryptoPublicStoreError { + override def pretty: Pretty[FailedToListKeys] = prettyOfClass(unnamedParam(_.reason.unquoted)) + } + + final case class FailedToReadKey(keyId: Fingerprint, reason: String) + extends CryptoPublicStoreError { + override def pretty: Pretty[FailedToReadKey] = prettyOfClass(unnamedParam(_.reason.unquoted)) + } + + final case class FailedToInsertKey(keyId: Fingerprint, reason: String) + extends CryptoPublicStoreError { + override def pretty: Pretty[FailedToInsertKey] = + prettyOfClass(param("keyId", _.keyId), param("reason", _.reason.unquoted)) + } + + final case class KeyAlreadyExists(keyId: Fingerprint, existingKeyName: Option[String]) + extends CryptoPublicStoreError { + override def pretty: Pretty[KeyAlreadyExists] = + prettyOfClass( + param("keyId", _.keyId), + param("existingKeyName", _.existingKeyName.getOrElse("").unquoted), + ) + } + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/store/db/DbCryptoPrivateStore.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/store/db/DbCryptoPrivateStore.scala new file mode 100644 index 0000000000..8a66f828b0 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/store/db/DbCryptoPrivateStore.scala @@ -0,0 +1,272 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.crypto.store.db + +import cats.data.EitherT +import cats.syntax.bifunctor.* +import com.daml.nameof.NameOf.functionFullName +import com.digitalasset.canton.config.CantonRequireTypes.String300 +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.crypto.KeyPurpose.{Encryption, Signing} +import com.digitalasset.canton.crypto.* +import com.digitalasset.canton.crypto.store.* +import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.metrics.TimedLoadGauge +import com.digitalasset.canton.resource.DbStorage.DbAction +import com.digitalasset.canton.resource.DbStorage.Implicits.* +import com.digitalasset.canton.resource.{DbStorage, DbStore} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.EitherTUtil +import com.digitalasset.canton.version.ReleaseProtocolVersion +import com.google.common.annotations.VisibleForTesting +import com.google.protobuf.ByteString +import slick.dbio.DBIOAction +import slick.jdbc.GetResult +import slick.sql.SqlAction + +import scala.concurrent.{ExecutionContext, Future} + +/** Represents the data to be stored in the crypto_private_keys table. + * If wrapperKeyId is set (Some(wrapperKeyId)) then the data field is encrypted + * otherwise (None), then the data field is in plaintext. + * @param id canton identifier for a private key + * @param data a ByteString that stores either: (1) the serialized private key case class, which contains the private + * key plus metadata, or (2) the above proto serialization but encrypted with the wrapper key if present. + * @param purpose to identify if the key is for signing or encryption + * @param name an alias name for the private key + * @param wrapperKeyId identifies what is the key being used to encrypt the data field. If empty, data is + * unencrypted. 
+ */ +final case class StoredPrivateKey( + id: Fingerprint, + data: ByteString, + purpose: KeyPurpose, + name: Option[KeyName], + wrapperKeyId: Option[String300], +) extends Product + with Serializable { + + def isEncrypted: Boolean = { this.wrapperKeyId.isDefined } + +} + +object StoredPrivateKey { + implicit def getResultStoredPrivateKey(implicit + getResultByteString: GetResult[ByteString] + ): GetResult[StoredPrivateKey] = + GetResult { r => + StoredPrivateKey(r.<<, r.<<, r.<<, r.<<, r.<<) + } +} + +class DbCryptoPrivateStore( + override protected val storage: DbStorage, + override protected val releaseProtocolVersion: ReleaseProtocolVersion, + override protected val timeouts: ProcessingTimeout, + override protected val loggerFactory: NamedLoggerFactory, +)(override implicit val ec: ExecutionContext) + extends CryptoPrivateStoreExtended + with DbStore { + + import storage.api.* + + private val insertTime: TimedLoadGauge = + storage.metrics.loadGaugeM("crypto-private-store-insert") + private val queryTime: TimedLoadGauge = + storage.metrics.loadGaugeM("crypto-private-store-query") + + private def queryKeys(purpose: KeyPurpose): DbAction.ReadOnly[Set[StoredPrivateKey]] = + sql"select key_id, data, purpose, name, wrapper_key_id from crypto_private_keys where purpose = $purpose" + .as[StoredPrivateKey] + .map(_.toSet) + + private def queryKey( + keyId: Fingerprint, + purpose: KeyPurpose, + ): DbAction.ReadOnly[Option[StoredPrivateKey]] = + sql"select key_id, data, purpose, name, wrapper_key_id from crypto_private_keys where key_id = $keyId and purpose = $purpose" + .as[StoredPrivateKey] + .headOption + + private def insertKeyUpdate( + key: StoredPrivateKey + ): DbAction.WriteOnly[Int] = { + storage.profile match { + case _: DbStorage.Profile.Oracle => + sqlu"""insert + /*+ IGNORE_ROW_ON_DUPKEY_INDEX ( CRYPTO_PRIVATE_KEYS ( key_id ) ) */ + into crypto_private_keys (key_id, purpose, data, name, wrapper_key_id) + values (${key.id}, ${key.purpose}, ${key.data}, 
${key.name}, ${key.wrapperKeyId})""" + case _ => + sqlu"""insert into crypto_private_keys (key_id, purpose, data, name, wrapper_key_id) + values (${key.id}, ${key.purpose}, ${key.data}, ${key.name}, ${key.wrapperKeyId}) + on conflict do nothing""" + } + } + + private def insertKey(key: StoredPrivateKey)(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPrivateStoreError, Unit] = { + + def equalKeys(existingKey: StoredPrivateKey, newKey: StoredPrivateKey): Boolean = { + if (existingKey.wrapperKeyId.isEmpty) { + existingKey.data == newKey.data && + existingKey.name == newKey.name && + existingKey.purpose == newKey.purpose + } else { + // in the encrypted case we cannot compare the contents of data directly, we simply do not allow + // keys having the same name and purpose + existingKey.name == newKey.name && + existingKey.purpose == newKey.purpose + } + } + + insertTime.eitherTEvent { + for { + inserted <- EitherT.right( + storage.update(insertKeyUpdate(key), functionFullName) + ) + res <- + if (inserted == 0) { + // If no key was inserted by the insert query, check that the existing value matches + storage + .querySingle(queryKey(key.id, key.purpose), functionFullName) + // If we don't find the duplicate key, it may have been concurrently deleted and we could retry to insert it. 
+ .toRight( + CryptoPrivateStoreError + .FailedToInsertKey(key.id, "No key inserted and no key found") + ) + .flatMap { existingKey => + EitherT + .cond[Future]( + equalKeys(existingKey, key), + (), + CryptoPrivateStoreError.KeyAlreadyExists(key.id, existingKey.name.map(_.unwrap)), + ) + .leftWiden[CryptoPrivateStoreError] + } + } else EitherT.rightT[Future, CryptoPrivateStoreError](()) + } yield res + } + } + + private[crypto] def readPrivateKey( + keyId: Fingerprint, + purpose: KeyPurpose, + )(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPrivateStoreError, Option[StoredPrivateKey]] = + EitherTUtil.fromFuture( + storage + .querySingle( + queryKey(keyId, purpose), + functionFullName, + ) + .value, + err => CryptoPrivateStoreError.FailedToReadKey(keyId, err.toString), + ) + + private[crypto] def writePrivateKey( + key: StoredPrivateKey + )(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPrivateStoreError, Unit] = + insertKey(key) + + @VisibleForTesting + private[canton] def listPrivateKeys(purpose: KeyPurpose, encrypted: Boolean)(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPrivateStoreError, Set[StoredPrivateKey]] = + EitherTUtil + .fromFuture( + queryTime + .event( + storage.query(queryKeys(purpose), functionFullName) + ) + .map(keys => keys.filter(_.isEncrypted == encrypted)), + err => CryptoPrivateStoreError.FailedToListKeys(err.toString), + ) + + private def deleteKey(keyId: Fingerprint): SqlAction[Int, NoStream, Effect.Write] = + sqlu"delete from crypto_private_keys where key_id = $keyId" + + /** Replaces keys but maintains their id stable, i.e. when the keys remain the same, but the + * storage format changes (e.g. 
encrypting a key) + */ + private[crypto] def replaceStoredPrivateKeys(newKeys: Seq[StoredPrivateKey])(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPrivateStoreError, Unit] = + EitherTUtil.fromFuture( + insertTime.event { + storage + .update_( + DBIOAction + .sequence( + newKeys.map(key => deleteKey(key.id).andThen(insertKeyUpdate(key))) + ) + .transactionally, + functionFullName, + ) + }, + err => CryptoPrivateStoreError.FailedToReplaceKeys(newKeys.map(_.id), err.toString), + ) + + private[crypto] def deletePrivateKey(keyId: Fingerprint)(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPrivateStoreError, Unit] = + EitherTUtil.fromFuture( + insertTime.event( + storage + .update_(deleteKey(keyId), functionFullName) + ), + err => CryptoPrivateStoreError.FailedToDeleteKey(keyId, err.toString), + ) + + private[crypto] def encrypted( + keyId: Fingerprint + )(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPrivateStoreError, Option[String300]] = + (for { + sigStoreKey <- readPrivateKey(keyId, Signing) + storedKey <- sigStoreKey.fold(readPrivateKey(keyId, Encryption))(key => + EitherT.rightT(Some(key)) + ) + } yield storedKey).flatMap { + case Some(key) => + EitherT.rightT(key.wrapperKeyId) + case None => + EitherT.leftT(CryptoPrivateStoreError.FailedToReadKey(keyId, s"could not read key")) + } + + private[crypto] def getWrapperKeyId()(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPrivateStoreError, Option[String300]] = + EitherTUtil + .fromFuture( + queryTime + .event( + storage.query( + { + sql"select distinct wrapper_key_id from crypto_private_keys" + .as[Option[String300]] + .map(_.toSeq) + }, + functionFullName, + ) + ), + err => CryptoPrivateStoreError.FailedToGetWrapperKeyId(err.toString), + ) + .transform { + case Left(err) => Left(err) + case Right(wrapper_keys) => + if (wrapper_keys.size > 1) + Left( + CryptoPrivateStoreError + .FailedToGetWrapperKeyId("Found more than one distinct 
wrapper_key_id") + ) + else + Right(wrapper_keys.flatten.headOption) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/store/db/DbCryptoPublicStore.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/store/db/DbCryptoPublicStore.scala new file mode 100644 index 0000000000..2bdbaa31af --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/store/db/DbCryptoPublicStore.scala @@ -0,0 +1,159 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.crypto.store.db + +import cats.data.EitherT +import cats.syntax.bifunctor.* +import com.daml.nameof.NameOf.functionFullName +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.crypto.* +import com.digitalasset.canton.crypto.store.* +import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.metrics.TimedLoadGauge +import com.digitalasset.canton.resource.DbStorage.DbAction +import com.digitalasset.canton.resource.{DbStorage, DbStore} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.EitherTUtil +import com.digitalasset.canton.version.ReleaseProtocolVersion +import slick.jdbc.{GetResult, SetParameter} + +import scala.concurrent.{ExecutionContext, Future} + +class DbCryptoPublicStore( + override protected val storage: DbStorage, + protected val releaseProtocolVersion: ReleaseProtocolVersion, + override protected val timeouts: ProcessingTimeout, + override protected val loggerFactory: NamedLoggerFactory, +)(override implicit val ec: ExecutionContext) + extends CryptoPublicStore + with DbStore { + + import storage.api.* + import storage.converters.* + + private val insertTime: TimedLoadGauge = + storage.metrics.loadGaugeM("crypto-public-store-insert") + private val queryTime: TimedLoadGauge = + 
storage.metrics.loadGaugeM("crypto-public-store-query") + + private implicit val setParameterEncryptionPublicKey: SetParameter[EncryptionPublicKey] = + EncryptionPublicKey.getVersionedSetParameter(releaseProtocolVersion.v) + private implicit val setParameterSigningPublicKey: SetParameter[SigningPublicKey] = + SigningPublicKey.getVersionedSetParameter(releaseProtocolVersion.v) + + private def queryKeys[K: GetResult](purpose: KeyPurpose): DbAction.ReadOnly[Set[K]] = + sql"select data, name from crypto_public_keys where purpose = $purpose" + .as[K] + .map(_.toSet) + + private def queryKey[K <: PublicKeyWithName: GetResult]( + keyId: Fingerprint, + purpose: KeyPurpose, + ): DbAction.ReadOnly[Option[K]] = + sql"select data, name from crypto_public_keys where key_id = $keyId and purpose = $purpose" + .as[K] + .headOption + + private def insertKeyUpdate[K <: PublicKey: SetParameter, KN <: PublicKeyWithName: GetResult]( + key: K, + name: Option[KeyName], + ): DbAction.WriteOnly[Int] = + storage.profile match { + case _: DbStorage.Profile.Oracle => + sqlu"""insert + /*+ IGNORE_ROW_ON_DUPKEY_INDEX ( crypto_public_keys ( key_id ) ) */ + into crypto_public_keys (key_id, purpose, data, name) + values (${key.id}, ${key.purpose}, $key, $name)""" + case _ => + sqlu"""insert into crypto_public_keys (key_id, purpose, data, name) + values (${key.id}, ${key.purpose}, $key, $name) + on conflict do nothing""" + } + + private def insertKey[K <: PublicKey: SetParameter, KN <: PublicKeyWithName: GetResult]( + key: K, + name: Option[KeyName], + )(implicit traceContext: TraceContext): EitherT[Future, CryptoPublicStoreError, Unit] = + insertTime.eitherTEvent { + for { + inserted <- EitherT.right(storage.update(insertKeyUpdate(key, name), functionFullName)) + res <- + if (inserted == 0) { + // If no key was inserted by the insert query, check that the existing value matches + storage + .querySingle(queryKey(key.id, key.purpose), functionFullName) + .toRight( + 
CryptoPublicStoreError.FailedToInsertKey(key.id, "No key inserted and no key found") + ) + .flatMap { existingKey => + EitherT + .cond[Future]( + existingKey.publicKey == key && existingKey.name == name, + (), + CryptoPublicStoreError.KeyAlreadyExists(key.id, existingKey.name.map(_.unwrap)), + ) + .leftWiden[CryptoPublicStoreError] + } + } else EitherT.rightT[Future, CryptoPublicStoreError](()) + } yield res + } + + override def readSigningKey(signingKeyId: Fingerprint)(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPublicStoreError, Option[SigningPublicKeyWithName]] = + EitherTUtil.fromFuture( + storage + .querySingle( + queryKey[SigningPublicKeyWithName](signingKeyId, KeyPurpose.Signing), + functionFullName, + ) + .value, + err => CryptoPublicStoreError.FailedToReadKey(signingKeyId, err.toString), + ) + + override def readEncryptionKey(encryptionKeyId: Fingerprint)(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPublicStoreError, Option[EncryptionPublicKeyWithName]] = + EitherTUtil.fromFuture( + storage + .querySingle( + queryKey[EncryptionPublicKeyWithName](encryptionKeyId, KeyPurpose.Encryption), + functionFullName, + ) + .value, + err => CryptoPublicStoreError.FailedToReadKey(encryptionKeyId, err.toString), + ) + + override protected def writeSigningKey(key: SigningPublicKey, name: Option[KeyName])(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPublicStoreError, Unit] = + insertKey[SigningPublicKey, SigningPublicKeyWithName](key, name) + + override protected def writeEncryptionKey(key: EncryptionPublicKey, name: Option[KeyName])( + implicit traceContext: TraceContext + ): EitherT[Future, CryptoPublicStoreError, Unit] = + insertKey[EncryptionPublicKey, EncryptionPublicKeyWithName](key, name) + + override private[store] def listSigningKeys(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPublicStoreError, Set[SigningPublicKeyWithName]] = + EitherTUtil.fromFuture( + queryTime.event( + 
storage.query(queryKeys[SigningPublicKeyWithName](KeyPurpose.Signing), functionFullName) + ), + err => CryptoPublicStoreError.FailedToListKeys(err.toString), + ) + + override private[store] def listEncryptionKeys(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPublicStoreError, Set[EncryptionPublicKeyWithName]] = + EitherTUtil + .fromFuture( + queryTime.event( + storage + .query(queryKeys[EncryptionPublicKeyWithName](KeyPurpose.Encryption), functionFullName) + ), + err => CryptoPublicStoreError.FailedToListKeys(err.toString), + ) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/store/memory/InMemoryCryptoPrivateStore.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/store/memory/InMemoryCryptoPrivateStore.scala new file mode 100644 index 0000000000..bc3c742c69 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/store/memory/InMemoryCryptoPrivateStore.scala @@ -0,0 +1,197 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.crypto.store.memory + +import cats.data.EitherT +import cats.syntax.either.* +import cats.syntax.parallel.* +import com.digitalasset.canton.config.CantonRequireTypes.String300 +import com.digitalasset.canton.crypto.KeyPurpose.{Encryption, Signing} +import com.digitalasset.canton.crypto.store.db.StoredPrivateKey +import com.digitalasset.canton.crypto.store.{ + CryptoPrivateStoreError, + CryptoPrivateStoreExtended, + EncryptionPrivateKeyWithName, + PrivateKeyWithName, + SigningPrivateKeyWithName, +} +import com.digitalasset.canton.crypto.{ + EncryptionPrivateKey, + Fingerprint, + KeyName, + KeyPurpose, + PrivateKey, + SigningPrivateKey, +} +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.FutureInstances.* +import com.digitalasset.canton.util.TrieMapUtil +import com.digitalasset.canton.version.ReleaseProtocolVersion +import com.google.common.annotations.VisibleForTesting + +import scala.collection.concurrent.TrieMap +import scala.concurrent.{ExecutionContext, Future} + +/** The in-memory store does not provide any persistence and keys during runtime are stored in the generic caching layer. 
+ */ +class InMemoryCryptoPrivateStore( + override protected val releaseProtocolVersion: ReleaseProtocolVersion, + override protected val loggerFactory: NamedLoggerFactory, +)( + override implicit val ec: ExecutionContext +) extends CryptoPrivateStoreExtended + with NamedLogging { + + private val storedSigningKeyMap: TrieMap[Fingerprint, SigningPrivateKeyWithName] = TrieMap.empty + private val storedDecryptionKeyMap: TrieMap[Fingerprint, EncryptionPrivateKeyWithName] = + TrieMap.empty + + private def wrapPrivateKeyInToStored(pk: PrivateKey, name: Option[KeyName]): StoredPrivateKey = + new StoredPrivateKey( + id = pk.id, + data = (pk: @unchecked) match { + case spk: SigningPrivateKey => spk.toByteString(releaseProtocolVersion.v) + case epk: EncryptionPrivateKey => epk.toByteString(releaseProtocolVersion.v) + }, + purpose = pk.purpose, + name = name, + wrapperKeyId = None, + ) + + private def errorDuplicate[K <: PrivateKeyWithName]( + keyId: Fingerprint, + oldKey: K, + newKey: K, + ): CryptoPrivateStoreError = + CryptoPrivateStoreError.KeyAlreadyExists(keyId, oldKey.name.map(_.unwrap)) + + private[crypto] def readPrivateKey(keyId: Fingerprint, purpose: KeyPurpose)(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPrivateStoreError, Option[StoredPrivateKey]] = { + purpose match { + case Signing => + storedSigningKeyMap + .get(keyId) + .parTraverse(pk => + EitherT.rightT[Future, CryptoPrivateStoreError]( + wrapPrivateKeyInToStored(pk.privateKey, pk.name) + ) + ) + case Encryption => + storedDecryptionKeyMap + .get(keyId) + .parTraverse(pk => + EitherT.rightT[Future, CryptoPrivateStoreError]( + wrapPrivateKeyInToStored(pk.privateKey, pk.name) + ) + ) + } + } + + private[crypto] def writePrivateKey( + key: StoredPrivateKey + )(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPrivateStoreError, Unit] = { + + def parseAndWritePrivateKey[A <: PrivateKey, B <: PrivateKeyWithName]( + pk: A, + cache: TrieMap[Fingerprint, B], + 
buildKeyWithNameFunc: (A, Option[KeyName]) => B, + ): EitherT[Future, CryptoPrivateStoreError, Unit] = + TrieMapUtil + .insertIfAbsent( + cache, + key.id, + buildKeyWithNameFunc(pk, key.name), + errorDuplicate[B] _, + ) + .toEitherT + + val storedKey = key.purpose match { + case Signing => SigningPrivateKey.fromByteString(key.data) + case Encryption => EncryptionPrivateKey.fromByteString(key.data) + } + + for { + res <- storedKey match { + case Left(parseErr) => + EitherT.leftT[Future, Unit]( + CryptoPrivateStoreError.FailedToInsertKey( + key.id, + s"could not parse stored key (it can either be corrupted or encrypted): ${parseErr.toString}", + ): CryptoPrivateStoreError + ) + case Right(spk: SigningPrivateKey) => + parseAndWritePrivateKey[SigningPrivateKey, SigningPrivateKeyWithName]( + spk, + storedSigningKeyMap, + (privateKey, name) => SigningPrivateKeyWithName(privateKey, name), + ) + case Right(epk: EncryptionPrivateKey) => + parseAndWritePrivateKey[EncryptionPrivateKey, EncryptionPrivateKeyWithName]( + epk, + storedDecryptionKeyMap, + (privateKey, name) => EncryptionPrivateKeyWithName(privateKey, name), + ) + case _ => + EitherT.leftT[Future, Unit]( + CryptoPrivateStoreError.FailedToInsertKey( + key.id, + s"key type does not match any of the known types", + ): CryptoPrivateStoreError + ) + } + } yield res + } + + @VisibleForTesting + private[canton] def listPrivateKeys(purpose: KeyPurpose, encrypted: Boolean)(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPrivateStoreError, Set[StoredPrivateKey]] = + (purpose match { + case Signing => + storedSigningKeyMap.values.toSeq + .parTraverse((x: SigningPrivateKeyWithName) => + EitherT.rightT[Future, CryptoPrivateStoreError]( + wrapPrivateKeyInToStored(x.privateKey, x.name) + ) + ) + case Encryption => + storedDecryptionKeyMap.values.toSeq + .parTraverse((x: EncryptionPrivateKeyWithName) => + EitherT.rightT[Future, CryptoPrivateStoreError]( + wrapPrivateKeyInToStored(x.privateKey, x.name) + ) + ) + 
}).map(_.toSet) + + private[crypto] def deletePrivateKey( + keyId: Fingerprint + )(implicit traceContext: TraceContext): EitherT[Future, CryptoPrivateStoreError, Unit] = { + storedSigningKeyMap.remove(keyId).discard + storedDecryptionKeyMap.remove(keyId).discard + EitherT.rightT(()) + } + + private[crypto] def replaceStoredPrivateKeys(newKeys: Seq[StoredPrivateKey])(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPrivateStoreError, Unit] = + newKeys + .parTraverse { newKey => + for { + _ <- deletePrivateKey(newKey.id) + _ <- writePrivateKey(newKey) + } yield () + } + .map(_ => ()) + + private[crypto] def encrypted(keyId: Fingerprint)(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPrivateStoreError, Option[String300]] = + EitherT.rightT[Future, CryptoPrivateStoreError](None) + + override def close(): Unit = () +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/store/memory/InMemoryCryptoPublicStore.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/store/memory/InMemoryCryptoPublicStore.scala new file mode 100644 index 0000000000..e234f5434f --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/crypto/store/memory/InMemoryCryptoPublicStore.scala @@ -0,0 +1,77 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.crypto.store.memory + +import cats.data.EitherT +import cats.syntax.either.* +import com.digitalasset.canton.crypto.store.{CryptoPublicStore, CryptoPublicStoreError} +import com.digitalasset.canton.crypto.{KeyName, *} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.TrieMapUtil + +import scala.collection.concurrent.TrieMap +import scala.concurrent.{ExecutionContext, Future} + +class InMemoryCryptoPublicStore(override implicit val ec: ExecutionContext) + extends CryptoPublicStore { + + private val storedSigningKeyMap: TrieMap[Fingerprint, SigningPublicKeyWithName] = TrieMap.empty + private val storedEncryptionKeyMap: TrieMap[Fingerprint, EncryptionPublicKeyWithName] = + TrieMap.empty + + private def errorKeyDuplicate[K <: PublicKeyWithName]( + keyId: Fingerprint, + oldKey: K, + newKey: K, + ): CryptoPublicStoreError = + CryptoPublicStoreError.KeyAlreadyExists(keyId, oldKey.name.map(_.unwrap)) + + override protected def writeSigningKey(key: SigningPublicKey, name: Option[KeyName])(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPublicStoreError, Unit] = { + TrieMapUtil + .insertIfAbsent( + storedSigningKeyMap, + key.id, + SigningPublicKeyWithName(key, name), + errorKeyDuplicate[SigningPublicKeyWithName] _, + ) + .toEitherT + } + + override def readSigningKey(signingKeyId: Fingerprint)(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPublicStoreError, Option[SigningPublicKeyWithName]] = + EitherT.rightT(storedSigningKeyMap.get(signingKeyId)) + + override def readEncryptionKey(encryptionKeyId: Fingerprint)(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPublicStoreError, Option[EncryptionPublicKeyWithName]] = + EitherT.rightT(storedEncryptionKeyMap.get(encryptionKeyId)) + + override protected def writeEncryptionKey(key: EncryptionPublicKey, name: Option[KeyName])( + implicit traceContext: TraceContext + ): 
EitherT[Future, CryptoPublicStoreError, Unit] = { + TrieMapUtil + .insertIfAbsent( + storedEncryptionKeyMap, + key.id, + EncryptionPublicKeyWithName(key, name), + errorKeyDuplicate[EncryptionPublicKeyWithName] _, + ) + .toEitherT + } + + override private[store] def listSigningKeys(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPublicStoreError, Set[SigningPublicKeyWithName]] = + EitherT.rightT(storedSigningKeyMap.values.toSet) + + override private[store] def listEncryptionKeys(implicit + traceContext: TraceContext + ): EitherT[Future, CryptoPublicStoreError, Set[EncryptionPublicKeyWithName]] = + EitherT.rightT(storedEncryptionKeyMap.values.toSet) + + override def close(): Unit = () +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/ActionDescription.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/ActionDescription.scala new file mode 100644 index 0000000000..cd25aa8b6c --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/ActionDescription.scala @@ -0,0 +1,538 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.data + +import cats.syntax.either.* +import cats.syntax.traverse.* +import com.daml.lf.transaction.TransactionVersion +import com.daml.lf.value.{Value, ValueCoder} +import com.digitalasset.canton.ProtoDeserializationError.{ + FieldNotSet, + OtherError, + ValueDeserializationError, +} +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.protocol.ContractIdSyntax.* +import com.digitalasset.canton.protocol.LfHashSyntax.* +import com.digitalasset.canton.protocol.RefIdentifierSyntax.* +import com.digitalasset.canton.protocol.{ + GlobalKeySerialization, + LfActionNode, + LfContractId, + LfGlobalKey, + LfHash, + LfNodeCreate, + LfNodeExercises, + LfNodeFetch, + LfNodeLookupByKey, + LfTemplateId, + LfTransactionVersion, + RefIdentifierSyntax, + v2, +} +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.util.NoCopy +import com.digitalasset.canton.version.{ + HasProtocolVersionedCompanion, + HasProtocolVersionedWrapper, + ProtoVersion, + ProtocolVersion, + RepresentativeProtocolVersion, +} +import com.digitalasset.canton.{LfChoiceName, LfInterfaceId, LfPartyId, LfVersioned} +import com.google.protobuf.ByteString + +/** Summarizes the information that is needed in addition to the other fields of [[ViewParticipantData]] for + * determining the root action of a view. + */ +sealed trait ActionDescription + extends Product + with Serializable + with PrettyPrinting + with HasProtocolVersionedWrapper[ActionDescription] { + + /** Whether the root action was a byKey action (exerciseByKey, fetchByKey, lookupByKey) */ + def byKey: Boolean + + /** The node seed for the root action of a view. 
Empty for fetch and lookupByKey nodes */ + def seedOption: Option[LfHash] + + /** The lf transaction version of the node */ + def version: LfTransactionVersion + + @transient override protected lazy val companionObj: ActionDescription.type = + ActionDescription + + protected def toProtoDescriptionV2: v2.ActionDescription.Description + + def toProtoV2: v2.ActionDescription = + v2.ActionDescription(description = toProtoDescriptionV2) + +} + +object ActionDescription extends HasProtocolVersionedCompanion[ActionDescription] { + override lazy val name: String = "ActionDescription" + + val supportedProtoVersions: SupportedProtoVersions = SupportedProtoVersions( + ProtoVersion(2) -> VersionedProtoConverter(ProtocolVersion.v30)(v2.ActionDescription)( + supportedProtoVersion(_)(fromProtoV2), + _.toProtoV2.toByteString, + ) + ) + + final case class InvalidActionDescription(message: String) + extends RuntimeException(message) + with PrettyPrinting { + override def pretty: Pretty[InvalidActionDescription] = prettyOfClass( + unnamedParam(_.message.unquoted) + ) + } + + def tryFromLfActionNode( + actionNode: LfActionNode, + seedO: Option[LfHash], + protocolVersion: ProtocolVersion, + ): ActionDescription = + fromLfActionNode(actionNode, seedO, protocolVersion).valueOr(err => throw err) + + /** Extracts the action description from an LF node and the optional seed. + * @param seedO Must be set iff `node` is a [[com.digitalasset.canton.protocol.LfNodeCreate]] or [[com.digitalasset.canton.protocol.LfNodeExercises]]. 
+ */ + def fromLfActionNode( + actionNode: LfActionNode, + seedO: Option[LfHash], + protocolVersion: ProtocolVersion, + ): Either[InvalidActionDescription, ActionDescription] = + actionNode match { + case LfNodeCreate( + contractId, + _templateId, + _arg, + _agreementText, + _signatories, + _stakeholders, + _key, + version, + ) => + for { + seed <- seedO.toRight(InvalidActionDescription("No seed for a Create node given")) + } yield CreateActionDescription(contractId, seed, version)( + protocolVersionRepresentativeFor(protocolVersion) + ) + + case LfNodeExercises( + inputContract, + templateId, + interfaceId, + choice, + _consuming, + actors, + chosenValue, + _stakeholders, + _signatories, + _choiceObservers, + _choiceAuthorizers, + _children, + exerciseResult, + _key, + byKey, + version, + ) => + for { + seed <- seedO.toRight(InvalidActionDescription("No seed for an Exercise node given")) + actionDescription <- ExerciseActionDescription.create( + inputContract, + Some(templateId), + choice, + interfaceId, + chosenValue, + actors, + byKey, + seed, + version, + failed = exerciseResult.isEmpty, // absence of exercise result indicates failure + protocolVersionRepresentativeFor(protocolVersion), + ) + } yield actionDescription + + case LfNodeFetch( + inputContract, + _templateId, + actingParties, + _signatories, + _stakeholders, + _key, + byKey, + version, + ) => + for { + _ <- Either.cond( + seedO.isEmpty, + (), + InvalidActionDescription("No seed should be given for a Fetch node"), + ) + actors <- Either.cond( + actingParties.nonEmpty, + actingParties, + InvalidActionDescription("Fetch node without acting parties"), + ) + } yield FetchActionDescription(inputContract, actors, byKey, version)( + protocolVersionRepresentativeFor(protocolVersion) + ) + + case LfNodeLookupByKey(_, keyWithMaintainers, _result, version) => + for { + _ <- Either.cond( + seedO.isEmpty, + (), + InvalidActionDescription("No seed should be given for a LookupByKey node"), + ) + actionDescription 
<- LookupByKeyActionDescription.create( + keyWithMaintainers.globalKey, + version, + protocolVersionRepresentativeFor(protocolVersion), + ) + } yield actionDescription + } + + private def fromCreateProtoV2( + c: v2.ActionDescription.CreateActionDescription, + pv: RepresentativeProtocolVersion[ActionDescription.type], + ): ParsingResult[CreateActionDescription] = { + val v2.ActionDescription.CreateActionDescription(contractIdP, seedP, versionP) = c + for { + contractId <- ProtoConverter.parseLfContractId(contractIdP) + seed <- LfHash.fromProtoPrimitive("node_seed", seedP) + version <- lfVersionFromProtoVersioned(versionP) + } yield CreateActionDescription(contractId, seed, version)(pv) + } + + private def choiceFromProto(choiceP: String): ParsingResult[LfChoiceName] = + LfChoiceName + .fromString(choiceP) + .leftMap(err => ValueDeserializationError("choice", err)) + + private def fromExerciseProtoV2( + e: v2.ActionDescription.ExerciseActionDescription, + pv: RepresentativeProtocolVersion[ActionDescription.type], + ): ParsingResult[ExerciseActionDescription] = { + val v2.ActionDescription.ExerciseActionDescription( + inputContractIdP, + choiceP, + chosenValueB, + actorsP, + byKey, + seedP, + versionP, + failed, + interfaceIdP, + templateIdP, + ) = e + for { + inputContractId <- ProtoConverter.parseLfContractId(inputContractIdP) + templateId <- templateIdP.traverse(RefIdentifierSyntax.fromProtoPrimitive) + choice <- choiceFromProto(choiceP) + interfaceId <- interfaceIdP.traverse(RefIdentifierSyntax.fromProtoPrimitive) + version <- lfVersionFromProtoVersioned(versionP) + chosenValue <- ValueCoder + .decodeValue(ValueCoder.CidDecoder, version, chosenValueB) + .leftMap(err => ValueDeserializationError("chosen_value", err.errorMessage)) + actors <- actorsP.traverse(ProtoConverter.parseLfPartyId).map(_.toSet) + seed <- LfHash.fromProtoPrimitive("node_seed", seedP) + actionDescription <- ExerciseActionDescription + .create( + inputContractId, + templateId, + choice, + 
interfaceId, + chosenValue, + actors, + byKey, + seed, + version, + failed, + pv, + ) + .leftMap(err => OtherError(err.message)) + } yield actionDescription + } + + private def fromLookupByKeyProtoV2( + k: v2.ActionDescription.LookupByKeyActionDescription, + pv: RepresentativeProtocolVersion[ActionDescription.type], + ): ParsingResult[LookupByKeyActionDescription] = { + val v2.ActionDescription.LookupByKeyActionDescription(keyP) = k + for { + key <- ProtoConverter + .required("key", keyP) + .flatMap(GlobalKeySerialization.fromProtoV0) + actionDescription <- LookupByKeyActionDescription + .create(key.unversioned, key.version, pv) + .leftMap(err => OtherError(err.message)) + } yield actionDescription + } + + private def fromFetchProtoV2( + f: v2.ActionDescription.FetchActionDescription, + pv: RepresentativeProtocolVersion[ActionDescription.type], + ): ParsingResult[FetchActionDescription] = { + val v2.ActionDescription.FetchActionDescription(inputContractIdP, actorsP, byKey, versionP) = f + for { + inputContractId <- ProtoConverter.parseLfContractId(inputContractIdP) + actors <- actorsP.traverse(ProtoConverter.parseLfPartyId).map(_.toSet) + version <- lfVersionFromProtoVersioned(versionP) + } yield FetchActionDescription(inputContractId, actors, byKey, version)(pv) + } + + private[data] def fromProtoV2( + actionDescriptionP: v2.ActionDescription + ): ParsingResult[ActionDescription] = { + import v2.ActionDescription.Description.* + val v2.ActionDescription(description) = actionDescriptionP + + val pv = protocolVersionRepresentativeFor(ProtoVersion(2)) + + description match { + case Create(create) => fromCreateProtoV2(create, pv) + case Exercise(exercise) => fromExerciseProtoV2(exercise, pv) + case Fetch(fetch) => fromFetchProtoV2(fetch, pv) + case LookupByKey(lookup) => fromLookupByKeyProtoV2(lookup, pv) + case Empty => Left(FieldNotSet("description")) + } + } + + private def lfVersionFromProtoVersioned( + versionP: String + ): ParsingResult[LfTransactionVersion] = 
TransactionVersion.All + .find(_.protoValue == versionP) + .toRight(s"Unsupported transaction version $versionP") + .leftMap(ValueDeserializationError("version", _)) + + def serializeChosenValue( + chosenValue: Value, + transactionVersion: LfTransactionVersion, + ): Either[String, ByteString] = + ValueCoder + .encodeValue(ValueCoder.CidEncoder, transactionVersion, chosenValue) + .leftMap(_.errorMessage) + + final case class CreateActionDescription( + contractId: LfContractId, + seed: LfHash, + override val version: LfTransactionVersion, + )( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + ActionDescription.type + ] + ) extends ActionDescription { + override def byKey: Boolean = false + + override def seedOption: Option[LfHash] = Some(seed) + + override protected def toProtoDescriptionV2: v2.ActionDescription.Description.Create = + v2.ActionDescription.Description.Create( + v2.ActionDescription.CreateActionDescription( + contractId = contractId.toProtoPrimitive, + nodeSeed = seed.toProtoPrimitive, + version = version.protoValue, + ) + ) + + override def pretty: Pretty[CreateActionDescription] = prettyOfClass( + param("contract Id", _.contractId), + param("seed", _.seed), + param("version", _.version), + ) + } + + /** @throws InvalidActionDescription if the `chosen_value` cannot be serialized */ + final case class ExerciseActionDescription private ( + inputContractId: LfContractId, + templateId: Option[LfTemplateId], + choice: LfChoiceName, + interfaceId: Option[LfInterfaceId], + chosenValue: Value, + actors: Set[LfPartyId], + override val byKey: Boolean, + seed: LfHash, + override val version: LfTransactionVersion, + failed: Boolean, + )( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + ActionDescription.type + ] + ) extends ActionDescription { + + private val serializedChosenValue: ByteString = serializeChosenValue(chosenValue, version) + .valueOr(err => throw InvalidActionDescription(s"Failed to 
serialize chosen value: $err")) + + override def seedOption: Option[LfHash] = Some(seed) + + override protected def toProtoDescriptionV2: v2.ActionDescription.Description.Exercise = + v2.ActionDescription.Description.Exercise( + v2.ActionDescription.ExerciseActionDescription( + inputContractId = inputContractId.toProtoPrimitive, + templateId = templateId.map(i => new RefIdentifierSyntax(i).toProtoPrimitive), + choice = choice, + interfaceId = interfaceId.map(i => new RefIdentifierSyntax(i).toProtoPrimitive), + chosenValue = serializedChosenValue, + actors = actors.toSeq, + byKey = byKey, + nodeSeed = seed.toProtoPrimitive, + version = version.protoValue, + failed = failed, + ) + ) + + override def pretty: Pretty[ExerciseActionDescription] = prettyOfClass( + param("input contract id", _.inputContractId), + param("template id", _.templateId), + param("choice", _.choice.unquoted), + param("chosen value", _.chosenValue), + param("actors", _.actors), + paramIfTrue("by key", _.byKey), + param("seed", _.seed), + param("version", _.version), + paramIfTrue("failed", _.failed), + ) + } + + object ExerciseActionDescription { + def tryCreate( + inputContractId: LfContractId, + templateId: Option[LfTemplateId], + choice: LfChoiceName, + interfaceId: Option[LfInterfaceId], + chosenValue: Value, + actors: Set[LfPartyId], + byKey: Boolean, + seed: LfHash, + version: LfTransactionVersion, + failed: Boolean, + protocolVersion: RepresentativeProtocolVersion[ActionDescription.type], + ): ExerciseActionDescription = create( + inputContractId, + templateId, + choice, + interfaceId, + chosenValue, + actors, + byKey, + seed, + version, + failed, + protocolVersion, + ).fold(err => throw err, identity) + + def create( + inputContractId: LfContractId, + templateId: Option[LfTemplateId], + choice: LfChoiceName, + interfaceId: Option[LfInterfaceId], + chosenValue: Value, + actors: Set[LfPartyId], + byKey: Boolean, + seed: LfHash, + version: LfTransactionVersion, + failed: Boolean, + 
protocolVersion: RepresentativeProtocolVersion[ActionDescription.type], + ): Either[InvalidActionDescription, ExerciseActionDescription] = + Either.catchOnly[InvalidActionDescription]( + ExerciseActionDescription( + inputContractId, + templateId, + choice, + interfaceId, + chosenValue, + actors, + byKey, + seed, + version, + failed, + )(protocolVersion) + ) + + } + + final case class FetchActionDescription( + inputContractId: LfContractId, + actors: Set[LfPartyId], + override val byKey: Boolean, + override val version: LfTransactionVersion, + )( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + ActionDescription.type + ] + ) extends ActionDescription + with NoCopy { + + override def seedOption: Option[LfHash] = None + + override protected def toProtoDescriptionV2: v2.ActionDescription.Description.Fetch = + v2.ActionDescription.Description.Fetch( + v2.ActionDescription.FetchActionDescription( + inputContractId = inputContractId.toProtoPrimitive, + actors = actors.toSeq, + byKey = byKey, + version = version.protoValue, + ) + ) + + override def pretty: Pretty[FetchActionDescription] = prettyOfClass( + param("input contract id", _.inputContractId), + param("actors", _.actors), + paramIfTrue("by key", _.byKey), + param("version", _.version), + ) + } + + final case class LookupByKeyActionDescription private ( + key: LfGlobalKey, + override val version: LfTransactionVersion, + )( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + ActionDescription.type + ] + ) extends ActionDescription { + + private val serializedKey = + GlobalKeySerialization + .toProto(LfVersioned(version, key)) + .valueOr(err => throw InvalidActionDescription(s"Failed to serialize key: $err")) + + override def byKey: Boolean = true + + override def seedOption: Option[LfHash] = None + + override protected def toProtoDescriptionV2: v2.ActionDescription.Description.LookupByKey = + v2.ActionDescription.Description.LookupByKey( + 
v2.ActionDescription.LookupByKeyActionDescription( + key = Some(serializedKey) + ) + ) + + override def pretty: Pretty[LookupByKeyActionDescription] = prettyOfClass( + param("key", _.key), + param("version", _.version), + ) + } + + object LookupByKeyActionDescription { + def tryCreate( + key: LfGlobalKey, + version: LfTransactionVersion, + protocolVersion: RepresentativeProtocolVersion[ActionDescription.type], + ): LookupByKeyActionDescription = + new LookupByKeyActionDescription(key, version)(protocolVersion) + + def create( + key: LfGlobalKey, + version: LfTransactionVersion, + protocolVersion: RepresentativeProtocolVersion[ActionDescription.type], + ): Either[InvalidActionDescription, LookupByKeyActionDescription] = + Either.catchOnly[InvalidActionDescription](tryCreate(key, version, protocolVersion)) + + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/CantonTimestamp.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/CantonTimestamp.scala new file mode 100644 index 0000000000..d767ab2f5b --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/CantonTimestamp.scala @@ -0,0 +1,129 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.data + +import cats.Order +import cats.syntax.either.* +import com.digitalasset.canton.LfTimestamp +import com.digitalasset.canton.ProtoDeserializationError.TimestampConversionError +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.time.RefinedDuration +import com.digitalasset.canton.util.TryUtil +import com.google.protobuf.timestamp.Timestamp as ProtoTimestamp +import slick.jdbc.{GetResult, SetParameter} + +import java.time.{Duration, Instant} +import java.util.Date + +/** A timestamp implementation for canton, which currently uses a [[LfTimestamp]]. + * @param underlying A [[LfTimestamp]], holding the value of this [[CantonTimestamp]]. + */ +final case class CantonTimestamp(underlying: LfTimestamp) + extends Ordered[CantonTimestamp] + with Timestamp { + + def minus(d: Duration): CantonTimestamp = new CantonTimestamp( + underlying.add(Duration.ZERO.minus(d)) + ) + + def plus(d: Duration): CantonTimestamp = new CantonTimestamp(underlying.add(d)) + + def add(d: Duration): CantonTimestamp = new CantonTimestamp(underlying.add(d)) + + def addMicros(micros: Long): CantonTimestamp = new CantonTimestamp(underlying.addMicros(micros)) + + def plusSeconds(seconds: Long): CantonTimestamp = + new CantonTimestamp(underlying.add(Duration.ofSeconds(seconds))) + + def minusSeconds(seconds: Long): CantonTimestamp = this.minus(Duration.ofSeconds(seconds)) + + def plusMillis(millis: Long): CantonTimestamp = new CantonTimestamp( + underlying.add(Duration.ofMillis(millis)) + ) + + def minusMillis(millis: Long): CantonTimestamp = + new CantonTimestamp(underlying.add(Duration.ZERO.minus(Duration.ofMillis(millis)))) + + def immediatePredecessor: CantonTimestamp = new CantonTimestamp(underlying.addMicros(-1L)) + + def immediateSuccessor: CantonTimestamp = new CantonTimestamp(underlying.addMicros(1L)) 
+ + override def compare(that: CantonTimestamp): Int = underlying.compare(that.underlying) + + override def compareTo(other: CantonTimestamp): Int = underlying.compareTo(other.underlying) + + def min(that: CantonTimestamp): CantonTimestamp = if (compare(that) > 0) that else this + + def max(that: CantonTimestamp): CantonTimestamp = if (compare(that) > 0) this else that + + def -(other: CantonTimestamp): Duration = + Duration.ofNanos(1000L * (this.underlying.micros - other.underlying.micros)) + + def +(duration: RefinedDuration): CantonTimestamp = plus(duration.unwrap) + def -(duration: RefinedDuration): CantonTimestamp = minus(duration.unwrap) + + def <=(other: CantonTimestampSecond): Boolean = this <= other.forgetRefinement + def <(other: CantonTimestampSecond): Boolean = this < other.forgetRefinement + + def >=(other: CantonTimestampSecond): Boolean = this >= other.forgetRefinement + def >(other: CantonTimestampSecond): Boolean = this > other.forgetRefinement +} + +object CantonTimestamp { + + def Epoch: CantonTimestamp = new CantonTimestamp(LfTimestamp.Epoch) + + def MinValue: CantonTimestamp = new CantonTimestamp(LfTimestamp.MinValue) + + def MaxValue: CantonTimestamp = new CantonTimestamp(LfTimestamp.MaxValue) + + def fromProtoPrimitive(ts: ProtoTimestamp): ParsingResult[CantonTimestamp] = { + for { + instant <- ProtoConverter.InstantConverter.fromProtoPrimitive(ts) + ts <- LfTimestamp.fromInstant(instant).left.map(err => TimestampConversionError(err)) + } yield new CantonTimestamp(ts) + } + + def ofEpochSecond(seconds: Long): CantonTimestamp = + new CantonTimestamp(LfTimestamp.assertFromLong(micros = seconds * 1000 * 1000)) + + def ofEpochMilli(milli: Long): CantonTimestamp = + new CantonTimestamp(LfTimestamp.assertFromLong(micros = milli * 1000)) + + def ofEpochMicro(micros: Long): CantonTimestamp = assertFromLong(micros) + + /** Get Instant.now (try to use clock.now instead!) + * + * Generally, try to use clock.now except for tests. 
Clock.now supports sim-clock such that + * we can perform static time tests. + */ + def now(): CantonTimestamp = new CantonTimestamp(LfTimestamp.assertFromInstant(Instant.now())) + + def fromInstant(i: Instant): Either[String, CantonTimestamp] = + LfTimestamp.fromInstant(i).map(t => new CantonTimestamp(t)) + + def fromDate(javaDate: Date): Either[String, CantonTimestamp] = + for { + instant <- TryUtil.tryCatchInterrupted(javaDate.toInstant).toEither.leftMap(_.getMessage) + cantonTimestamp <- CantonTimestamp.fromInstant(instant) + } yield cantonTimestamp + + def assertFromInstant(i: Instant) = new CantonTimestamp(LfTimestamp.assertFromInstant(i)) + def assertFromLong(micros: Long) = new CantonTimestamp(LfTimestamp.assertFromLong(micros)) + + implicit val orderCantonTimestamp: Order[CantonTimestamp] = Order.fromOrdering + + // Timestamps are stored as microseconds relative to EPOCH in a `bigint` rather than a SQL `timestamp`. + // This avoids all the time zone conversions introduced by various layers that are hard to make consistent + // across databases. 
+ implicit val setParameterTimestamp: SetParameter[CantonTimestamp] = (v, pp) => + pp.setLong(v.toMicros) + implicit val setParameterOptionTimestamp: SetParameter[Option[CantonTimestamp]] = (v, pp) => + pp.setLongOption(v.map(_.toMicros)) + implicit val getResultTimestamp: GetResult[CantonTimestamp] = + GetResult(r => CantonTimestamp.assertFromLong(r.nextLong())) + implicit val getResultOptionTimestamp: GetResult[Option[CantonTimestamp]] = + GetResult(r => r.nextLongOption().map(CantonTimestamp.assertFromLong)) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/CantonTimestampSecond.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/CantonTimestampSecond.scala new file mode 100644 index 0000000000..587a5d8848 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/CantonTimestampSecond.scala @@ -0,0 +1,136 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.data + +import cats.kernel.Order +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.time.PositiveSeconds +import com.digitalasset.canton.{LfTimestamp, ProtoDeserializationError} +import com.google.protobuf.timestamp.{Timestamp as ProtoTimestamp} +import slick.jdbc.{GetResult, SetParameter} + +import java.time.{Duration, Instant} + +/** A timestamp implementation for canton, which currently uses a [[LfTimestamp]], + * which is rounded to the second. + * + * @param underlying A [[LfTimestamp]], holding the value of this [[CantonTimestampSecond]]. 
+ */ +final case class CantonTimestampSecond private (underlying: LfTimestamp) + extends Ordered[CantonTimestampSecond] + with Timestamp { + + require(microsOverSecond() == 0, s"Timestamp $underlying must be rounded to the second") + + def forgetRefinement: CantonTimestamp = CantonTimestamp(underlying) + + def plusSeconds(seconds: Long): CantonTimestampSecond = + CantonTimestampSecond(underlying.add(Duration.ofSeconds(seconds))) + + def minusSeconds(seconds: Long): CantonTimestampSecond = plusSeconds(-seconds) + + override def compare(that: CantonTimestampSecond): Int = underlying.compare(that.underlying) + + override def compareTo(other: CantonTimestampSecond): Int = underlying.compareTo(other.underlying) + + def -(other: CantonTimestampSecond): Duration = + Duration.ofNanos(1000L * (this.underlying.micros - other.underlying.micros)) + + def +(duration: PositiveSeconds): CantonTimestampSecond = CantonTimestampSecond( + underlying.add(duration.duration) + ) + + def -(duration: PositiveSeconds): CantonTimestampSecond = CantonTimestampSecond( + underlying.add(Duration.ZERO.minus(duration.duration)) + ) + + def >(other: CantonTimestamp): Boolean = forgetRefinement > other + def >=(other: CantonTimestamp): Boolean = forgetRefinement >= other + + def <(other: CantonTimestamp): Boolean = forgetRefinement < other + def <=(other: CantonTimestamp): Boolean = forgetRefinement <= other + +} + +object CantonTimestampSecond { + def max( + timestamp: CantonTimestampSecond, + timestamps: CantonTimestampSecond* + ): CantonTimestampSecond = { + timestamps.foldLeft(timestamp) { case (a, b) => + if (a > b) a else b + } + } + + def min( + timestamp: CantonTimestampSecond, + timestamps: CantonTimestampSecond* + ): CantonTimestampSecond = { + timestamps.foldLeft(timestamp) { case (a, b) => + if (a < b) a else b + } + } + + def Epoch = CantonTimestampSecond(LfTimestamp.Epoch) + + def MinValue = CantonTimestampSecond(LfTimestamp.MinValue) + + def fromProtoPrimitive(ts: ProtoTimestamp): 
ParsingResult[CantonTimestampSecond] = { + for { + instant <- ProtoConverter.InstantConverter.fromProtoPrimitive(ts) + ts <- CantonTimestampSecond + .fromInstant(instant) + .left + .map(ProtoDeserializationError.InvariantViolation(_)) + } yield ts + } + + def ofEpochSecond(seconds: Long): CantonTimestampSecond = + CantonTimestampSecond(LfTimestamp.assertFromLong(micros = seconds * 1000 * 1000)) + + def fromInstant(i: Instant): Either[String, CantonTimestampSecond] = + for { + _ <- Either.cond(i.getNano == 0, (), s"Timestamp $i is not rounded to the second") + ts <- LfTimestamp.fromInstant(i) + } yield CantonTimestampSecond(ts) + + def fromCantonTimestamp(ts: CantonTimestamp): Either[String, CantonTimestampSecond] = + Either.cond( + ts.microsOverSecond() == 0, + CantonTimestampSecond(ts.underlying), + s"Timestamp $ts is not rounded to the second", + ) + + /** @param ts + * @return `ts` if `ts` is already rounded to the second, the previous rounded timestamp otherwise. + */ + def floor(ts: CantonTimestamp): CantonTimestampSecond = + if (ts.microsOverSecond() == 0) CantonTimestampSecond(ts.underlying) + else CantonTimestampSecond.ofEpochSecond(ts.getEpochSecond) + + /** @param ts + * @return `ts` if `ts` is already rounded to the second, the next rounded timestamp otherwise. + */ + def ceil(ts: CantonTimestamp): CantonTimestampSecond = + if (ts.microsOverSecond() == 0) CantonTimestampSecond(ts.underlying) + else CantonTimestampSecond.ofEpochSecond(ts.getEpochSecond + 1) + + def assertFromInstant(i: Instant) = CantonTimestampSecond(LfTimestamp.assertFromInstant(i)) + def assertFromLong(micros: Long) = CantonTimestampSecond(LfTimestamp.assertFromLong(micros)) + + implicit val orderCantonTimestampSecond: Order[CantonTimestampSecond] = Order.fromOrdering + + // Timestamps are stored as microseconds relative to EPOCH in a `bigint` rather than a SQL `timestamp`. 
+ // This avoids all the time zone conversions introduced by various layers that are hard to make consistent + // across databases. + implicit val setParameterTimestamp: SetParameter[CantonTimestampSecond] = (v, pp) => + pp.setLong(v.toMicros) + implicit val setParameterOptionTimestamp: SetParameter[Option[CantonTimestampSecond]] = (v, pp) => + pp.setLongOption(v.map(_.toMicros)) + implicit val getResultTimestamp: GetResult[CantonTimestampSecond] = + GetResult(r => CantonTimestampSecond.assertFromLong(r.nextLong())) + implicit val getResultOptionTimestamp: GetResult[Option[CantonTimestampSecond]] = + GetResult(r => r.nextLongOption().map(CantonTimestampSecond.assertFromLong)) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/CommonMetadata.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/CommonMetadata.scala new file mode 100644 index 0000000000..a2bd7aa691 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/CommonMetadata.scala @@ -0,0 +1,131 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.data + +import cats.syntax.either.* +import com.digitalasset.canton.* +import com.digitalasset.canton.crypto.* +import com.digitalasset.canton.logging.pretty.Pretty +import com.digitalasset.canton.protocol.{ConfirmationPolicy, v1} +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.serialization.{ProtoConverter, ProtocolVersionedMemoizedEvidence} +import com.digitalasset.canton.topology.* +import com.digitalasset.canton.version.* +import com.google.protobuf.ByteString + +import java.util.UUID + +/** Information concerning every '''member''' involved in the underlying transaction. 
+ * + * @param confirmationPolicy determines who must confirm the request + */ +final case class CommonMetadata private ( + confirmationPolicy: ConfirmationPolicy, + domainId: DomainId, + mediator: MediatorRef, + salt: Salt, + uuid: UUID, +)( + hashOps: HashOps, + override val representativeProtocolVersion: RepresentativeProtocolVersion[CommonMetadata.type], + override val deserializedFrom: Option[ByteString], +) extends MerkleTreeLeaf[CommonMetadata](hashOps) + with HasProtocolVersionedWrapper[CommonMetadata] + with ProtocolVersionedMemoizedEvidence { + + override protected[this] def toByteStringUnmemoized: ByteString = + super[HasProtocolVersionedWrapper].toByteString + + override val hashPurpose: HashPurpose = HashPurpose.CommonMetadata + + override def pretty: Pretty[CommonMetadata] = prettyOfClass( + param("confirmation policy", _.confirmationPolicy), + param("domain id", _.domainId), + param("mediator", _.mediator), + param("uuid", _.uuid), + param("salt", _.salt), + ) + + @transient override protected lazy val companionObj: CommonMetadata.type = CommonMetadata + + private def toProtoV1: v1.CommonMetadata = { + v1.CommonMetadata( + confirmationPolicy = confirmationPolicy.toProtoPrimitive, + domainId = domainId.toProtoPrimitive, + salt = Some(salt.toProtoV0), + uuid = ProtoConverter.UuidConverter.toProtoPrimitive(uuid), + mediator = mediator.toProtoPrimitive, + ) + } +} + +object CommonMetadata + extends HasMemoizedProtocolVersionedWithContextCompanion[ + CommonMetadata, + HashOps, + ] { + override val name: String = "CommonMetadata" + + val supportedProtoVersions = SupportedProtoVersions( + ProtoVersion(1) -> VersionedProtoConverter(ProtocolVersion.v30)(v1.CommonMetadata)( + supportedProtoVersionMemoized(_)(fromProtoV1), + _.toProtoV1.toByteString, + ) + ) + + def create( + hashOps: HashOps, + protocolVersion: ProtocolVersion, + )( + confirmationPolicy: ConfirmationPolicy, + domain: DomainId, + mediator: MediatorRef, + salt: Salt, + uuid: UUID, + ): 
CommonMetadata = create( + hashOps, + protocolVersionRepresentativeFor(protocolVersion), + )(confirmationPolicy, domain, mediator, salt, uuid) + + def create( + hashOps: HashOps, + protocolVersion: RepresentativeProtocolVersion[CommonMetadata.type], + )( + confirmationPolicy: ConfirmationPolicy, + domain: DomainId, + mediator: MediatorRef, + salt: Salt, + uuid: UUID, + ): CommonMetadata = + CommonMetadata(confirmationPolicy, domain, mediator, salt, uuid)( + hashOps, + protocolVersion, + None, + ) + + private def fromProtoV1(hashOps: HashOps, metaDataP: v1.CommonMetadata)( + bytes: ByteString + ): ParsingResult[CommonMetadata] = + for { + confirmationPolicy <- ConfirmationPolicy + .fromProtoPrimitive(metaDataP.confirmationPolicy) + .leftMap(e => + ProtoDeserializationError.ValueDeserializationError("confirmationPolicy", e.show) + ) + v1.CommonMetadata(saltP, _confirmationPolicyP, domainIdP, uuidP, mediatorP) = metaDataP + domainUid <- UniqueIdentifier + .fromProtoPrimitive_(domainIdP) + .leftMap(ProtoDeserializationError.ValueDeserializationError("domainId", _)) + mediator <- MediatorRef + .fromProtoPrimitive(mediatorP, "CommonMetadata.mediator_id") + salt <- ProtoConverter + .parseRequired(Salt.fromProtoV0, "salt", saltP) + .leftMap(_.inField("salt")) + uuid <- ProtoConverter.UuidConverter.fromProtoPrimitive(uuidP).leftMap(_.inField("uuid")) + } yield CommonMetadata(confirmationPolicy, DomainId(domainUid), mediator, salt, uuid)( + hashOps, + protocolVersionRepresentativeFor(ProtoVersion(1)), + Some(bytes), + ) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/Counter.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/Counter.scala new file mode 100644 index 0000000000..6875d6ea09 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/Counter.scala @@ -0,0 +1,66 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.data + +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import slick.jdbc.{GetResult, SetParameter} + +final case class Counter[Discr](v: Long) extends Ordered[Counter[Discr]] with PrettyPrinting { + def unwrap: Long = v + def toProtoPrimitive: Long = v + + def +(i: Long): Counter[Discr] = Counter(v + i) + def -(i: Long): Counter[Discr] = Counter(v - i) + + def +(i: Int): Counter[Discr] = Counter(v + i) + + def increment: Either[String, Counter[Discr]] = + Either.cond(this.isNotMaxValue, this + 1, "Counter Overflow") + + def -(i: Int): Counter[Discr] = Counter(v - i) + + def +(other: Counter[Discr]): Counter[Discr] = Counter(v + other.v) + def -(other: Counter[Discr]): Long = v - other.v + + def max(other: Counter[Discr]): Counter[Discr] = Counter[Discr](v.max(other.v)) + + def until(end: Counter[Discr]): Seq[Counter[Discr]] = (v until end.v).map(Counter(_)) + def to(end: Counter[Discr]): Seq[Counter[Discr]] = (v to end.v).map(Counter(_)) + + def isMaxValue: Boolean = v == Long.MaxValue + def isNotMaxValue: Boolean = !isMaxValue + + override def compare(that: Counter[Discr]): Int = v.compare(that.v) + + override def pretty: Pretty[Counter.this.type] = prettyOfString(_ => v.toString) +} + +trait CounterCompanion[T] { + + /** The request counter assigned to the first request in the lifetime of a participant */ + val Genesis: Counter[T] = Counter[T](0) + val MaxValue: Counter[T] = Counter[T](Long.MaxValue) + val MinValue: Counter[T] = Counter[T](Long.MinValue) + + def apply(i: Long): Counter[T] = Counter[T](i) + + def apply(i: Int): Counter[T] = Counter[T](i.toLong) + + def unapply(sc: Counter[T]): Option[Long] = Some(sc.unwrap) +} + +object Counter { + def MaxValue[Discr]: Counter[Discr] = Counter[Discr](Long.MaxValue) + def MinValue[Discr]: Counter[Discr] = Counter[Discr](Long.MinValue) + + implicit def getResult[Discr]: GetResult[Counter[Discr]] = + GetResult(r => 
Counter[Discr](r.nextLong())) + implicit def getResultO[Discr]: GetResult[Option[Counter[Discr]]] = + GetResult(r => r.nextLongOption().map(Counter[Discr])) + + implicit def setParameter[Discr]: SetParameter[Counter[Discr]] = { (value, pp) => pp >> value.v } + implicit def setParameterO[Discr]: SetParameter[Option[Counter[Discr]]] = { (value, pp) => + pp >> value.map(_.v) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/FullInformeeTree.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/FullInformeeTree.scala new file mode 100644 index 0000000000..26d6af9e84 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/FullInformeeTree.scala @@ -0,0 +1,151 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.data + +import cats.syntax.either.* +import com.digitalasset.canton.* +import com.digitalasset.canton.config.RequireTypes.NonNegativeInt +import com.digitalasset.canton.crypto.* +import com.digitalasset.canton.data.InformeeTree.InvalidInformeeTree +import com.digitalasset.canton.data.MerkleTree.* +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.protocol.* +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.topology.* +import com.digitalasset.canton.version.* +import com.google.common.annotations.VisibleForTesting +import monocle.Lens + +import java.util.UUID + +/** Wraps a [[GenTransactionTree]] that is also a full informee tree. 
+ */ +// private constructor, because object invariants are checked by factory methods +final case class FullInformeeTree private (tree: GenTransactionTree)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[FullInformeeTree.type] +) extends HasProtocolVersionedWrapper[FullInformeeTree] + with PrettyPrinting { + + def validated: Either[String, this.type] = for { + _ <- InformeeTree.checkGlobalMetadata(tree) + _ <- InformeeTree.checkViews(tree.rootViews, assertFull = true) + } yield this + + @transient override protected lazy val companionObj: FullInformeeTree.type = FullInformeeTree + + lazy val transactionId: TransactionId = TransactionId.fromRootHash(tree.rootHash) + + private lazy val commonMetadata: CommonMetadata = checked(tree.commonMetadata.tryUnwrap) + lazy val domainId: DomainId = commonMetadata.domainId + lazy val mediator: MediatorRef = commonMetadata.mediator + + /** Yields the informee tree unblinded for a defined set of parties. + * If a view common data is already blinded, then it remains blinded even if one of the given parties is a stakeholder. 
+ */ + def informeeTreeUnblindedFor( + parties: collection.Set[LfPartyId], + protocolVersion: ProtocolVersion, + ): InformeeTree = { + val rawResult = tree + .blind({ + case _: GenTransactionTree => RevealIfNeedBe + case _: SubmitterMetadata => BlindSubtree + case _: CommonMetadata => RevealSubtree + case _: ParticipantMetadata => BlindSubtree + case _: TransactionView => RevealIfNeedBe + case v: ViewCommonData => + if (v.informees.map(_.party).intersect(parties).nonEmpty) + RevealSubtree + else + BlindSubtree + case _: ViewParticipantData => BlindSubtree + }) + .tryUnwrap + InformeeTree.tryCreate(rawResult, protocolVersion) + } + + lazy val informeesAndThresholdByViewHash: Map[ViewHash, (Set[Informee], NonNegativeInt)] = + InformeeTree.viewCommonDataByViewHash(tree).map { case (hash, viewCommonData) => + hash -> ((viewCommonData.informees, viewCommonData.threshold)) + } + + lazy val informeesAndThresholdByViewPosition: Map[ViewPosition, (Set[Informee], NonNegativeInt)] = + InformeeTree.viewCommonDataByViewPosition(tree).map { case (position, viewCommonData) => + position -> ((viewCommonData.informees, viewCommonData.threshold)) + } + + lazy val allInformees: Set[LfPartyId] = InformeeTree + .viewCommonDataByViewPosition(tree) + .flatMap { case (_, viewCommonData) => viewCommonData.informees } + .map(_.party) + .toSet + + lazy val transactionUuid: UUID = checked(tree.commonMetadata.tryUnwrap).uuid + + lazy val confirmationPolicy: ConfirmationPolicy = checked( + tree.commonMetadata.tryUnwrap + ).confirmationPolicy + + def toProtoV1: v1.FullInformeeTree = + v1.FullInformeeTree(tree = Some(tree.toProtoV1)) + + override def pretty: Pretty[FullInformeeTree] = prettyOfParam(_.tree) +} + +object FullInformeeTree + extends HasProtocolVersionedWithContextCompanion[FullInformeeTree, HashOps] { + override val name: String = "FullInformeeTree" + + val supportedProtoVersions: SupportedProtoVersions = SupportedProtoVersions( + ProtoVersion(1) -> 
VersionedProtoConverter(ProtocolVersion.v30)(v1.FullInformeeTree)( + supportedProtoVersion(_)(fromProtoV1), + _.toProtoV1.toByteString, + ) + ) + + /** Creates a full informee tree from a [[GenTransactionTree]]. + * @throws InformeeTree$.InvalidInformeeTree if `tree` is not a valid informee tree (i.e. the wrong nodes are blinded) + */ + def tryCreate(tree: GenTransactionTree, protocolVersion: ProtocolVersion): FullInformeeTree = + create(tree, protocolVersionRepresentativeFor(protocolVersion)).valueOr(err => + throw InvalidInformeeTree(err) + ) + + private[data] def create( + tree: GenTransactionTree, + representativeProtocolVersion: RepresentativeProtocolVersion[FullInformeeTree.type], + ): Either[String, FullInformeeTree] = + FullInformeeTree(tree)(representativeProtocolVersion).validated + + private[data] def create( + tree: GenTransactionTree, + protocolVersion: ProtocolVersion, + ): Either[String, FullInformeeTree] = + create(tree, protocolVersionRepresentativeFor(protocolVersion)) + + /** Lens for modifying the [[GenTransactionTree]] inside of a full informee tree. + * It does not check if the new `tree` actually constitutes a valid full informee tree, therefore: + * DO NOT USE IN PRODUCTION. 
+ */ + @VisibleForTesting + lazy val genTransactionTreeUnsafe: Lens[FullInformeeTree, GenTransactionTree] = + Lens[FullInformeeTree, GenTransactionTree](_.tree)(newTree => + fullInformeeTree => FullInformeeTree(newTree)(fullInformeeTree.representativeProtocolVersion) + ) + + def fromProtoV1( + hashOps: HashOps, + protoInformeeTree: v1.FullInformeeTree, + ): ParsingResult[FullInformeeTree] = + for { + protoTree <- ProtoConverter.required("tree", protoInformeeTree.tree) + tree <- GenTransactionTree.fromProtoV1(hashOps, protoTree) + fullInformeeTree <- FullInformeeTree + .create(tree, protocolVersionRepresentativeFor(ProtoVersion(1))) + .leftMap(e => + ProtoDeserializationError.OtherError(s"Unable to create full informee tree: $e") + ) + } yield fullInformeeTree +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/FullTransactionViewTree.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/FullTransactionViewTree.scala new file mode 100644 index 0000000000..99b0dc3b1a --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/FullTransactionViewTree.scala @@ -0,0 +1,59 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.data + +import cats.syntax.either.* +import com.digitalasset.canton.data.TransactionViewTree.InvalidTransactionViewTree +import com.digitalasset.canton.data.ViewPosition.MerklePathElement +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} + +import scala.annotation.tailrec + +/** Wraps a `GenTransactionTree` where exactly one view (including subviews) is unblinded. + * The `commonMetadata` and `participantMetadata` are also unblinded. + * The `submitterMetadata` is unblinded if and only if the unblinded view is a root view. 
+ */ +final case class FullTransactionViewTree private (tree: GenTransactionTree) + extends TransactionViewTree + with PrettyPrinting { + + @tailrec + private[data] override def findTheView( + viewsWithIndex: Seq[(TransactionView, MerklePathElement)], + viewPosition: ViewPosition = ViewPosition.root, + ): Either[String, (TransactionView, ViewPosition)] = { + viewsWithIndex match { + case Seq() => + Left("A transaction view tree must contain an unblinded view.") + case Seq((singleView, index)) if singleView.hasAllLeavesBlinded => + findTheView(singleView.subviews.unblindedElementsWithIndex, index +: viewPosition) + case Seq((singleView, index)) if singleView.isFullyUnblinded => + Right((singleView, index +: viewPosition)) + case Seq((singleView, _index)) => + Left(s"A transaction view tree must contain a fully unblinded view: $singleView") + case multipleViews => + Left( + s"A transaction view tree must not contain several unblinded views: ${multipleViews.map(_._1)}" + ) + } + } + + lazy val tryFlattenToParticipantViews: Seq[ParticipantTransactionView] = + view.tryFlattenToParticipantViews + + override def pretty: Pretty[FullTransactionViewTree] = prettyOfClass(unnamedParam(_.tree)) +} + +object FullTransactionViewTree { + + /** @throws TransactionViewTree$.InvalidTransactionViewTree if tree is not a transaction view tree + * (i.e. 
the wrong set of nodes is blinded) + */ + def tryCreate(tree: GenTransactionTree): FullTransactionViewTree = + create(tree).valueOr(msg => throw InvalidTransactionViewTree(msg)) + + def create(tree: GenTransactionTree): Either[String, FullTransactionViewTree] = + FullTransactionViewTree(tree).validated + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/GenTransactionTree.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/GenTransactionTree.scala new file mode 100644 index 0000000000..e6aefa19ea --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/GenTransactionTree.scala @@ -0,0 +1,342 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.data + +import cats.syntax.either.* +import cats.syntax.foldable.* +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.ProtoDeserializationError.InvariantViolation +import com.digitalasset.canton.* +import com.digitalasset.canton.crypto.* +import com.digitalasset.canton.data.MerkleTree.* +import com.digitalasset.canton.data.ViewPosition.MerkleSeqIndexFromRoot +import com.digitalasset.canton.logging.pretty.Pretty +import com.digitalasset.canton.protocol.{v1, *} +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.util.{EitherUtil, MonadUtil} +import com.digitalasset.canton.version.* +import com.google.common.annotations.VisibleForTesting +import monocle.Lens +import monocle.macros.GenLens + +import scala.collection.mutable + +/** Partially blinded version of a transaction tree. + * This class is also used to represent transaction view trees and informee trees. 
+ */ +// private constructor, because object invariants are checked by factory methods +final case class GenTransactionTree private ( + submitterMetadata: MerkleTree[SubmitterMetadata], + commonMetadata: MerkleTree[CommonMetadata], + participantMetadata: MerkleTree[ParticipantMetadata], + rootViews: MerkleSeq[TransactionView], +)(hashOps: HashOps) + extends MerkleTreeInnerNode[GenTransactionTree](hashOps) { + + def validated: Either[String, this.type] = for { + _ <- checkUniqueHashes + _ <- commonMetadata.unwrap.leftMap(_ => "commonMetadata is blinded") + } yield this + + private def checkUniqueHashes: Either[String, this.type] = { + // Check that every subtree has a unique root hash + val usedHashes = mutable.Set[RootHash]() + + def go(tree: MerkleTree[_]): Either[String, this.type] = { + val rootHash = tree.rootHash + + for { + _ <- EitherUtil.condUnitE( + !usedHashes.contains(rootHash), + "A transaction tree must contain a hash at most once. " + + s"Found the hash ${rootHash.toString} twice.", + ) + _ = usedHashes.add(rootHash).discard + _ <- MonadUtil.sequentialTraverse(tree.subtrees)(go) + } yield this + } + + go(this) + } + + @VisibleForTesting + // Private, because it does not check object invariants and is therefore unsafe. 
+ private[data] def copy( + submitterMetadata: MerkleTree[SubmitterMetadata] = this.submitterMetadata, + commonMetadata: MerkleTree[CommonMetadata] = this.commonMetadata, + participantMetadata: MerkleTree[ParticipantMetadata] = this.participantMetadata, + rootViews: MerkleSeq[TransactionView] = this.rootViews, + ): GenTransactionTree = + GenTransactionTree(submitterMetadata, commonMetadata, participantMetadata, rootViews)(hashOps) + + override def subtrees: Seq[MerkleTree[_]] = + Seq[MerkleTree[_]]( + submitterMetadata, + commonMetadata, + participantMetadata, + ) ++ rootViews.rootOrEmpty.toList + + override private[data] def withBlindedSubtrees( + blindingCommandPerNode: PartialFunction[RootHash, BlindingCommand] + ): MerkleTree[GenTransactionTree] = + GenTransactionTree( + submitterMetadata.doBlind(blindingCommandPerNode), + commonMetadata.doBlind(blindingCommandPerNode), + participantMetadata.doBlind(blindingCommandPerNode), + rootViews.doBlind(blindingCommandPerNode), + )(hashOps) + + /** Specialized blinding that addresses the case of blinding a Transaction Tree to obtain a + * Transaction View Tree. + * The view is identified by its position in the tree, specified by `viewPos`, directed from the root + * to the leaf. 
+ * To ensure the path is valid, it should be obtained beforehand with a traversal method such as + * [[TransactionView.allSubviewsWithPosition]] + * + * @param viewPos the position of the view from root to leaf + * @throws java.lang.UnsupportedOperationException if the path does not lead to a view + */ + private[data] def tryBlindForTransactionViewTree( + viewPos: ViewPositionFromRoot + ): GenTransactionTree = { + viewPos.position match { + case (head: MerkleSeqIndexFromRoot) +: tail => + val sm = if (viewPos.isTopLevel) submitterMetadata else submitterMetadata.blindFully + val rv = rootViews.tryBlindAllButLeaf( + head, + _.tryBlindForTransactionViewTree(ViewPositionFromRoot(tail)), + ) + GenTransactionTree( + sm, + commonMetadata, + participantMetadata, + rv, + )(hashOps) + case _ => throw new UnsupportedOperationException(s"Invalid view position: $viewPos") + } + } + + lazy val transactionId: TransactionId = TransactionId.fromRootHash(rootHash) + + /** Yields the full informee tree corresponding to this transaction tree. + * The resulting informee tree is full, only if every view common data is unblinded. + */ + def tryFullInformeeTree(protocolVersion: ProtocolVersion): FullInformeeTree = { + val tree = blind({ + case _: GenTransactionTree => RevealIfNeedBe + case _: SubmitterMetadata => BlindSubtree + case _: CommonMetadata => RevealSubtree + case _: ParticipantMetadata => BlindSubtree + case _: TransactionView => RevealIfNeedBe + case _: ViewCommonData => RevealSubtree + case _: ViewParticipantData => BlindSubtree + }).tryUnwrap + FullInformeeTree.tryCreate(tree, protocolVersion) + } + + /** Finds the position of the view corresponding to the given hash. + * Returns `None` if no such view exists in this tree. 
+ */ + def viewPosition(viewHash: RootHash): Option[ViewPosition] = { + val pos = for { + (rootView, index) <- rootViews.unblindedElementsWithIndex + (view, viewPos) <- rootView.allSubviewsWithPosition(index +: ViewPosition.root) + if view.rootHash == viewHash + } yield viewPos + + pos.headOption + } + + /** Yields the transaction view tree corresponding to a given view. + * If some subtrees have already been blinded, they will remain blinded. + * + * @throws java.lang.IllegalArgumentException if there is no transaction view in this tree with `viewHash` + */ + def transactionViewTree(viewHash: RootHash): FullTransactionViewTree = + viewPosition(viewHash) + .map(viewPos => + FullTransactionViewTree.tryCreate(tryBlindForTransactionViewTree(viewPos.reverse)) + ) + .getOrElse( + throw new IllegalArgumentException(s"No transaction view found with hash $viewHash") + ) + + lazy val allTransactionViewTrees: Seq[FullTransactionViewTree] = for { + (rootView, index) <- rootViews.unblindedElementsWithIndex + (_view, viewPos) <- rootView.allSubviewsWithPosition(index +: ViewPosition.root) + genTransactionTree = tryBlindForTransactionViewTree(viewPos.reverse) + } yield FullTransactionViewTree.tryCreate(genTransactionTree) + + def allLightTransactionViewTrees( + ): Seq[LightTransactionViewTree] = + allTransactionViewTrees.map(LightTransactionViewTree.fromTransactionViewTree) + + /** All lightweight transaction trees in this [[GenTransactionTree]], accompanied by their witnesses and randomness + * suitable for deriving encryption keys for encrypted view messages. + * + * The witnesses are useful for constructing the BCC-style recipient trees for the view messages. + * The lightweight transaction + BCC scheme requires that, for every non top-level view, + * the encryption key used to encrypt that view's lightweight transaction tree can be (deterministically) derived + * from the randomness used for the parent view. 
This function returns suitable randomness that is derived using + * a HKDF. For top-level views, the randomness is derived from the provided initial seed. + * + * All the returned random values have the same length as the provided initial seed. The caller should ensure that + * the provided randomness is long enough to be used for the default HMAC implementation. + */ + def allLightTransactionViewTreesWithWitnessesAndSeeds( + initSeed: SecureRandomness, + hkdfOps: HkdfOps, + ): Either[HkdfError, Seq[(LightTransactionViewTree, Witnesses, SecureRandomness)]] = { + val randomnessLength = initSeed.unwrap.size + val witnessAndSeedMapE = + allTransactionViewTrees.toList.foldLeftM( + Map.empty[ViewPosition, (Witnesses, SecureRandomness)] + ) { case (ws, tvt) => + val parentPosition = ViewPosition(tvt.viewPosition.position.drop(1)) + val (witnesses, parentSeed) = ws.get(parentPosition) match { + case Some((parentWitnesses, parentSeed)) => + parentWitnesses.prepend(tvt.informees) -> parentSeed + case None if (parentPosition.position.isEmpty) => + Witnesses(NonEmpty(Seq, tvt.informees)) -> initSeed + case None => + throw new IllegalStateException( + s"Can't find the parent witnesses for position ${tvt.viewPosition}" + ) + } + + val viewIndex = + tvt.viewPosition.position.headOption + .getOrElse(throw new IllegalStateException("View with no position")) + val seedE = hkdfOps.computeHkdf( + parentSeed.unwrap, + randomnessLength, + HkdfInfo.subview(viewIndex), + ) + seedE.map(seed => ws.updated(tvt.viewPosition, witnesses -> seed)) + } + witnessAndSeedMapE.map { witnessAndSeedMap => + allTransactionViewTrees.map { tvt => + val (witnesses, seed) = witnessAndSeedMap(tvt.viewPosition) + (LightTransactionViewTree.fromTransactionViewTree(tvt), witnesses, seed) + } + } + } + + def toProtoV1: v1.GenTransactionTree = + v1.GenTransactionTree( + submitterMetadata = Some(MerkleTree.toBlindableNodeV1(submitterMetadata)), + commonMetadata = Some(MerkleTree.toBlindableNodeV1(commonMetadata)), 
+ participantMetadata = Some(MerkleTree.toBlindableNodeV1(participantMetadata)), + rootViews = Some(rootViews.toProtoV1), + ) + + def mapUnblindedRootViews(f: TransactionView => TransactionView): GenTransactionTree = + this.copy(rootViews = rootViews.mapM(f)) + + override def pretty: Pretty[GenTransactionTree] = prettyOfClass( + param("submitter metadata", _.submitterMetadata), + param("common metadata", _.commonMetadata), + param("participant metadata", _.participantMetadata), + param("roots", _.rootViews), + ) +} + +object GenTransactionTree { + + /** @throws GenTransactionTree$.InvalidGenTransactionTree if two subtrees have the same root hash + */ + def tryCreate(hashOps: HashOps)( + submitterMetadata: MerkleTree[SubmitterMetadata], + commonMetadata: MerkleTree[CommonMetadata], + participantMetadata: MerkleTree[ParticipantMetadata], + rootViews: MerkleSeq[TransactionView], + ): GenTransactionTree = + create(hashOps)(submitterMetadata, commonMetadata, participantMetadata, rootViews).valueOr( + err => throw InvalidGenTransactionTree(err) + ) + + /** Creates a [[GenTransactionTree]]. + * Yields `Left(...)` if two subtrees have the same root hash. + */ + def create(hashOps: HashOps)( + submitterMetadata: MerkleTree[SubmitterMetadata], + commonMetadata: MerkleTree[CommonMetadata], + participantMetadata: MerkleTree[ParticipantMetadata], + rootViews: MerkleSeq[TransactionView], + ): Either[String, GenTransactionTree] = + GenTransactionTree(submitterMetadata, commonMetadata, participantMetadata, rootViews)( + hashOps + ).validated + + /** Indicates an attempt to create an invalid [[GenTransactionTree]]. 
*/ + final case class InvalidGenTransactionTree(message: String) extends RuntimeException(message) {} + + @VisibleForTesting + val submitterMetadataUnsafe: Lens[GenTransactionTree, MerkleTree[SubmitterMetadata]] = + GenLens[GenTransactionTree](_.submitterMetadata) + + @VisibleForTesting + val rootViewsUnsafe: Lens[GenTransactionTree, MerkleSeq[TransactionView]] = + GenLens[GenTransactionTree](_.rootViews) + + def fromProtoV1( + hashOps: HashOps, + protoTransactionTree: v1.GenTransactionTree, + ): ParsingResult[GenTransactionTree] = + for { + submitterMetadata <- MerkleTree + .fromProtoOptionV1( + protoTransactionTree.submitterMetadata, + SubmitterMetadata.fromByteString(hashOps), + ) + commonMetadata <- MerkleTree + .fromProtoOptionV1( + protoTransactionTree.commonMetadata, + CommonMetadata.fromByteString(hashOps), + ) + commonMetadataUnblinded <- commonMetadata.unwrap.leftMap(_ => + InvariantViolation("GenTransactionTree.commonMetadata is blinded") + ) + participantMetadata <- MerkleTree + .fromProtoOptionV1( + protoTransactionTree.participantMetadata, + ParticipantMetadata.fromByteString(hashOps), + ) + rootViewsP <- ProtoConverter + .required("GenTransactionTree.rootViews", protoTransactionTree.rootViews) + rootViews <- MerkleSeq.fromProtoV1( + ( + hashOps, + TransactionView.fromByteString(ProtoVersion(1))( + (hashOps, commonMetadataUnblinded.confirmationPolicy) + ), + ), + rootViewsP, + ) + genTransactionTree <- createGenTransactionTreeV0V1( + hashOps, + submitterMetadata, + commonMetadata, + participantMetadata, + rootViews, + ) + } yield genTransactionTree + + def createGenTransactionTreeV0V1( + hashOps: HashOps, + submitterMetadata: MerkleTree[SubmitterMetadata], + commonMetadata: MerkleTree[CommonMetadata], + participantMetadata: MerkleTree[ParticipantMetadata], + rootViews: MerkleSeq[TransactionView], + ): ParsingResult[GenTransactionTree] = + GenTransactionTree + .create(hashOps)( + submitterMetadata, + commonMetadata, + participantMetadata, + rootViews, 
+ ) + .leftMap(e => ProtoDeserializationError.OtherError(s"Unable to create transaction tree: $e")) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/GenTransferViewTree.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/GenTransferViewTree.scala new file mode 100644 index 0000000000..0f46275554 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/GenTransferViewTree.scala @@ -0,0 +1,91 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.data + +import cats.syntax.either.* +import com.digitalasset.canton.ProtoDeserializationError.OtherError +import com.digitalasset.canton.crypto.HashOps +import com.digitalasset.canton.data.MerkleTree.{BlindSubtree, RevealIfNeedBe, RevealSubtree} +import com.digitalasset.canton.protocol.{ViewHash, v1} +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.version.{ + HasProtocolVersionedWrapper, + ProtocolVersion, + VersionedMessage, +} +import com.google.protobuf.ByteString + +/** A transfer request tree has two children: + * The `commonData` for the mediator and the involved participants + * and the `view` only for the involved participants. + */ +abstract class GenTransferViewTree[ + CommonData <: HasProtocolVersionedWrapper[CommonData], + View <: HasProtocolVersionedWrapper[View], + Tree, + MediatorMessage, +] protected (commonData: MerkleTree[CommonData], participantData: MerkleTree[View])( + hashOps: HashOps +) extends MerkleTreeInnerNode[Tree](hashOps) { this: Tree => + + val viewPosition: ViewPosition = + ViewPosition.root // Use a dummy value, as there is only one view. 
+ + override def subtrees: Seq[MerkleTree[_]] = Seq(commonData, participantData) + + /* + This method is visible because we need the non-deterministic serialization only when we encrypt the tree, + but the message to the mediator is sent unencrypted. + + The versioning does not play well with this parametrized class so we define the serialization + method explicitly. + */ + private def toProtoVersioned(version: ProtocolVersion): VersionedMessage[TransferViewTree] = + VersionedMessage(toProtoV1.toByteString, 1) + + def toByteString(version: ProtocolVersion): ByteString = toProtoVersioned(version).toByteString + + // If you add new versions, take `version` into account in `toProtoVersioned` above + def toProtoV1: v1.TransferViewTree = + v1.TransferViewTree( + commonData = Some(MerkleTree.toBlindableNodeV1(commonData)), + participantData = Some(MerkleTree.toBlindableNodeV1(participantData)), + ) + + def viewHash: ViewHash = ViewHash.fromRootHash(rootHash) + + /** Blinds the transfer view tree such that the `view` is blinded and the `commonData` remains revealed. */ + def mediatorMessage: MediatorMessage = { + val blinded = blind { + case root if root eq this => RevealIfNeedBe + case `commonData` => RevealSubtree + case `participantData` => BlindSubtree + } + createMediatorMessage(blinded.tryUnwrap) + } + + /** Creates the mediator message from an appropriately blinded transfer view tree. 
*/ + protected[this] def createMediatorMessage(blindedTree: Tree): MediatorMessage +} + +object GenTransferViewTree { + private[data] def fromProtoV1[CommonData, View, Tree]( + deserializeCommonData: ByteString => ParsingResult[MerkleTree[ + CommonData + ]], + deserializeView: ByteString => ParsingResult[MerkleTree[View]], + )( + createTree: (MerkleTree[CommonData], MerkleTree[View]) => Tree + )(treeP: v1.TransferViewTree): ParsingResult[Tree] = { + val v1.TransferViewTree(commonDataP, viewP) = treeP + for { + commonData <- MerkleTree + .fromProtoOptionV1(commonDataP, deserializeCommonData(_)) + .leftMap(error => OtherError(s"transferCommonData: $error")) + view <- MerkleTree + .fromProtoOptionV1(viewP, deserializeView(_)) + .leftMap(error => OtherError(s"transferView: $error")) + } yield createTree(commonData, view) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/Informee.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/Informee.scala new file mode 100644 index 0000000000..75422f84b3 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/Informee.scala @@ -0,0 +1,108 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.data + +import cats.syntax.either.* +import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.protocol.v1 +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.topology.transaction.TrustLevel +import com.digitalasset.canton.{LfPartyId, ProtoDeserializationError} + +/** A party that must be informed about the view. + */ +// This class is a reference example of serialization best practices. 
+// In particular, it demonstrates serializing a trait with different subclasses. +// The design is quite simple. It should be applied whenever possible, but it will not cover all cases. +// +// Please consult the team if you intend to change the design of serialization. +sealed trait Informee extends Product with Serializable with PrettyPrinting { + def party: LfPartyId + + /** Determines how much "impact" the informee has on approving / rejecting the underlying view. + * + * Positive value: confirming party + * Zero: plain informee, who sees the underlying view, but has no impact on approving / rejecting it + */ + def weight: NonNegativeInt + + def requiredTrustLevel: TrustLevel + + /** Yields an informee resulting from adding `delta` to `weight`. + * + * If the new weight is zero, the resulting informee will be a plain informee; + * in thise case, the resulting informee will have trust level ORDINARY irrespective of the trust level of this. + */ + def withAdditionalWeight(delta: NonNegativeInt): Informee + + /** Creates the v1-proto version of an informee. + * + * Plain informees get weight 0. + * Confirming parties get their assigned (positive) weight. 
+ */ + private[data] def toProtoV1: v1.Informee = + v1.Informee( + party = party, + weight = weight.unwrap, + requiredTrustLevel = requiredTrustLevel.toProtoEnum, + ) + + override def pretty: Pretty[Informee] = + prettyOfString(inst => show"${inst.party}*${inst.weight} $requiredTrustLevel") +} + +object Informee { + + def create( + party: LfPartyId, + weight: NonNegativeInt, + requiredTrustLevel: TrustLevel, + ): Informee = + if (weight == NonNegativeInt.zero) PlainInformee(party) + else ConfirmingParty(party, PositiveInt.tryCreate(weight.unwrap), requiredTrustLevel) + + private[data] def fromProtoV1(informeeP: v1.Informee): ParsingResult[Informee] = { + val v1.Informee(partyP, weightP, requiredTrustLevelP) = informeeP + for { + party <- LfPartyId + .fromString(partyP) + .leftMap(ProtoDeserializationError.ValueDeserializationError("party", _)) + requiredTrustLevel <- TrustLevel.fromProtoEnum(requiredTrustLevelP) + + weight <- NonNegativeInt + .create(weightP) + .leftMap(err => ProtoDeserializationError.InvariantViolation(err.message)) + } yield Informee.create(party, weight, requiredTrustLevel) + } +} + +/** A party that must confirm the underlying view. + * + * @param weight determines the impact of the party on whether the view is approved. 
+ */ +final case class ConfirmingParty( + party: LfPartyId, + partyWeight: PositiveInt, + requiredTrustLevel: TrustLevel, +) extends Informee { + + val weight: NonNegativeInt = partyWeight.toNonNegative + + def withAdditionalWeight(delta: NonNegativeInt): Informee = { + copy(partyWeight = partyWeight + delta) + } +} + +/** An informee that is not a confirming party + */ +final case class PlainInformee(party: LfPartyId) extends Informee { + override val weight: NonNegativeInt = NonNegativeInt.zero + + override val requiredTrustLevel: TrustLevel = TrustLevel.Ordinary + + def withAdditionalWeight(delta: NonNegativeInt): Informee = + if (delta == NonNegativeInt.zero) this + else ConfirmingParty(party, PositiveInt.tryCreate(delta.unwrap), requiredTrustLevel) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/InformeeTree.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/InformeeTree.scala new file mode 100644 index 0000000000..449888c865 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/InformeeTree.scala @@ -0,0 +1,177 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.data + +import cats.syntax.either.* +import com.digitalasset.canton.* +import com.digitalasset.canton.crypto.* +import com.digitalasset.canton.protocol.* +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.topology.* +import com.digitalasset.canton.util.EitherUtil +import com.digitalasset.canton.version.* +import com.google.common.annotations.VisibleForTesting +import monocle.Lens + +/** Encapsulates a [[GenTransactionTree]] that is also an informee tree. 
 */
// private constructor, because object invariants are checked by factory methods
final case class InformeeTree private (tree: GenTransactionTree)(
    override val representativeProtocolVersion: RepresentativeProtocolVersion[InformeeTree.type]
) extends HasProtocolVersionedWrapper[InformeeTree] {

  /** Checks the informee-tree invariants (blinding of global metadata and views)
    * and returns this instance on success, or an error message on violation.
    */
  def validated: Either[String, this.type] = for {
    _ <- InformeeTree.checkGlobalMetadata(tree)
    _ <- InformeeTree.checkViews(tree.rootViews, assertFull = false)
  } yield this

  @transient override protected lazy val companionObj: InformeeTree.type = InformeeTree

  lazy val transactionId: TransactionId = TransactionId.fromRootHash(tree.rootHash)

  // Informees of every unblinded view, keyed by the view's hash.
  lazy val informeesByViewHash: Map[ViewHash, Set[Informee]] =
    InformeeTree.viewCommonDataByViewHash(tree).map { case (hash, viewCommonData) =>
      hash -> viewCommonData.informees
    }

  // Safe to unwrap: checkGlobalMetadata requires the common metadata to be unblinded.
  private lazy val commonMetadata = checked(tree.commonMetadata.tryUnwrap)

  def domainId: DomainId = commonMetadata.domainId

  def mediator: MediatorRef = commonMetadata.mediator

  /** Serializes this informee tree to its v1 protobuf representation. */
  def toProtoV1: v1.InformeeTree = v1.InformeeTree(tree = Some(tree.toProtoV1))
}

object InformeeTree extends HasProtocolVersionedWithContextCompanion[InformeeTree, HashOps] {
  override val name: String = "InformeeTree"

  val supportedProtoVersions: SupportedProtoVersions = SupportedProtoVersions(
    ProtoVersion(1) -> VersionedProtoConverter(ProtocolVersion.v30)(v1.InformeeTree)(
      supportedProtoVersion(_)(fromProtoV1),
      _.toProtoV1.toByteString,
    )
  )

  /** Creates an informee tree from a [[GenTransactionTree]].
    * @throws InformeeTree$.InvalidInformeeTree if `tree` is not a valid informee tree (i.e. the wrong nodes are blinded)
    */
  def tryCreate(
      tree: GenTransactionTree,
      protocolVersion: ProtocolVersion,
  ): InformeeTree = create(tree, protocolVersionRepresentativeFor(protocolVersion)).valueOr(err =>
    throw InvalidInformeeTree(err)
  )

  /** Creates an [[InformeeTree]] from a [[GenTransactionTree]].
    * Yields `Left(...)` if `tree` is not a valid informee tree (i.e. the wrong nodes are blinded)
    */
  private[data] def create(
      tree: GenTransactionTree,
      representativeProtocolVersion: RepresentativeProtocolVersion[InformeeTree.type],
  ): Either[String, InformeeTree] = InformeeTree(tree)(representativeProtocolVersion).validated

  /** Creates an [[InformeeTree]] from a [[GenTransactionTree]].
    * Yields `Left(...)` if `tree` is not a valid informee tree (i.e. the wrong nodes are blinded)
    */
  private[data] def create(
      tree: GenTransactionTree,
      protocolVersion: ProtocolVersion,
  ): Either[String, InformeeTree] = create(tree, protocolVersionRepresentativeFor(protocolVersion))

  /** Checks the blinding of the three global metadata nodes:
    * submitter and participant metadata must be blinded, common metadata unblinded.
    * All violations are collected into a single error message.
    */
  private[data] def checkGlobalMetadata(tree: GenTransactionTree): Either[String, Unit] = {
    val errors = Seq.newBuilder[String]

    if (tree.submitterMetadata.unwrap.isRight)
      errors += "The submitter metadata of an informee tree must be blinded."
    if (tree.commonMetadata.unwrap.isLeft)
      errors += "The common metadata of an informee tree must be unblinded."
    if (tree.participantMetadata.unwrap.isRight)
      errors += "The participant metadata of an informee tree must be blinded."

    val message = errors.result().mkString(" ")
    EitherUtil.condUnitE(message.isEmpty, message)
  }

  /** Checks the blinding of the views.
    * With `assertFull = true`, additionally requires every view and its common data to be unblinded
    * (used for full informee trees). View participant data must always be blinded.
    */
  private[data] def checkViews(
      rootViews: MerkleSeq[TransactionView],
      assertFull: Boolean,
  ): Either[String, Unit] = {

    val errors = Seq.newBuilder[String]

    def checkIsEmpty(blinded: Seq[RootHash]): Unit =
      if (assertFull && blinded.nonEmpty) {
        val hashes = blinded.map(_.toString).mkString(", ")
        errors += s"All views in a full informee tree must be unblinded. Found $hashes"
      }

    checkIsEmpty(rootViews.blindedElements)

    // Recursively validate every unblinded view and its subviews.
    def go(wrappedViews: Seq[TransactionView]): Unit =
      wrappedViews.foreach { view =>
        checkIsEmpty(view.subviews.blindedElements)

        if (assertFull && view.viewCommonData.unwrap.isLeft)
          errors += s"The view common data in a full informee tree must be unblinded. Found ${view.viewCommonData}."

        if (view.viewParticipantData.unwrap.isRight)
          errors += s"The view participant data in an informee tree must be blinded. Found ${view.viewParticipantData}."

        go(view.subviews.unblindedElements)
      }

    go(rootViews.unblindedElements)

    val message = errors.result().mkString("\n")
    EitherUtil.condUnitE(message.isEmpty, message)
  }

  /** Lens for modifying the [[GenTransactionTree]] inside of an informee tree.
    * It does not check if the new `tree` actually constitutes a valid informee tree, therefore:
    * DO NOT USE IN PRODUCTION.
    */
  @VisibleForTesting
  def genTransactionTreeUnsafe: Lens[InformeeTree, GenTransactionTree] =
    Lens[InformeeTree, GenTransactionTree](_.tree)(newTree =>
      oldInformeeTree => InformeeTree(newTree)(oldInformeeTree.representativeProtocolVersion)
    )

  // Collects the unblinded view common data of all (transitive) subviews, keyed by view hash.
  private[data] def viewCommonDataByViewHash(
      tree: GenTransactionTree
  ): Map[ViewHash, ViewCommonData] =
    tree.rootViews.unblindedElements
      .flatMap(_.flatten)
      .map(view => view.viewHash -> view.viewCommonData.unwrap)
      .collect { case (hash, Right(viewCommonData)) => hash -> viewCommonData }
      .toMap

  // Same as viewCommonDataByViewHash, but keyed by the view's position in the tree.
  private[data] def viewCommonDataByViewPosition(
      tree: GenTransactionTree
  ): Map[ViewPosition, ViewCommonData] =
    tree.rootViews.unblindedElementsWithIndex
      .flatMap { case (view, index) =>
        view.allSubviewsWithPosition(ViewPosition(List(index))).map { case (subview, position) =>
          position -> subview.viewCommonData.unwrap
        }
      }
      .collect { case (position, Right(viewCommonData)) => position -> viewCommonData }
      .toMap

  /** Indicates an attempt to create an invalid [[InformeeTree]] or [[FullInformeeTree]]. */
  final case class InvalidInformeeTree(message: String) extends RuntimeException(message) {}

  /** Deserializes an informee tree from its v1 protobuf representation,
    * re-validating the informee-tree invariants via [[create]].
    */
  private def fromProtoV1(
      hashOps: HashOps,
      protoInformeeTree: v1.InformeeTree,
  ): ParsingResult[InformeeTree] =
    for {
      protoTree <- ProtoConverter.required("tree", protoInformeeTree.tree)
      tree <- GenTransactionTree.fromProtoV1(hashOps, protoTree)
      informeeTree <- InformeeTree
        .create(tree, protocolVersionRepresentativeFor(ProtoVersion(1)))
        .leftMap(e => ProtoDeserializationError.OtherError(s"Unable to create informee tree: $e"))
    } yield informeeTree
}
diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/KeyResolution.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/KeyResolution.scala
new file mode 100644
index 0000000000..843f6c5a22
--- /dev/null
+++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/KeyResolution.scala
@@ -0,0 +1,94 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.data

import cats.syntax.traverse.*
import com.digitalasset.canton.LfPartyId
import com.digitalasset.canton.ProtoDeserializationError.FieldNotSet
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.protocol.ContractIdSyntax.*
import com.digitalasset.canton.protocol.{LfContractId, LfTransactionVersion, v3}
import com.digitalasset.canton.serialization.ProtoConverter
import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult

/** The outcome of resolving a contract key: either assigned to a contract or free. */
sealed trait KeyResolution extends Product with Serializable with PrettyPrinting {
  /** The contract id the key resolves to, or `None` if the key is free. */
  def resolution: Option[LfContractId]

  /** lf version of the key */
  def version: LfTransactionVersion
}

/** A key resolution that additionally carries the key's maintainers. */
sealed trait KeyResolutionWithMaintainers extends KeyResolution {
  def maintainers: Set[LfPartyId]

  /** Drops the maintainer information (if any) to obtain a serializable resolution. */
  def asSerializable: SerializableKeyResolution
}

/** A key resolution that can be serialized into the ViewParticipantData proto. */
sealed trait SerializableKeyResolution extends KeyResolution {
  // NOTE(review): named `toProtoOneOfV0` but serializes to the v3 proto, and the
  // deserializer below is `fromProtoOneOfV3` — presumably a leftover from renumbering;
  // consider renaming for consistency (TODO confirm with callers before renaming).
  def toProtoOneOfV0: v3.ViewParticipantData.ResolvedKey.Resolution
}

object SerializableKeyResolution {
  /** Deserializes a key resolution from the v3 proto oneof.
    * Fails with [[FieldNotSet]] if the oneof is empty.
    */
  def fromProtoOneOfV3(
      resolutionP: v3.ViewParticipantData.ResolvedKey.Resolution,
      version: LfTransactionVersion,
  ): ParsingResult[SerializableKeyResolution] =
    resolutionP match {
      case v3.ViewParticipantData.ResolvedKey.Resolution.ContractId(contractIdP) =>
        ProtoConverter
          .parseLfContractId(contractIdP)
          .map(AssignedKey(_)(version))
      case v3.ViewParticipantData.ResolvedKey.Resolution
            .Free(v3.ViewParticipantData.FreeKey(maintainersP)) =>
        maintainersP
          .traverse(ProtoConverter.parseLfPartyId)
          .map(maintainers => FreeKey(maintainers.toSet)(version))
      case v3.ViewParticipantData.ResolvedKey.Resolution.Empty =>
        Left(FieldNotSet("ViewParticipantData.ResolvedKey.resolution"))
    }
}

/** A key that resolves to a contract id. */
final case class AssignedKey(contractId: LfContractId)(
    override val version: LfTransactionVersion
) extends SerializableKeyResolution {
  override def pretty: Pretty[AssignedKey] =
    prettyNode("Assigned", unnamedParam(_.contractId))

  override def resolution: Option[LfContractId] = Some(contractId)

  override def toProtoOneOfV0: v3.ViewParticipantData.ResolvedKey.Resolution =
    v3.ViewParticipantData.ResolvedKey.Resolution.ContractId(value = contractId.toProtoPrimitive)
}

/** A key that does not resolve to any contract; carries its maintainers. */
final case class FreeKey(override val maintainers: Set[LfPartyId])(
    override val version: LfTransactionVersion
) extends SerializableKeyResolution
    with KeyResolutionWithMaintainers {
  override def pretty: Pretty[FreeKey] = prettyNode("Free", param("maintainers", _.maintainers))

  override def resolution: Option[LfContractId] = None

  override def toProtoOneOfV0: v3.ViewParticipantData.ResolvedKey.Resolution =
    v3.ViewParticipantData.ResolvedKey.Resolution.Free(
      value = v3.ViewParticipantData.FreeKey(maintainers = maintainers.toSeq)
    )

  // Already serializable; maintainers are part of the Free proto encoding.
  override def asSerializable: SerializableKeyResolution = this
}

/** An assigned key that also carries its maintainers (not serializable as such;
  * the maintainers are dropped by [[asSerializable]]).
  */
final case class AssignedKeyWithMaintainers(
    contractId: LfContractId,
    override val maintainers: Set[LfPartyId],
)(override val version: LfTransactionVersion)
    extends KeyResolutionWithMaintainers {
  override def resolution: Option[LfContractId] = Some(contractId)

  override def pretty: Pretty[AssignedKeyWithMaintainers] = prettyOfClass(
    unnamedParam(_.contractId),
    param("maintainers", _.maintainers),
  )

  override def asSerializable: SerializableKeyResolution =
    AssignedKey(contractId)(version)
}
diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/LightTransactionViewTree.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/LightTransactionViewTree.scala
new file mode 100644
index 0000000000..2c8c6f9d15
--- /dev/null
+++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/LightTransactionViewTree.scala
@@ -0,0 +1,224 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.data

import cats.syntax.either.*
import cats.syntax.traverse.*
import com.digitalasset.canton.*
import com.digitalasset.canton.crypto.*
import com.digitalasset.canton.data.ViewPosition.MerklePathElement
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.protocol.*
import com.digitalasset.canton.serialization.ProtoConverter
import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.digitalasset.canton.util.EitherUtil
import com.digitalasset.canton.version.*
import monocle.PLens

import scala.annotation.tailrec
import scala.collection.mutable

/** Wraps a `GenTransactionTree` where exactly one view (not including subviews) is unblinded.
  * The `commonMetadata` and `participantMetadata` are also unblinded.
  * The `submitterMetadata` is unblinded if and only if the unblinded view is a root view.
  *
  * @throws LightTransactionViewTree$.InvalidLightTransactionViewTree if [[tree]] is not a light transaction view tree
  *                                                                   (i.e. the wrong set of nodes is blinded)
  */
sealed abstract case class LightTransactionViewTree private[data] (
    tree: GenTransactionTree,
    override val subviewHashes: Seq[ViewHash],
) extends TransactionViewTree
    with HasVersionedWrapper[LightTransactionViewTree]
    with PrettyPrinting {

  /** Locates the single unblinded view in the tree, descending through fully-blinded
    * wrappers, and returns it together with its position.
    * Yields `Left(...)` if there is no unblinded view, several of them, or invalid blinding.
    */
  @tailrec
  private[data] override def findTheView(
      viewsWithIndex: Seq[(TransactionView, MerklePathElement)],
      viewPosition: ViewPosition = ViewPosition.root,
  ): Either[String, (TransactionView, ViewPosition)] = {
    viewsWithIndex match {
      case Seq() =>
        Left("A light transaction view tree must contain an unblinded view.")
      case Seq((singleView, index)) if singleView.hasAllLeavesBlinded =>
        // This node is only a blinded wrapper; keep descending.
        findTheView(singleView.subviews.unblindedElementsWithIndex, index +: viewPosition)
      case Seq((singleView, index))
          if singleView.viewCommonData.isFullyUnblinded && singleView.viewParticipantData.isFullyUnblinded && singleView.subviews.areFullyBlinded =>
        Right((singleView, index +: viewPosition))
      case Seq((singleView, _index)) =>
        Left(s"Invalid blinding in a light transaction view tree: $singleView")
      case multipleViews =>
        Left(
          s"A transaction view tree must not contain several (partially) unblinded views: " +
            s"${multipleViews.map(_._1)}"
        )
    }
  }

  override def validated: Either[String, this.type] = for {

    _ <- super[TransactionViewTree].validated

    // Check that the subview hashes are consistent with the tree
    _ <- EitherUtil.condUnitE(
      view.subviewHashesConsistentWith(subviewHashes),
      s"The provided subview hashes are inconsistent with the provided view (view: ${view.viewHash} " +
        s"at position: $viewPosition, subview hashes: $subviewHashes)",
    )

  } yield this

  override protected def companionObj = LightTransactionViewTree

  /** Serializes this light transaction view tree to its v1 protobuf representation. */
  def toProtoV1: v1.LightTransactionViewTree =
    v1.LightTransactionViewTree(
      tree = Some(tree.toProtoV1),
      subviewHashes = subviewHashes.map(_.toProtoPrimitive),
    )

  override lazy val pretty: Pretty[LightTransactionViewTree] = prettyOfClass(unnamedParam(_.tree))
}

object LightTransactionViewTree
    extends HasVersionedMessageWithContextCompanion[LightTransactionViewTree, HashOps] {
  override val name: String = "LightTransactionViewTree"

  val supportedProtoVersions: SupportedProtoVersions = SupportedProtoVersions(
    ProtoVersion(1) -> ProtoCodec(
      ProtocolVersion.v30,
      supportedProtoVersion(v1.LightTransactionViewTree)(fromProtoV1),
      _.toProtoV1.toByteString,
    )
  )

  final case class InvalidLightTransactionViewTree(message: String)
      extends RuntimeException(message)

  /** @throws InvalidLightTransactionViewTree if the tree is not a legal lightweight transaction view tree
    */
  def tryCreate(
      tree: GenTransactionTree,
      subviewHashes: Seq[ViewHash],
  ): LightTransactionViewTree =
    create(tree, subviewHashes).valueOr(err => throw InvalidLightTransactionViewTree(err))

  /** Creates a light transaction view tree, validating the blinding invariants.
    * Yields `Left(...)` instead of throwing on an illegal tree.
    */
  def create(
      tree: GenTransactionTree,
      subviewHashes: Seq[ViewHash],
  ): Either[String, LightTransactionViewTree] =
    new LightTransactionViewTree(tree, subviewHashes) {}.validated

  /** Deserializes a light transaction view tree from its v1 protobuf representation,
    * re-validating the invariants via [[create]].
    */
  private def fromProtoV1(
      hashOps: HashOps,
      protoT: v1.LightTransactionViewTree,
  ): ParsingResult[LightTransactionViewTree] =
    for {
      protoTree <- ProtoConverter.required("tree", protoT.tree)
      tree <- GenTransactionTree.fromProtoV1(hashOps, protoTree)
      subviewHashes <- protoT.subviewHashes.traverse(ViewHash.fromProtoPrimitive)
      result <- LightTransactionViewTree
        .create(tree, subviewHashes)
        .leftMap(e =>
          ProtoDeserializationError.InvariantViolation(s"Unable to create transaction tree: $e")
        )
    } yield result

  /** Converts a sequence of light transaction view trees to the corresponding full view trees.
    * A light transaction view tree can be converted to its corresponding full view tree if and only if
    * all descendants can be converted.
    *
    * To make the method more generic, light view trees are represented as `A` and full view trees as `B` and the
    * `lens` parameter is used to convert between these types, as needed.
    *
    * @param topLevelOnly whether to return only top-level full view trees
    * @param lightViewTrees the light transaction view trees to convert
    * @return A triple consisting of (1) the full view trees that could be converted,
    *         (2) the light view trees that could not be converted due to missing descendants, and
    *         (3) duplicate light view trees in the input.
    *         The view trees in the output are sorted by view position, i.e., in pre-order.
    *         If the input contains the same view several times, then
    *         the output (1) contains one occurrence and the output (3) every other occurrence of the view.
    */
  def toFullViewTrees[A, B](
      lens: PLens[A, B, LightTransactionViewTree, FullTransactionViewTree],
      protocolVersion: ProtocolVersion,
      hashOps: HashOps,
      topLevelOnly: Boolean,
  )(
      lightViewTrees: Seq[A]
  ): (Seq[B], Seq[A], Seq[A]) = {

    // Process in post-order so that every subview is reconstructed before its parent.
    val lightViewTreesBoxedInPostOrder = lightViewTrees
      .sortBy(lens.get(_).viewPosition)(ViewPosition.orderViewPosition.toOrdering)
      .reverse

    // All reconstructed full views
    val fullViewByHash = mutable.Map.empty[ViewHash, TransactionView]
    // All reconstructed full view trees, boxed, paired with their view hashes.
    val allFullViewTreesInPreorderB = mutable.ListBuffer.empty[(ViewHash, B)]
    // All light view trees, boxed, that could not be reconstructed to full view trees, due to missing descendants
    val invalidLightViewTreesB = Seq.newBuilder[A]
    // All duplicate light view trees, boxed.
    val duplicateLightViewTreesB = Seq.newBuilder[A]
    // All hashes of non-toplevel full view trees that could be reconstructed
    val subviewHashesB = Set.newBuilder[ViewHash]

    for (lightViewTreeBoxed <- lightViewTreesBoxedInPostOrder) {
      val lightViewTree = lens.get(lightViewTreeBoxed)
      val subviewHashes = lightViewTree.subviewHashes.toSet
      val missingSubviews = subviewHashes -- fullViewByHash.keys

      if (missingSubviews.isEmpty) {
        // All direct subviews have been reconstructed; splice them back into this view.
        val fullSubviewsSeq = lightViewTree.subviewHashes.map(fullViewByHash)
        val fullSubviews = TransactionSubviews(fullSubviewsSeq)(protocolVersion, hashOps)
        val fullView = lightViewTree.view.copy(subviews = fullSubviews)
        val fullViewTree = FullTransactionViewTree.tryCreate(
          lightViewTree.tree.mapUnblindedRootViews(_.replace(fullView.viewHash, fullView))
        )
        val fullViewTreeBoxed = lens.replace(fullViewTree)(lightViewTreeBoxed)

        if (topLevelOnly)
          subviewHashesB ++= subviewHashes
        if (fullViewByHash.contains(fullViewTree.viewHash)) {
          // Deduplicate views
          duplicateLightViewTreesB += lightViewTreeBoxed
        } else {
          // Prepend, so that post-order processing yields a pre-order result.
          (fullViewTree.viewHash -> fullViewTreeBoxed) +=: allFullViewTreesInPreorderB
          fullViewByHash += fullView.viewHash -> fullView
        }
      } else {
        invalidLightViewTreesB += lightViewTreeBoxed
      }
    }

    val allSubviewHashes = subviewHashesB.result()
    val allFullViewTreesInPreorder =
      allFullViewTreesInPreorderB
        .result()
        .collect {
          case (viewHash, fullViewTreeBoxed)
              if !topLevelOnly || !allSubviewHashes.contains(viewHash) =>
            fullViewTreeBoxed
        }

    (
      allFullViewTreesInPreorder,
      invalidLightViewTreesB.result().reverse,
      duplicateLightViewTreesB.result().reverse,
    )
  }

  /** Turns a full transaction view tree into a lightweight one. Not stack-safe.
 */
  def fromTransactionViewTree(
      tvt: FullTransactionViewTree
  ): LightTransactionViewTree = {
    // Blind all subviews of the single unblinded view; only their hashes are kept.
    val withBlindedSubviews = tvt.view.copy(subviews = tvt.view.subviews.blindFully)
    val genTransactionTree =
      tvt.tree.mapUnblindedRootViews(_.replace(tvt.viewHash, withBlindedSubviews))
    // By definition, the view in a TransactionViewTree has all subviews unblinded
    LightTransactionViewTree.tryCreate(genTransactionTree, tvt.subviewHashes)
  }

}
diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/MerkleSeq.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/MerkleSeq.scala
new file mode 100644
index 0000000000..d282ffc215
--- /dev/null
+++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/MerkleSeq.scala
@@ -0,0 +1,654 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.data

import cats.syntax.either.*
import cats.syntax.traverse.*
import com.digitalasset.canton.ProtoDeserializationError
import com.digitalasset.canton.crypto.HashOps
import com.digitalasset.canton.data.MerkleSeq.MerkleSeqElement
import com.digitalasset.canton.data.MerkleTree.*
import com.digitalasset.canton.data.ViewPosition.MerkleSeqIndex.Direction
import com.digitalasset.canton.data.ViewPosition.{
  MerklePathElement,
  MerkleSeqIndex,
  MerkleSeqIndexFromRoot,
}
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.protocol.{RootHash, v1}
import com.digitalasset.canton.serialization.ProtoConverter
import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.digitalasset.canton.version.*
import com.google.protobuf.ByteString

import scala.annotation.tailrec

/** Wraps a sequence that is also a [[MerkleTree]].
  * Elements are arranged in a balanced binary tree. As a result, if all except one element are blinded, the
  * resulting MerkleSeq has size logarithmic in the size of the fully unblinded MerkleSeq.
  *
  * @param rootOrEmpty the root element or `None` if the sequence is empty
  * @tparam M the type of elements
  */
final case class MerkleSeq[+M <: VersionedMerkleTree[_]](
    rootOrEmpty: Option[MerkleTree[MerkleSeqElement[M]]]
)(
    override val representativeProtocolVersion: RepresentativeProtocolVersion[MerkleSeq.type],
    hashOps: HashOps,
) extends PrettyPrinting
    with HasProtocolVersionedWrapper[
      MerkleSeq[VersionedMerkleTree[_]]
    ] {

  /** Obtain a representative protocol version for a [[MerkleSeqElement]] by casting ours.
    *
    * This is possible because currently there is a close connection between the versioning of these two structures.
    * Only use this in edge cases, where obtaining a representative for [[MerkleSeqElement]] is not possible without
    * making unsafe assumptions.
    *
    * WARNING: /!\ This will blow up if (when?) the versioning of the two structures diverges. /!\
    */
  private[data] lazy val tryMerkleSeqElementRepresentativeProtocolVersion
      : RepresentativeProtocolVersion[MerkleSeqElement.type] = {
    castRepresentativeProtocolVersion[MerkleSeqElement.type](MerkleSeqElement)
      .valueOr(e => throw new IllegalArgumentException(e))
  }

  // Unblinded elements paired with their path from the root; empty if the root is blinded or absent.
  lazy val unblindedElementsWithIndex: Seq[(M, MerklePathElement)] = rootOrEmpty match {
    case Some(root) =>
      root.unwrap match {
        case Right(unblindedRoot) => unblindedRoot.unblindedElements
        case Left(_) => Seq.empty
      }
    case None => Seq.empty
  }

  lazy val unblindedElements: Seq[M] = unblindedElementsWithIndex.map(_._1)

  // Root hashes of all blinded subtrees (a blinded root yields a single hash).
  lazy val blindedElements: Seq[RootHash] = rootOrEmpty match {
    case Some(root) => root.unwrap.fold(Seq(_), _.blindedElements)
    case None => Seq.empty
  }

  /** Converts this to a Seq.
    * The resulting seq may be shorter than the underlying fully unblinded seq,
    * because neighbouring blinded elements may be blinded into a single node.
    */
  lazy val toSeq: Seq[MerkleTree[M]] = rootOrEmpty match {
    case Some(t) => MerkleSeqElement.seqOf(t)
    case None => Seq.empty
  }

  def blindFully: MerkleSeq[M] = rootOrEmpty.fold(this)(root =>
    MerkleSeq(Some(root.blindFully))(representativeProtocolVersion, hashOps)
  )

  def isFullyBlinded: Boolean = rootOrEmpty.fold(true)(_.unwrap.isLeft)

  lazy val rootHashO: Option[RootHash] = rootOrEmpty.map(_.rootHash)

  // Applies the blinding policy to the root: blind entirely, reveal as-is, or recurse.
  private[data] def doBlind(
      optimizedBlindingPolicy: PartialFunction[RootHash, BlindingCommand]
  ): MerkleSeq[M] =
    rootOrEmpty match {
      case Some(root) =>
        optimizedBlindingPolicy(root.rootHash) match {
          case BlindSubtree =>
            MerkleSeq(Some(BlindedNode[MerkleSeqElement[M]](root.rootHash)))(
              representativeProtocolVersion,
              hashOps,
            )
          case RevealSubtree => this
          case RevealIfNeedBe =>
            val blindedRoot = root.withBlindedSubtrees(optimizedBlindingPolicy)
            MerkleSeq(Some(blindedRoot))(representativeProtocolVersion, hashOps)
        }
      case None => this
    }

  /** Blind everything in this MerkleSeq, except the leaf identified by the given path.
    * To ensure the path is valid, it should be obtained beforehand with a traversal
    * method such as [[unblindedElementsWithIndex]] and reversed with [[ViewPosition.reverse]].
    *
    * @param path the path from root to leaf
    * @param actionOnLeaf an action to transform the leaf once it is found
    * @throws java.lang.UnsupportedOperationException if the path does not lead to an unblinded leaf
    */
  def tryBlindAllButLeaf[A <: VersionedMerkleTree[A]](
      path: MerkleSeqIndexFromRoot,
      actionOnLeaf: M => A,
      // Ideally, we would have `actionOnLeaf: M => M`, as there is no need to change the type of the
      // leaf when blinding. Unfortunately, this becomes harder in practice: since M is covariant,
      // the type checker does not know the actual type at runtime and could still mishandle it.
  ): MerkleSeq[A] = {
    rootOrEmpty match {
      case Some(root) =>
        MerkleSeq(Some(root.tryUnwrap.tryBlindAllButLeaf(path, actionOnLeaf)))(
          representativeProtocolVersion,
          hashOps,
        )
      case None => throw new UnsupportedOperationException("Empty MerkleSeq")
    }
  }

  /** Serializes this MerkleSeq to its v1 protobuf representation. */
  def toProtoV1: v1.MerkleSeq =
    v1.MerkleSeq(rootOrEmpty = rootOrEmpty.map(MerkleTree.toBlindableNodeV1))

  override def pretty: Pretty[MerkleSeq.this.type] = prettyOfClass(
    param("root hash", _.rootHashO, _.rootOrEmpty.exists(!_.isBlinded)),
    unnamedParamIfDefined(_.rootOrEmpty),
  )

  /** Maps `f` over all unblinded elements; blinded nodes are kept as-is. */
  def mapM[A <: VersionedMerkleTree[A]](f: M => A): MerkleSeq[A] = {
    this.copy(rootOrEmpty = rootOrEmpty.map(_.unwrap.fold(BlindedNode(_), seq => seq.mapM(f))))(
      representativeProtocolVersion,
      hashOps,
    )
  }

  @transient override protected lazy val companionObj: MerkleSeq.type = MerkleSeq
}

object MerkleSeq
    extends HasProtocolVersionedWithContextCompanion[
      MerkleSeq[VersionedMerkleTree[_]],
      (
          HashOps,
          // This function is the deserializer for unblinded nodes
          ByteString => ParsingResult[MerkleTree[VersionedMerkleTree[_]]],
      ),
    ] {

  override def name: String = "MerkleSeq"

  override def supportedProtoVersions: SupportedProtoVersions =
    SupportedProtoVersions(
      ProtoVersion(1) -> VersionedProtoConverter(ProtocolVersion.v30)(v1.MerkleSeq)(
        supportedProtoVersion(_)(fromProtoV1),
        _.toProtoV1.toByteString,
      )
    )

  // A path from the root to a node, as a list of left/right turns.
  private type Path = List[Direction]
  private def emptyPath: Path = List.empty[Direction]

  /** A node of the balanced binary tree underlying a [[MerkleSeq]]:
    * either a [[Branch]] with two children or a [[Singleton]] wrapping a single element.
    */
  sealed trait MerkleSeqElement[+M <: VersionedMerkleTree[_]]
      extends MerkleTree[MerkleSeqElement[M]]
      with HasProtocolVersionedWrapper[MerkleSeqElement[VersionedMerkleTree[_]]]
      with Product
      with Serializable {
    @transient override protected lazy val companionObj: MerkleSeqElement.type = MerkleSeqElement

    lazy val unblindedElements: Seq[(M, MerklePathElement)] = computeUnblindedElements()

    // Doing this in a separate method, as Idea would otherwise complain about a covariant type parameter
    // being used in a contravariant position.
    private def computeUnblindedElements(): Seq[(M, MerklePathElement)] = {
      val builder = Seq.newBuilder[(M, MerklePathElement)]
      foreachUnblindedElement(emptyPath)((x, path) => builder.+=(x -> MerkleSeqIndex(path)))
      builder.result()
    }

    private[MerkleSeq] def foreachUnblindedElement(path: Path)(body: (M, Path) => Unit): Unit

    def blindedElements: Seq[RootHash] = {
      val builder = scala.collection.mutable.Seq.newBuilder[RootHash]
      foreachBlindedElement(builder.+=(_))
      builder.result()
    }.toSeq

    def toSeq: Seq[MerkleTree[M]]

    private[MerkleSeq] def foreachBlindedElement(body: RootHash => Unit): Unit

    // We repeat this here to enforce a more specific return type.
    override private[data] def withBlindedSubtrees(
        optimizedBlindingPolicy: PartialFunction[RootHash, BlindingCommand]
    ): MerkleSeqElement[M]

    def tryBlindAllButLeaf[A <: VersionedMerkleTree[A]](
        path: MerkleSeqIndexFromRoot,
        actionOnLeaf: M => A,
    ): MerkleSeqElement[A]

    def mapM[A <: VersionedMerkleTree[A]](f: M => A): MerkleSeqElement[A]

    def toProtoV1: v1.MerkleSeqElement
  }

  object Branch {
    /** Builds a [[Branch]], resolving the representative protocol version from `protocolVersion`. */
    def apply[M <: VersionedMerkleTree[_]](
        first: MerkleTree[MerkleSeqElement[M]],
        second: MerkleTree[MerkleSeqElement[M]],
        protocolVersion: ProtocolVersion,
    )(
        hashOps: HashOps
    ): Branch[M] = {
      new Branch[M](
        first,
        second,
        MerkleSeqElement.protocolVersionRepresentativeFor(protocolVersion),
      )(hashOps)
    }
  }

  /** An inner node of the balanced binary tree with exactly two children. */
  private[data] final case class Branch[+M <: VersionedMerkleTree[_]](
      first: MerkleTree[MerkleSeqElement[M]],
      second: MerkleTree[MerkleSeqElement[M]],
      override val representativeProtocolVersion: RepresentativeProtocolVersion[
        MerkleSeqElement.type
      ],
  )(
      hashOps: HashOps
  ) extends MerkleTreeInnerNode[Branch[M]](hashOps)
      with MerkleSeqElement[M] {

    override def subtrees: Seq[MerkleTree[_]] = Seq(first, second)

    override def toSeq: Seq[MerkleTree[M]] =
      MerkleSeqElement.seqOf(first) ++ MerkleSeqElement.seqOf(second)

    override private[data] def withBlindedSubtrees(
        optimizedBlindingPolicy: PartialFunction[RootHash, MerkleTree.BlindingCommand]
    ): Branch[M] =
      Branch(
        first.doBlind(optimizedBlindingPolicy),
        second.doBlind(optimizedBlindingPolicy),
        representativeProtocolVersion,
      )(
        hashOps
      )

    // Follows the path into one child and blinds the sibling entirely.
    override def tryBlindAllButLeaf[A <: VersionedMerkleTree[A]](
        path: MerkleSeqIndexFromRoot,
        actionOnLeaf: M => A,
    ): MerkleSeqElement[A] = {
      path.index match {
        case Direction.Left :: tailIndex =>
          Branch[A](
            first.tryUnwrap.tryBlindAllButLeaf(MerkleSeqIndexFromRoot(tailIndex), actionOnLeaf),
            BlindedNode[MerkleSeqElement[A]](second.rootHash),
            representativeProtocolVersion,
          )(hashOps)

        case Direction.Right :: tailIndex =>
          Branch[A](
            BlindedNode[MerkleSeqElement[A]](first.rootHash),
            second.tryUnwrap.tryBlindAllButLeaf(MerkleSeqIndexFromRoot(tailIndex), actionOnLeaf),
            representativeProtocolVersion,
          )(hashOps)

        case Nil =>
          throw new UnsupportedOperationException(
            "The path is invalid: path exhausted but leaf not reached"
          )
      }
    }

    override private[MerkleSeq] def foreachUnblindedElement(
        path: Path
    )(body: (M, Path) => Unit): Unit = {
      first.unwrap.foreach(_.foreachUnblindedElement(Direction.Left :: path)(body))
      second.unwrap.foreach(_.foreachUnblindedElement(Direction.Right :: path)(body))
    }

    override private[MerkleSeq] def foreachBlindedElement(body: RootHash => Unit): Unit = {
      first.unwrap.fold(body, _.foreachBlindedElement(body))
      second.unwrap.fold(body, _.foreachBlindedElement(body))
    }

    def toProtoV1: v1.MerkleSeqElement =
      v1.MerkleSeqElement(
        first = Some(MerkleTree.toBlindableNodeV1(first)),
        second = Some(MerkleTree.toBlindableNodeV1(second)),
        data = None,
      )

    override def pretty: Pretty[Branch.this.type] = prettyOfClass(
      param("first", _.first),
      param("second", _.second),
    )

    override def mapM[A <: VersionedMerkleTree[A]](
        f: M => A
    ): MerkleSeqElement[A] = {
      val newFirst: MerkleTree[MerkleSeqElement[A]] =
        first.unwrap.fold(h => BlindedNode(h), _.mapM(f))
      val newSecond = second.unwrap.fold(h => BlindedNode(h), _.mapM(f))
      Branch(newFirst, newSecond, representativeProtocolVersion)(hashOps)
    }
  }

  object Singleton {
    /** Builds a [[Singleton]], resolving the representative protocol version from `protocolVersion`. */
    private[data] def apply[M <: VersionedMerkleTree[_]](
        data: MerkleTree[M],
        protocolVersion: ProtocolVersion,
    )(
        hashOps: HashOps
    ): Singleton[M] = {
      Singleton(data, MerkleSeqElement.protocolVersionRepresentativeFor(protocolVersion))(hashOps)
    }
  }

  /** A leaf-level wrapper around a single element of the sequence. */
  private[data] final case class Singleton[+M <: VersionedMerkleTree[_]](
      data: MerkleTree[M],
      override val representativeProtocolVersion: RepresentativeProtocolVersion[
        MerkleSeqElement.type
      ],
  )(
      hashOps: HashOps
  ) extends MerkleTreeInnerNode[Singleton[M]](hashOps)
      with MerkleSeqElement[M] {
    // Singleton is a subtype of MerkleTree[_], because otherwise we would leak the size of the MerkleSeq in some cases
    // (e.g., if there is exactly one element).
    //
    // data is of type MerkleTree[_], because otherwise we would have to come up with a "surprising" implementation
    // of "withBlindedSubtrees" (i.e., blind the Singleton if the data is blinded).
+ + override def subtrees: Seq[MerkleTree[_]] = Seq(data) + + override def toSeq: Seq[MerkleTree[M]] = Seq(data) + + override private[data] def withBlindedSubtrees( + optimizedBlindingPolicy: PartialFunction[RootHash, MerkleTree.BlindingCommand] + ): Singleton[M] = + Singleton[M](data.doBlind(optimizedBlindingPolicy), representativeProtocolVersion)(hashOps) + + override def tryBlindAllButLeaf[A <: VersionedMerkleTree[A]]( + path: MerkleSeqIndexFromRoot, + actionOnLeaf: M => A, + ): MerkleSeqElement[A] = { + path.index match { + case List() => + Singleton( + actionOnLeaf(data.tryUnwrap), + representativeProtocolVersion, + )(hashOps) + case other => + throw new UnsupportedOperationException( + s"The path is invalid: reached a leaf but the path contains more steps ($other)" + ) + } + } + + override private[MerkleSeq] def foreachUnblindedElement(path: Path)( + body: (M, Path) => Unit + ): Unit = + data.unwrap.foreach(body(_, path)) + + override private[MerkleSeq] def foreachBlindedElement(body: RootHash => Unit): Unit = + data.unwrap.fold(body, _ => ()) + + def toProtoV1: v1.MerkleSeqElement = + v1.MerkleSeqElement( + first = None, + second = None, + data = Some(MerkleTree.toBlindableNodeV1(data)), + ) + + override def pretty: Pretty[Singleton.this.type] = prettyOfClass(unnamedParam(_.data)) + + override def mapM[A <: VersionedMerkleTree[A]]( + f: M => A + ): MerkleSeqElement[A] = { + val newData: MerkleTree[A] = data.unwrap.fold(h => BlindedNode(h), f(_)) + Singleton(newData, representativeProtocolVersion)(hashOps) + } + } + + object MerkleSeqElement + extends HasProtocolVersionedWithContextCompanion[ + MerkleSeqElement[VersionedMerkleTree[_]], + // The function in the second part of the context is the deserializer for unblinded nodes + (HashOps, ByteString => ParsingResult[MerkleTree[VersionedMerkleTree[_]]]), + ] { + override val name: String = "MerkleSeqElement" + + def seqOf[M <: VersionedMerkleTree[_]]( + elementTree: MerkleTree[MerkleSeqElement[M]] + ): 
Seq[MerkleTree[M]] = elementTree.unwrap match { + case Right(element) => element.toSeq + case Left(rootHash) => Seq(BlindedNode(rootHash)) + } + + override def supportedProtoVersions: SupportedProtoVersions = + SupportedProtoVersions( + ProtoVersion(1) -> VersionedProtoConverter(ProtocolVersion.v30)(v1.MerkleSeqElement)( + supportedProtoVersion(_)(fromProtoV1), + _.toProtoV1.toByteString, + ) + ) + + private[MerkleSeq] def fromByteStringV1[M <: VersionedMerkleTree[_]]( + hashOps: HashOps, + dataFromByteString: ByteString => ParsingResult[ + MerkleTree[M with HasProtocolVersionedWrapper[_]] + ], + )(bytes: ByteString): ParsingResult[MerkleSeqElement[M]] = { + for { + proto <- ProtoConverter.protoParser(UntypedVersionedMessage.parseFrom)(bytes) + unwrapped <- proto.wrapper.data.toRight( + ProtoDeserializationError.FieldNotSet(s"MerkleSeqElement: data") + ) + merkleSeqElementP <- ProtoConverter.protoParser(v1.MerkleSeqElement.parseFrom)(unwrapped) + merkleSeqElement <- fromProtoV1( + (hashOps, dataFromByteString), + merkleSeqElementP, + ) + } yield merkleSeqElement + } + + private[MerkleSeq] def fromProtoV1[M <: VersionedMerkleTree[_]]( + context: ( + HashOps, + ByteString => ParsingResult[ + MerkleTree[M with HasProtocolVersionedWrapper[_]] + ], + ), + merkleSeqElementP: v1.MerkleSeqElement, + ): ParsingResult[MerkleSeqElement[M]] = { + val (hashOps, dataFromByteString) = context + val v1.MerkleSeqElement(maybeFirstP, maybeSecondP, maybeDataP) = merkleSeqElementP + + def branchChildFromMaybeProtoBlindableNode( + maybeNodeP: Option[v1.BlindableNode] + ): ParsingResult[Option[MerkleTree[MerkleSeqElement[M]]]] = + maybeNodeP.traverse(nodeP => + MerkleTree.fromProtoOptionV1(Some(nodeP), fromByteStringV1(hashOps, dataFromByteString)) + ) + + def singletonDataFromMaybeProtoBlindableNode( + maybeDataP: Option[v1.BlindableNode] + ): ParsingResult[Option[MerkleTree[M with HasProtocolVersionedWrapper[_]]]] = + maybeDataP.traverse(dataP => 
MerkleTree.fromProtoOptionV1(Some(dataP), dataFromByteString)) + + val rpv: RepresentativeProtocolVersion[MerkleSeqElement.type] = + protocolVersionRepresentativeFor(ProtoVersion(1)) + + for { + maybeFirst <- branchChildFromMaybeProtoBlindableNode(maybeFirstP) + maybeSecond <- branchChildFromMaybeProtoBlindableNode(maybeSecondP) + maybeData <- singletonDataFromMaybeProtoBlindableNode(maybeDataP) + + merkleSeqElement <- (maybeFirst, maybeSecond, maybeData) match { + case (Some(first), Some(second), None) => + Right(Branch(first, second, rpv)(hashOps)) + case (None, None, Some(data)) => + Right(Singleton[M](data, rpv)(hashOps)) + case (None, None, None) => + ProtoDeserializationError + .OtherError(s"Unable to create MerkleSeqElement, as all fields are undefined.") + .asLeft + case (Some(_), Some(_), Some(_)) => + ProtoDeserializationError + .OtherError( + s"Unable to create MerkleSeqElement, as both the fields for a Branch and a Singleton are defined." + ) + .asLeft + case (_, _, _) => + // maybeFirst.isDefined != maybeSecond.isDefined + def mkState: Option[_] => String = _.fold("undefined")(_ => "defined") + + ProtoDeserializationError + .OtherError( + s"Unable to create MerkleSeqElement, as first is ${mkState(maybeFirst)} and second is ${mkState(maybeSecond)}." 
+ ) + .asLeft + } + + } yield merkleSeqElement + } + } + + def fromProtoV1[M <: VersionedMerkleTree[_]]( + context: ( + HashOps, + ByteString => ParsingResult[ + MerkleTree[M with HasProtocolVersionedWrapper[_]] + ], + ), + merkleSeqP: v1.MerkleSeq, + ): ParsingResult[MerkleSeq[M]] = { + val (hashOps, dataFromByteString) = context + val v1.MerkleSeq(maybeRootP) = merkleSeqP + val representativeProtocolVersion = protocolVersionRepresentativeFor(ProtoVersion(1)) + for { + rootOrEmpty <- maybeRootP.traverse(_ => + MerkleTree.fromProtoOptionV1( + maybeRootP, + MerkleSeqElement.fromByteStringV1[M](hashOps, dataFromByteString), + ) + ) + } yield MerkleSeq(rootOrEmpty)(representativeProtocolVersion, hashOps) + } + + def fromSeq[M <: VersionedMerkleTree[_]]( + hashOps: HashOps, + protocolVersion: ProtocolVersion, + )(elements: Seq[MerkleTree[M]]): MerkleSeq[M] = { + val representativeProtocolVersion = protocolVersionRepresentativeFor(protocolVersion) + val elemRepresentativeProtocolVersion = + MerkleSeqElement.protocolVersionRepresentativeFor(protocolVersion) + fromSeq(hashOps, representativeProtocolVersion, elemRepresentativeProtocolVersion)(elements) + } + + def fromSeq[M <: VersionedMerkleTree[_]]( + hashOps: HashOps, + representativeProtocolVersion: RepresentativeProtocolVersion[MerkleSeq.type], + elemRepresentativeProtocolVersion: RepresentativeProtocolVersion[MerkleSeqElement.type], + )(elements: Seq[MerkleTree[M]]): MerkleSeq[M] = { + if (elements.isEmpty) { + MerkleSeq.empty(representativeProtocolVersion, hashOps) + } else { + // elements is non-empty + + // Arrange elements in a balanced binary tree + val merkleSeqElements = elements.iterator + .map( + Singleton(_, elemRepresentativeProtocolVersion)(hashOps) + ) // Wrap elements in singletons + .map { // Blind singletons, if the enclosed element is blinded + case singleton @ Singleton(BlindedNode(_), _) => BlindedNode(singleton.rootHash) + case singleton => singleton + } + + val root = 
mkTree[MerkleTree[MerkleSeqElement[M]]](merkleSeqElements, elements.size) { + (first, second) => + val branch = Branch(first, second, elemRepresentativeProtocolVersion)(hashOps) + if (first.isBlinded && second.isBlinded) BlindedNode(branch.rootHash) else branch + } + + MerkleSeq(Some(root))(representativeProtocolVersion, hashOps) + } + } + + def apply[M <: VersionedMerkleTree[_]]( + rootOrEmpty: Option[MerkleTree[MerkleSeqElement[M]]], + protocolVersion: ProtocolVersion, + )(hashOps: HashOps): MerkleSeq[M] = { + MerkleSeq(rootOrEmpty)(protocolVersionRepresentativeFor(protocolVersion), hashOps) + } + + /** Create an empty MerkleSeq */ + def empty[M <: VersionedMerkleTree[_]]( + protocolVersion: ProtocolVersion, + hashOps: HashOps, + ): MerkleSeq[M] = + empty(protocolVersionRepresentativeFor(protocolVersion), hashOps) + + def empty[M <: VersionedMerkleTree[_]]( + representativeProtocolVersion: RepresentativeProtocolVersion[MerkleSeq.type], + hashOps: HashOps, + ): MerkleSeq[M] = + MerkleSeq(None)(representativeProtocolVersion, hashOps) + + /** Arranges a non-empty sequence of `elements` in a balanced binary tree. + * + * @param size The [[scala.collection.Iterator.length]] of `elements`. + * We take the size as a separate parameter because the implementation of the method relies on + * [[scala.collection.Iterator.grouped]] and computing the size of an iterator takes linear time. + * @param combine The function to construct an inner node of the binary tree from two subtrees. 
+ */ + @tailrec + private def mkTree[E](elements: Iterator[E], size: Int)(combine: (E, E) => E): E = { + require(size > 0, "This method must be called with a positive size.") + + if (size == 1) { + elements.next() + } else { + // size > 1 + + val newElements = elements.grouped(2).map { // Create the next layer of the tree + case Seq(first, second) => combine(first, second) + case Seq(single) => single + case group => throw new IllegalStateException(s"Unexpected group size: ${group.size}") + } + + val newSize = size / 2 + size % 2 // half of size, rounded upwards + mkTree(newElements, newSize)(combine) + } + } + + /** Computes the [[ViewPosition.MerkleSeqIndex]]es for all leaves in a [[MerkleSeq]] of the given size. + * The returned indices are in sequence. + */ + // takes O(size) runtime and memory due to sharing albeit there are O(size * log(size)) directions + def indicesFromSeq(size: Int): Seq[MerkleSeqIndex] = { + require(size >= 0, "A sequence cannot have negative size") + + if (size == 0) Seq.empty[MerkleSeqIndex] + else { + val tree = mkTree[Node](Iterator.fill[Node](size)(Leaf), size)(Inner) + // enumerate all paths in the tree from left to right + tree.addTo(emptyPath, List.empty[MerkleSeqIndex]) + } + } + + // Helper classes for indicesFromSeq + private trait Node extends Product with Serializable { + + /** Prefixes `subsequentIndices` with all paths from the leaves of this subtree to the root + * + * @param pathFromRoot The path from this node to the root. 
+ */ + def addTo(pathFromRoot: Path, subsequentIndices: List[MerkleSeqIndex]): List[MerkleSeqIndex] + } + private case object Leaf extends Node { + override def addTo( + pathFromRoot: Path, + subsequentPaths: List[MerkleSeqIndex], + ): List[MerkleSeqIndex] = + MerkleSeqIndex(pathFromRoot) :: subsequentPaths + } + private final case class Inner(left: Node, right: Node) extends Node { + override def addTo( + pathFromRoot: Path, + subsequentPaths: List[MerkleSeqIndex], + ): List[MerkleSeqIndex] = + left.addTo( + Direction.Left :: pathFromRoot, + right.addTo(Direction.Right :: pathFromRoot, subsequentPaths), + ) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/MerkleTree.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/MerkleTree.scala new file mode 100644 index 0000000000..57d566bc96 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/MerkleTree.scala @@ -0,0 +1,256 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.data + +import cats.implicits.* +import com.digitalasset.canton.crypto.* +import com.digitalasset.canton.data.MerkleSeq.MerkleSeqElement +import com.digitalasset.canton.data.MerkleTree.* +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.protocol.{RootHash, v1} +import com.digitalasset.canton.serialization.HasCryptographicEvidence +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.version.HasProtocolVersionedWrapper +import com.digitalasset.canton.{DiscardOps, ProtoDeserializationError} +import com.google.protobuf.ByteString +import monocle.Lens + +import scala.collection.mutable + +/** Encapsulate a Merkle tree. + * Every node has an arbitrary number of children. + * Every node has a `rootHash`. 
+ * + * Every node may be blinded, i.e., the `rootHash` remains, but the children are removed. The `rootHash` does not + * change if some children are blinded. + * + * @tparam A the runtime type of the class that actually implements this instance of `MerkleTree`. + * I.e., a proper implementation of this trait must be declared like + * `class MyMerkleTree extends MerkleTree[MyMerkleTree]`. + */ +trait MerkleTree[+A] extends Product with Serializable with PrettyPrinting { + def subtrees: Seq[MerkleTree[_]] + + def rootHash: RootHash + + /** @return `Left(hash)`, if this is a blinded tree and `Right(tree)` otherwise. + */ + def unwrap: Either[RootHash, A] + + /** Yields this instance with type `A` + * + * @throws java.lang.UnsupportedOperationException if this is blinded + */ + def tryUnwrap: A = unwrap match { + case Right(r) => r + case Left(_) => + throw new UnsupportedOperationException( + s"Unable to unwrap object of type ${getClass.getSimpleName}" + ) + } + + lazy val blindFully: MerkleTree[A] = BlindedNode[A](rootHash) + + /** Blinds this Merkle tree according to the commands given by `blindingStatus`. + * Traverses this tree in pre-order. + * + * @param blindingPolicy assigns blinding commands to subtrees + * @throws java.lang.IllegalArgumentException if `blindingPolicy` does not assign a blinding command to some subtree + * and all ancestors of subtree have the blinding command `RevealIfNeedBe` + */ + final def blind( + blindingPolicy: PartialFunction[MerkleTree[_], BlindingCommand] + ): MerkleTree[A] = { + + val optimizedBlindingPolicy = mutable.Map[RootHash, BlindingCommand]() + optimizeBlindingPolicy(this).discard + + // Optimizes the blinding policy by replacing RevealIfNeedBe with RevealSubtree or BlindSubtree + // whenever possible. 
+ // Consequently, if the optimized policy associates a node with RevealIfNeedBe, then: + // - the node effectively needs to be revealed, (otherwise it would be "BlindSubtree") + // - the node effectively needs to be copied (because it has a blinded descendant). + // + // Returns (allRevealed, allBlinded) indicating whether all nodes in tree are revealed/blinded. + def optimizeBlindingPolicy(tree: MerkleTree[_]): (Boolean, Boolean) = { + completeBlindingPolicy(tree) match { + case BlindSubtree => + optimizedBlindingPolicy += tree.rootHash -> BlindSubtree + (false, true) + case RevealSubtree => + optimizedBlindingPolicy += tree.rootHash -> RevealSubtree + (true, false) + case RevealIfNeedBe => + val (allRevealed, allBlinded) = tree.subtrees + .map(optimizeBlindingPolicy) + .foldLeft((true, true)) { case ((r1, b1), (r2, b2)) => + (r1 && r2, b1 && b2) + } + val command = + if (allBlinded) BlindSubtree else if (allRevealed) RevealSubtree else RevealIfNeedBe + optimizedBlindingPolicy += tree.rootHash -> command + (allRevealed && !allBlinded, allBlinded) + } + } + + def completeBlindingPolicy(tree: MerkleTree[_]): BlindingCommand = + blindingPolicy.applyOrElse[MerkleTree[_], BlindingCommand]( + tree, + { + case _: BlindedNode[_] => BlindSubtree + case _: MerkleSeqElement[_] => RevealIfNeedBe + case _ => + throw new IllegalArgumentException( + s"No blinding command specified for subtree of type ${tree.getClass}" + ) + }, + ) + + doBlind(optimizedBlindingPolicy) + } + + /** Internal function that effectively performs the blinding. */ + private[data] final def doBlind( + optimizedBlindingPolicy: PartialFunction[RootHash, BlindingCommand] + ): MerkleTree[A] = + optimizedBlindingPolicy(rootHash) match { + case BlindSubtree => BlindedNode[A](rootHash) + case RevealSubtree => this + case RevealIfNeedBe => withBlindedSubtrees(optimizedBlindingPolicy) + } + + /** Yields a copy of this that results from applying `doBlind(optimizedBlindingPolicy)` to all children. 
*/ + private[data] def withBlindedSubtrees( + optimizedBlindingPolicy: PartialFunction[RootHash, BlindingCommand] + ): MerkleTree[A] + + def isBlinded: Boolean = unwrap.isLeft + + lazy val isFullyUnblinded: Boolean = unwrap.isRight && subtrees.forall(_.isFullyUnblinded) + + lazy val hasAllLeavesBlinded: Boolean = unwrap.isRight && subtrees.collectFirst { + case l: MerkleTreeLeaf[_] => l + }.isEmpty +} + +/** An inner node of a Merkle tree. + * Has no data, no salt, and an arbitrary number of subtrees. + * An inner node is considered unblinded. + */ +abstract class MerkleTreeInnerNode[+A](val hashOps: HashOps) extends MerkleTree[A] { + this: A => + + override lazy val rootHash: RootHash = { + val hashBuilder = hashOps.build(HashPurpose.MerkleTreeInnerNode).add(subtrees.length) + subtrees.foreach { subtree => + // All hashes within a Merkle tree use the same hash algorithm and are therefore of fixed length, + // so no length prefix is needed. + hashBuilder.addWithoutLengthPrefix(subtree.rootHash.getCryptographicEvidence) + } + RootHash(hashBuilder.finish()) + } + + override def unwrap: Either[RootHash, A] = Right(this) +} + +/** A leaf of a Merkle tree. + * Has data, a salt, and no children. + * A leaf is considered unblinded. + */ +abstract class MerkleTreeLeaf[+A <: HasCryptographicEvidence](val hashOps: HashOps) + extends MerkleTree[A] { + this: A => + + /** The `HashPurpose` to be used for computing the root hash. + * Must uniquely identify the type of this instance. + * Must be different from `HashPurpose.MerkleTreeInnerNode`. 
+ * + * @see [[com.digitalasset.canton.crypto.HashBuilder]] + */ + def hashPurpose: HashPurpose + + def salt: Salt + + def subtrees: Seq[MerkleTree[_]] = Seq.empty + + override lazy val rootHash: RootHash = { + if (hashPurpose == HashPurpose.MerkleTreeInnerNode) { + throw new IllegalStateException( + s"HashPurpose must not be a ${HashPurpose.description(hashPurpose)}" + ) + } + val hash = hashOps + .build(hashPurpose) + .add(salt.forHashing) + .add(tryUnwrap.getCryptographicEvidence) + .finish() + RootHash(hash) + } + + override def unwrap = Right(this) + + override private[data] def withBlindedSubtrees( + optimizedBlindingPolicy: PartialFunction[RootHash, BlindingCommand] + ): A with MerkleTree[A] = this +} + +/** A blinded node of a Merkle tree. + * Has no subtrees, as they are all blinded. + */ +final case class BlindedNode[+A](rootHash: RootHash) extends MerkleTree[A] { + override def subtrees: Seq[MerkleTree[_]] = Seq.empty + + override private[data] def withBlindedSubtrees( + optimizedBlindingPolicy: PartialFunction[RootHash, BlindingCommand] + ): MerkleTree[A] = this + + override def unwrap: Either[RootHash, A] = Left(rootHash) + + override def pretty: Pretty[BlindedNode.this.type] = prettyOfClass(unnamedParam(_.rootHash)) +} + +object MerkleTree { + type VersionedMerkleTree[A] = MerkleTree[A] with HasProtocolVersionedWrapper[_] + + /** Command indicating whether and how to blind a Merkle tree. 
*/ + sealed trait BlindingCommand extends Product with Serializable + + case object BlindSubtree extends BlindingCommand + + case object RevealSubtree extends BlindingCommand + + /** Reveal the node if at least one descendant is revealed as well */ + case object RevealIfNeedBe extends BlindingCommand + + def toBlindableNodeV1(node: MerkleTree[HasProtocolVersionedWrapper[_]]): v1.BlindableNode = + v1.BlindableNode(blindedOrNot = node.unwrap match { + case Left(h) => v1.BlindableNode.BlindedOrNot.BlindedHash(h.toProtoPrimitive) + case Right(n) => + v1.BlindableNode.BlindedOrNot.Unblinded( + n.toByteString + ) + }) + + def fromProtoOptionV1[NodeType]( + protoNode: Option[v1.BlindableNode], + f: ByteString => ParsingResult[MerkleTree[NodeType]], + ): ParsingResult[MerkleTree[NodeType]] = { + import v1.BlindableNode.BlindedOrNot as BON + protoNode.map(_.blindedOrNot) match { + case Some(BON.BlindedHash(hashBytes)) => + RootHash + .fromProtoPrimitive(hashBytes) + .bimap( + e => ProtoDeserializationError.OtherError(s"Failed to deserialize root hash: $e"), + hash => BlindedNode.apply[NodeType](hash), + ) + case Some(BON.Unblinded(unblindedNode)) => f(unblindedNode) + case Some(BON.Empty) | None => + Left(ProtoDeserializationError.OtherError(s"Missing blindedOrNot specification")) + } + } + + def tryUnwrap[A <: MerkleTree[A]]: Lens[MerkleTree[A], A] = + Lens[MerkleTree[A], A](_.tryUnwrap)(unwrapped => _ => unwrapped) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/ParticipantMetadata.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/ParticipantMetadata.scala new file mode 100644 index 0000000000..360fddcc46 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/ParticipantMetadata.scala @@ -0,0 +1,109 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.data

import cats.syntax.either.*
import com.digitalasset.canton.*
import com.digitalasset.canton.crypto.*
import com.digitalasset.canton.logging.pretty.Pretty
import com.digitalasset.canton.protocol.v0
import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.digitalasset.canton.serialization.{ProtoConverter, ProtocolVersionedMemoizedEvidence}
import com.digitalasset.canton.version.*
import com.google.protobuf.ByteString

/** Information concerning every '''participant''' involved in the underlying transaction.
  *
  * @param ledgerTime The ledger time of the transaction
  * @param submissionTime The submission time of the transaction
  * @param workflowIdO optional workflow id associated with the ledger api provided workflow instance
  */
final case class ParticipantMetadata private (
    ledgerTime: CantonTimestamp,
    submissionTime: CantonTimestamp,
    workflowIdO: Option[WorkflowId],
    salt: Salt,
)(
    hashOps: HashOps,
    override val representativeProtocolVersion: RepresentativeProtocolVersion[
      ParticipantMetadata.type
    ],
    override val deserializedFrom: Option[ByteString],
) extends MerkleTreeLeaf[ParticipantMetadata](hashOps)
    with HasProtocolVersionedWrapper[ParticipantMetadata]
    with ProtocolVersionedMemoizedEvidence {

  // Serialization is memoized: if this instance was deserialized, the original bytes are reused.
  override protected[this] def toByteStringUnmemoized: ByteString =
    super[HasProtocolVersionedWrapper].toByteString

  override val hashPurpose: HashPurpose = HashPurpose.ParticipantMetadata

  override def pretty: Pretty[ParticipantMetadata] = prettyOfClass(
    param("ledger time", _.ledgerTime),
    param("submission time", _.submissionTime),
    paramIfDefined("workflow id", _.workflowIdO),
    param("salt", _.salt),
  )

  @transient override protected lazy val companionObj: ParticipantMetadata.type =
    ParticipantMetadata

  private def toProtoV0: v0.ParticipantMetadata = v0.ParticipantMetadata(
    ledgerTime = Some(ledgerTime.toProtoPrimitive),
    submissionTime = Some(submissionTime.toProtoPrimitive),
    // An absent workflow id is encoded as the empty string on the wire.
    workflowId = workflowIdO.fold("")(_.toProtoPrimitive),
    salt = Some(salt.toProtoV0),
  )
}

object ParticipantMetadata
    extends HasMemoizedProtocolVersionedWithContextCompanion[ParticipantMetadata, HashOps] {
  override val name: String = "ParticipantMetadata"

  val supportedProtoVersions = SupportedProtoVersions(
    ProtoVersion(0) -> VersionedProtoConverter(ProtocolVersion.v30)(v0.ParticipantMetadata)(
      supportedProtoVersionMemoized(_)(fromProtoV0),
      _.toProtoV0.toByteString,
    )
  )

  /** Builds a fresh [[ParticipantMetadata]] (i.e. one that was not deserialized from bytes)
    * for the given protocol version.
    */
  def apply(hashOps: HashOps)(
      ledgerTime: CantonTimestamp,
      submissionTime: CantonTimestamp,
      workflowId: Option[WorkflowId],
      salt: Salt,
      protocolVersion: ProtocolVersion,
  ): ParticipantMetadata =
    ParticipantMetadata(ledgerTime, submissionTime, workflowId, salt)(
      hashOps,
      protocolVersionRepresentativeFor(protocolVersion),
      None,
    )

  /** Deserializes proto version 0, memoizing the original `bytes`. */
  private def fromProtoV0(hashOps: HashOps, metadataP: v0.ParticipantMetadata)(
      bytes: ByteString
  ): ParsingResult[ParticipantMetadata] =
    for {
      ledgerTime <- ProtoConverter
        .parseRequired(CantonTimestamp.fromProtoPrimitive, "ledgerTime", metadataP.ledgerTime)
      // NOTE(review): positional destructuring relies on the proto field order (salt first);
      // confirm against the v0.ParticipantMetadata message definition if it ever changes.
      v0.ParticipantMetadata(saltP, _ledgerTimeP, submissionTimeP, workflowIdP) = metadataP
      submissionTime <- ProtoConverter
        .parseRequired(CantonTimestamp.fromProtoPrimitive, "submissionTime", submissionTimeP)
      workflowId <- workflowIdP match {
        case "" => Right(None) // empty string encodes "no workflow id"
        case wf =>
          WorkflowId
            .fromProtoPrimitive(wf)
            .map(Some(_))
            .leftMap(ProtoDeserializationError.ValueDeserializationError("workflowId", _))
      }
      salt <- ProtoConverter
        .parseRequired(Salt.fromProtoV0, "salt", saltP)
        .leftMap(_.inField("salt"))
    } yield ParticipantMetadata(ledgerTime, submissionTime, workflowId, salt)(
      hashOps,
      protocolVersionRepresentativeFor(ProtoVersion(0)),
      Some(bytes),
    )
}
diff --git
a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/ParticipantTransactionView.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/ParticipantTransactionView.scala
new file mode 100644
index 0000000000..e515fe2abb
--- /dev/null
+++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/ParticipantTransactionView.scala
@@ -0,0 +1,41 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.data

import cats.syntax.either.*

/** Tags transaction views where all the view metadata are visible (such as in the views sent to participants).
  *
  * Note that the subviews and their metadata are not guaranteed to be visible.
  */
final case class ParticipantTransactionView private (view: TransactionView) {
  def unwrap: TransactionView = view

  // Both accessors are safe: the factory methods below verify that neither part is blinded.
  def viewCommonData: ViewCommonData = view.viewCommonData.tryUnwrap
  def viewParticipantData: ViewParticipantData = view.viewParticipantData.tryUnwrap
}

object ParticipantTransactionView {

  final case class InvalidParticipantTransactionView(message: String)
      extends RuntimeException(message)

  /** Wraps `view`, checking that both the common data and the participant data are unblinded.
    *
    * @throws InvalidParticipantTransactionView if either part of the view metadata is blinded
    */
  def tryCreate(view: TransactionView): ParticipantTransactionView = {
    val commonDataV = view.viewCommonData.unwrap
      .leftMap(rh => s"Common data blinded (hash $rh)")
      .toValidatedNec
    val participantDataV = view.viewParticipantData.unwrap
      .leftMap(rh => s"Participant data blinded (hash $rh)")
      .toValidatedNec
    // Accumulate both blinding errors (if any) before failing.
    commonDataV
      .product(participantDataV)
      .map(_ => new ParticipantTransactionView(view))
      .valueOr(err =>
        throw InvalidParticipantTransactionView(
          s"Unable to convert view (hash ${view.viewHash}) to a participant view: $err"
        )
      )
  }

  /** Non-throwing variant of [[tryCreate]]. */
  def create(view: TransactionView): Either[String, ParticipantTransactionView] =
    Either.catchOnly[InvalidParticipantTransactionView](tryCreate(view)).leftMap(_.message)
}
diff --git
a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/PeanoQueue.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/PeanoQueue.scala new file mode 100644 index 0000000000..e31a672ffe --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/PeanoQueue.scala @@ -0,0 +1,109 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.data + +import com.digitalasset.canton.data.PeanoQueue.AssociatedValue + +import scala.annotation.tailrec + +/** A Peano priority queue is a mutable priority queue of key-value pairs with ordered keys, + * starting from an index called the head, + * where pairs may be added in any order, + * but are polled strictly in their natural sequence. + * The order on keys must be a linear sequence, + * i.e., isomorphic to the order on a possibly unbounded interval of the integers. + * If the priority queue is missing a key from the sequence, we cannot poll that key + * until a key-value pair for that key is added. + * + * For example, in a priority queue with head 1, the keys polled are 1, then 2, then 3, etc. + * + * The head index is mutable, and increments each time the priority queue is successfully polled. + * Keys are unique and their value associations may not be modified. + * + * @tparam K The type of keys + * @tparam V The type of values + */ +trait PeanoQueue[K, V] { + + /** Returns the head of the [[PeanoQueue]]. + * + * The head denotes the next key to be pulled. + * The head key need not yet have been inserted into the [[PeanoQueue]]. + */ + def head: K + + /** Returns the front of the [[PeanoQueue]]. + * + * The front is defined as the least key that is at least [[PeanoQueue.head]] + * and that has not yet been inserted into the [[PeanoQueue]]. + */ + def front: K + + /** Inserts the key-value pair `(key, value)` to the [[PeanoQueue]]. 
+ * This may change the value for [[PeanoQueue.front]] + * + * Inserting a key-value pair is idempotent. + * If the keys are bounded from above, then the maximal value must not be inserted. + * + * @param key The key to be inserted. + * If the key has been added previously, then the following applies: + *
+     *            <ul>
+     *              <li>If the key is below [[PeanoQueue.head]], then this insert operation has no effect.</li>
+     *              <li>If the key is at least [[PeanoQueue.head]] and the value is the same, this operation has no effect.</li>
+     *              <li>If the key is at least [[PeanoQueue.head]] and the value is different,
+     *                  then the [[PeanoQueue]] is not changed and an [[java.lang.IllegalArgumentException]] is thrown.</li>
+     *            </ul>
+ * @param value The value associated with the key. + * @return whether the key is at least [[PeanoQueue.head]] + * @throws java.lang.IllegalArgumentException if the keys are bounded from above and `key` is the maximal value, or + * if the `key` is above [[PeanoQueue.head]] and has been added with a different value. + */ + def insert(key: K, value: V): Boolean + + /** Whether the key `key` has already been `insert`-ed to the [[PeanoQueue]]. All values below the [[PeanoQueue.front]] + * are considered to have been inserted to the [[PeanoQueue]], even if they are below the initial [[PeanoQueue.head]]. + */ + def alreadyInserted(key: K): Boolean + + /** Returns the value associated with the given `key`. */ + def get(key: K): AssociatedValue[V] + + /** Returns and drops the key at [[PeanoQueue.head]] and its associated value, if present. + * If so, this also increments [[PeanoQueue.head]]. + * Returns [[scala.None]] to indicate that the key [[PeanoQueue.head]] has not yet been inserted. + */ + def poll(): Option[(K, V)] + + /** Drops all elements from the [[PeanoQueue]] below the front and sets head to front. + * @return [[scala.None$]] if the head already was at the front or the value associated with the last value before head otherwise. + */ + def dropUntilFront(): Option[(K, V)] = { + @tailrec + def go(last: Option[(K, V)]): Option[(K, V)] = poll() match { + case None => last + case kvO @ Some(_) => go(kvO) + } + go(None) + } +} + +object PeanoQueue { + + sealed trait AssociatedValue[+V] extends Product with Serializable + + /** Returned by [[PeanoQueue.get]] for a `key` that is below [[PeanoQueue.head]]. */ + final case object BeforeHead extends AssociatedValue[Nothing] + + /** Returned by [[PeanoQueue.get]] for a `key` that is at least [[PeanoQueue.front]] and has not been inserted. + * @param floor The value associated with the next smaller key. 
+ * [[scala.None$]] if there is no such key that is at least [[PeanoQueue.head]] + * @param ceiling The value associated with the next larger key, if any. + */ + final case class NotInserted[V](floor: Option[V], ceiling: Option[V]) extends AssociatedValue[V] + + /** Returned by [[PeanoQueue.get]] for a `key` that is at least [[PeanoQueue.head]] + * and has been inserted with the value [[value]] + */ + final case class InsertedValue[V](value: V) extends AssociatedValue[V] +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/PeanoTreeQueue.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/PeanoTreeQueue.scala new file mode 100644 index 0000000000..ef57d73c7f --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/PeanoTreeQueue.scala @@ -0,0 +1,158 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.data + +import com.digitalasset.canton.DiscardOps +import com.digitalasset.canton.data.PeanoQueue.{ + AssociatedValue, + BeforeHead, + InsertedValue, + NotInserted, +} +import com.google.common.annotations.VisibleForTesting + +import scala.collection.mutable +import scala.concurrent.blocking + +/** Implementation of [[PeanoQueue]] for [[Counter]] keys based on a tree map. + * + * This implementation is not thread safe. 
/** Implementation of [[PeanoQueue]] for [[Counter]] keys based on a tree map.
  *
  * This implementation is not thread safe.
  */
@SuppressWarnings(Array("org.wartremover.warts.Var"))
class PeanoTreeQueue[Discr, V](initHead: Counter[Discr]) extends PeanoQueue[Counter[Discr], V] {

  // Holds all keys in [head, front) plus any keys >= front that were inserted out of order.
  private val elems: mutable.TreeMap[Counter[Discr], V] = mutable.TreeMap.empty[Counter[Discr], V]

  private var headV: Counter[Discr] = initHead

  override def head: Counter[Discr] = headV

  private var frontV: Counter[Discr] = initHead

  override def front: Counter[Discr] = frontV

  override def insert(key: Counter[Discr], value: V): Boolean = {
    require(
      key.isNotMaxValue,
      s"The maximal key value ${Counter.MaxValue} cannot be inserted.",
    )

    // Re-inserting a key with a conflicting value is a programming error; fail loudly.
    def associationChanged(oldValue: V): Nothing =
      throw new IllegalArgumentException(
        s"New value $value for key $key differs from old value $oldValue."
      )

    if (key >= frontV) {
      elems.put(key, value) match {
        case None => if (key == frontV) cleanup()
        case Some(oldValue) =>
          if (oldValue != value) {
            elems.put(key, oldValue).discard // undo the changes
            associationChanged(oldValue)
          }
      }
      true
    } else if (key >= headV) {
      // Keys in [head, front) are always present in `elems` by the queue invariant.
      val oldValue =
        elems
          .get(key)
          .getOrElse(
            throw new IllegalStateException("Unreachable code by properties of the PeanoQueue")
          )
      if (value != oldValue)
        associationChanged(oldValue)
      true
    } else false
  }

  override def alreadyInserted(key: Counter[Discr]): Boolean = {
    if (key >= frontV) {
      elems.contains(key)
    } else {
      true
    }
  }

  /** Update `front` as long as the [[elems]] contain consecutive key-value pairs starting at `front`.
    */
  @SuppressWarnings(Array("org.wartremover.warts.While"))
  private def cleanup(): Unit = {
    @SuppressWarnings(Array("org.wartremover.warts.Var"))
    var next = frontV
    val iter = elems.keysIteratorFrom(next)
    while (iter.hasNext && iter.next() == next) {
      next += 1
    }
    frontV = next
  }

  def get(key: Counter[Discr]): AssociatedValue[V] = {
    if (key < headV) BeforeHead
    else
      elems.get(key) match {
        case None =>
          // Neighbouring values around the missing key, restricted to keys >= head.
          val floor = elems.rangeImpl(None, Some(key)).lastOption.map(_._2)
          val ceiling = elems.rangeImpl(Some(key), None).headOption.map(_._2)
          NotInserted(floor, ceiling)
        case Some(value) => InsertedValue(value)
      }
  }

  override def poll(): Option[(Counter[Discr], V)] = {
    if (headV >= frontV) None
    else {
      val key = headV
      val value =
        elems
          .remove(key)
          .getOrElse(
            throw new IllegalStateException("Unreachable code by properties of the PeanoQueue")
          )
      headV = key + 1
      Some((key, value))
    }
  }

  @VisibleForTesting
  def invariant: Boolean = {
    headV <= frontV &&
    elems.rangeImpl(None, Some(frontV + 1)).toSeq.map(_._1) == (headV until frontV)
  }

  override def toString: String = {
    val builder = new StringBuilder("PeanoQueue(front = ").append(frontV)
    elems.foreach { case (k, v) =>
      builder.append(", ").append(k).append("->").append(v.toString).discard[StringBuilder]
    }
    builder.append(")")
    builder.toString
  }
}

object PeanoTreeQueue {
  def apply[Discr, V](init: Counter[Discr]) = new PeanoTreeQueue[Discr, V](init)
}

/** A thread-safe [[PeanoTreeQueue]] thanks to synchronizing all methods */
class SynchronizedPeanoTreeQueue[Discr, V](initHead: Counter[Discr])
    extends PeanoQueue[Counter[Discr], V] {
  private[this] val queue: PeanoQueue[Counter[Discr], V] = new PeanoTreeQueue(initHead)

  override def head: Counter[Discr] = blocking { queue synchronized queue.head }

  override def front: Counter[Discr] = blocking { queue synchronized queue.front }

  override def insert(key: Counter[Discr], value: V): Boolean =
    blocking { queue synchronized queue.insert(key, value) }

  override def alreadyInserted(key: Counter[Discr]): Boolean =
    blocking { queue synchronized queue.alreadyInserted(key) }

  override def get(key: Counter[Discr]): AssociatedValue[V] = blocking {
    queue synchronized queue.get(key)
  }

  override def poll(): Option[(Counter[Discr], V)] = blocking { queue synchronized queue.poll() }

  // Fix: without this override, the trait's default dropUntilFront would issue a sequence of
  // individually-synchronized poll() calls, allowing other threads to interleave between polls.
  // Delegating under a single lock makes draining up to the front atomic.
  override def dropUntilFront(): Option[(Counter[Discr], V)] =
    blocking { queue synchronized queue.dropUntilFront() }
}

// ---------------------------------------------------------------------------
// File: ProcessedDisclosedContract.scala
// ---------------------------------------------------------------------------
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.data

import com.daml.lf.data.{Bytes, Ref, Time}
import com.daml.lf.transaction.{GlobalKeyWithMaintainers, Node, TransactionVersion}
import com.daml.lf.value.Value
+ * + * @param create the create event of the contract + * @param createdAt ledger effective time of the transaction that created the contract + * @param driverMetadata opaque bytestring used by the underlying ledger implementation + */ +final case class ProcessedDisclosedContract( + create: Node.Create, + createdAt: Time.Timestamp, + driverMetadata: Bytes, +) { + def contractId: Value.ContractId = create.coid + def templateId: Ref.TypeConName = create.templateId +} + +object ProcessedDisclosedContract { + // Helper builder for test + def apply( + templateId: Ref.Identifier, + contractId: Value.ContractId, + argument: Value, + createdAt: Time.Timestamp, + driverMetadata: Bytes, + signatories: Set[Ref.Party], + stakeholders: Set[Ref.Party], + keyOpt: Option[GlobalKeyWithMaintainers], + agreementText: String, + version: TransactionVersion, + ): ProcessedDisclosedContract = + ProcessedDisclosedContract( + create = Node.Create( + templateId = templateId, + coid = contractId, + arg = argument, + signatories = signatories, + stakeholders = stakeholders, + keyOpt = keyOpt, + agreementText = agreementText, + version = version, + ), + createdAt = createdAt, + driverMetadata = driverMetadata, + ) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/RepairContract.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/RepairContract.scala new file mode 100644 index 0000000000..ecc88f8c7a --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/RepairContract.scala @@ -0,0 +1,20 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.data + +import com.digitalasset.canton.TransferCounterO +import com.digitalasset.canton.protocol.SerializableContract +import com.digitalasset.canton.topology.PartyId + +/** Serializable contract with witnesses for contract add/import used in admin repairs. + * + * @param contract serializable contract + * @param witnesses optional witnesses that observe the creation of the contract + * @param transferCounter optional reassignment counter for the given [[contract]] + */ +final case class RepairContract( + contract: SerializableContract, + witnesses: Set[PartyId], + transferCounter: TransferCounterO, +) diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/SubmitterMetadata.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/SubmitterMetadata.scala new file mode 100644 index 0000000000..a6d30ffbb3 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/SubmitterMetadata.scala @@ -0,0 +1,205 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.data

import cats.syntax.either.*
import cats.syntax.traverse.*
import com.daml.lf.data.Ref
import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.*
import com.digitalasset.canton.crypto.*
import com.digitalasset.canton.ledger.api.DeduplicationPeriod
import com.digitalasset.canton.logging.pretty.Pretty
import com.digitalasset.canton.protocol.*
import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.digitalasset.canton.serialization.{ProtoConverter, ProtocolVersionedMemoizedEvidence}
import com.digitalasset.canton.topology.*
import com.digitalasset.canton.version.*
import com.google.protobuf.ByteString

/** Information about the submitters of the transaction.
  *
  * `maxSequencingTimeO` was added in PV=5, so it will only be defined for PV >= 5, and will be
  * `None` otherwise.
  *
  * @param actAs non-empty set of parties on whose behalf the command was submitted
  *   (readAs parties are deliberately not carried here, see the note in `apply`)
  * @param applicationId application id of the originating ledger-API submission
  * @param commandId command id of the originating ledger-API submission
  * @param submitterParticipant participant through which the command was submitted
  * @param salt blinding salt for this Merkle tree leaf
  * @param submissionId optional submission id of the originating submission
  * @param dedupPeriod command-deduplication period requested by the submitter
  * @param maxSequencingTime latest sequencing time for the submission
  */
final case class SubmitterMetadata private (
    actAs: NonEmpty[Set[LfPartyId]],
    applicationId: ApplicationId,
    commandId: CommandId,
    submitterParticipant: ParticipantId,
    salt: Salt,
    submissionId: Option[LedgerSubmissionId],
    dedupPeriod: DeduplicationPeriod,
    maxSequencingTime: CantonTimestamp,
)(
    hashOps: HashOps,
    override val representativeProtocolVersion: RepresentativeProtocolVersion[
      SubmitterMetadata.type
    ],
    override val deserializedFrom: Option[ByteString],
) extends MerkleTreeLeaf[SubmitterMetadata](hashOps)
    with HasProtocolVersionedWrapper[SubmitterMetadata]
    with ProtocolVersionedMemoizedEvidence {

  override protected[this] def toByteStringUnmemoized: ByteString =
    super[HasProtocolVersionedWrapper].toByteString

  override val hashPurpose: HashPurpose = HashPurpose.SubmitterMetadata

  override def pretty: Pretty[SubmitterMetadata] = prettyOfClass(
    param("act as", _.actAs),
    param("application id", _.applicationId),
    param("command id", _.commandId),
    param("submitter participant", _.submitterParticipant),
    param("salt", _.salt),
    paramIfDefined("submission id", _.submissionId),
    param("deduplication period", _.dedupPeriod),
    param("max sequencing time", _.maxSequencingTime),
  )

  @transient override protected lazy val companionObj: SubmitterMetadata.type = SubmitterMetadata

  protected def toProtoV1: v1.SubmitterMetadata = v1.SubmitterMetadata(
    actAs = actAs.toSeq,
    applicationId = applicationId.toProtoPrimitive,
    commandId = commandId.toProtoPrimitive,
    submitterParticipant = submitterParticipant.toProtoPrimitive,
    salt = Some(salt.toProtoV0),
    submissionId = submissionId.getOrElse(""),
    dedupPeriod = Some(SerializableDeduplicationPeriod(dedupPeriod).toProtoV0),
    maxSequencingTime = Some(maxSequencingTime.toProtoPrimitive),
  )
}

object SubmitterMetadata
    extends HasMemoizedProtocolVersionedWithContextCompanion[
      SubmitterMetadata,
      HashOps,
    ] {
  override val name: String = "SubmitterMetadata"

  val supportedProtoVersions = SupportedProtoVersions(
    ProtoVersion(1) -> VersionedProtoConverter(ProtocolVersion.v30)(v1.SubmitterMetadata)(
      supportedProtoVersionMemoized(_)(fromProtoV1),
      _.toProtoV1.toByteString,
    )
  )

  def apply(
      actAs: NonEmpty[Set[LfPartyId]],
      applicationId: ApplicationId,
      commandId: CommandId,
      submitterParticipant: ParticipantId,
      salt: Salt,
      submissionId: Option[LedgerSubmissionId],
      dedupPeriod: DeduplicationPeriod,
      maxSequencingTime: CantonTimestamp,
      hashOps: HashOps,
      protocolVersion: ProtocolVersion,
  ): SubmitterMetadata = SubmitterMetadata(
    actAs, // Canton ignores SubmitterInfo.readAs per https://github.com/digital-asset/daml/pull/12136
    applicationId,
    commandId,
    submitterParticipant,
    salt,
    submissionId,
    dedupPeriod,
    maxSequencingTime,
  )(hashOps, protocolVersionRepresentativeFor(protocolVersion), None)

  /** Builds [[SubmitterMetadata]] from ledger-API submitter info.
    *
    * @return `Left` with an error message if `submitterActAs` is empty.
    */
  def fromSubmitterInfo(hashOps: HashOps)(
      submitterActAs: List[Ref.Party],
      submitterApplicationId: Ref.ApplicationId,
      submitterCommandId: Ref.CommandId,
      submitterSubmissionId: Option[Ref.SubmissionId],
      submitterDeduplicationPeriod: DeduplicationPeriod,
      submitterParticipant: ParticipantId,
      salt: Salt,
      maxSequencingTime: CantonTimestamp,
      protocolVersion: ProtocolVersion,
  ): Either[String, SubmitterMetadata] = {
    NonEmpty.from(submitterActAs.toSet).toRight("The actAs set must not be empty.").map {
      actAsNes =>
        SubmitterMetadata(
          actAsNes, // Canton ignores SubmitterInfo.readAs per https://github.com/digital-asset/daml/pull/12136
          ApplicationId(submitterApplicationId),
          CommandId(submitterCommandId),
          submitterParticipant,
          salt,
          submitterSubmissionId,
          submitterDeduplicationPeriod,
          maxSequencingTime,
          hashOps,
          protocolVersion,
        )
    }
  }

  private def fromProtoV1(hashOps: HashOps, metaDataP: v1.SubmitterMetadata)(
      bytes: ByteString
  ): ParsingResult[SubmitterMetadata] = {
    val v1.SubmitterMetadata(
      saltOP,
      actAsP,
      applicationIdP,
      commandIdP,
      submitterParticipantP,
      submissionIdP,
      dedupPeriodOP,
      maxSequencingTimeOP,
    ) = metaDataP

    for {
      submitterParticipant <- ParticipantId
        .fromProtoPrimitive(submitterParticipantP, "SubmitterMetadata.submitter_participant")
      actAs <- actAsP.traverse(
        ProtoConverter
          .parseLfPartyId(_)
          .leftMap(e => ProtoDeserializationError.ValueConversionError("actAs", e.message))
      )
      applicationId <- ApplicationId
        .fromProtoPrimitive(applicationIdP)
        .leftMap(ProtoDeserializationError.ValueConversionError("applicationId", _))
      commandId <- CommandId
        .fromProtoPrimitive(commandIdP)
        .leftMap(ProtoDeserializationError.ValueConversionError("commandId", _))
      salt <- ProtoConverter
        .parseRequired(Salt.fromProtoV0, "salt", saltOP)
        .leftMap(e => ProtoDeserializationError.ValueConversionError("salt", e.message))
      submissionIdO <- Option
        .when(submissionIdP.nonEmpty)(submissionIdP)
        .traverse(
          LedgerSubmissionId
            .fromString(_)
            .leftMap(ProtoDeserializationError.ValueConversionError("submissionId", _))
        )
      dedupPeriod <- ProtoConverter
        .parseRequired(
          SerializableDeduplicationPeriod.fromProtoV0,
          "SubmitterMetadata.deduplication_period",
          dedupPeriodOP,
        )
        .leftMap(e =>
          ProtoDeserializationError.ValueConversionError("deduplicationPeriod", e.message)
        )
      actAsNes <- NonEmpty
        .from(actAs.toSet)
        .toRight(
          // Fix: the error's field name was misspelled "acsAs".
          ProtoDeserializationError.ValueConversionError("actAs", "actAs set must not be empty.")
        )
      maxSequencingTime <- ProtoConverter.parseRequired(
        CantonTimestamp.fromProtoPrimitive,
        "SubmitterMetadata.max_sequencing_time",
        maxSequencingTimeOP,
      )
    } yield SubmitterMetadata(
      actAsNes,
      applicationId,
      commandId,
      submitterParticipant,
      salt,
      submissionIdO,
      dedupPeriod,
      maxSequencingTime,
    )(hashOps, protocolVersionRepresentativeFor(ProtoVersion(1)), Some(bytes))
  }
}

// ---------------------------------------------------------------------------
// File: Timestamp.scala
// ---------------------------------------------------------------------------
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.data + +import com.digitalasset.canton.LfTimestamp +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.serialization.ProtoConverter +import com.google.protobuf.timestamp.Timestamp as ProtoTimestamp + +import java.time.Instant +import java.util.Date + +trait Timestamp extends PrettyPrinting { + def underlying: LfTimestamp + + def toLf: LfTimestamp = underlying + + def isAfter(t: CantonTimestamp): Boolean = underlying.compareTo(t.underlying) > 0 + + def isBefore(t: CantonTimestamp): Boolean = underlying.compareTo(t.underlying) < 0 + + def toProtoPrimitive: ProtoTimestamp = + ProtoConverter.InstantConverter.toProtoPrimitive(underlying.toInstant) + + def getEpochSecond: Long = underlying.toInstant.getEpochSecond + + def toEpochMilli: Long = underlying.toInstant.toEpochMilli + + def toInstant: Instant = underlying.toInstant + + def toDate: Date = Date.from(underlying.toInstant) + + def toMicros: Long = underlying.micros + + def microsOverSecond(): Long = { + val nanos = underlying.toInstant.getNano + assert(nanos % 1000 == 0) + nanos / 1000L + } + + override def pretty: Pretty[this.type] = prettyOfParam(_.underlying) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/TransactionSubviews.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/TransactionSubviews.scala new file mode 100644 index 0000000000..50bf7d0d8a --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/TransactionSubviews.scala @@ -0,0 +1,129 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.data

import com.digitalasset.canton.crypto.HashOps
import com.digitalasset.canton.data.MerkleTree.BlindingCommand
import com.digitalasset.canton.data.ViewPosition.{MerklePathElement, MerkleSeqIndexFromRoot}
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.protocol.{ConfirmationPolicy, RootHash, ViewHash, v1}
import com.digitalasset.canton.serialization.ProtoConverter
import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.digitalasset.canton.version.{ProtoVersion, ProtocolVersion}

/** Abstraction over the subviews of a [[TransactionView]].
  * Implementation of [[TransactionSubviews]] where the subviews are a merkle tree.
  *
  * @param subviews transaction views wrapped in this class
  */
final case class TransactionSubviews private[data] (
    subviews: MerkleSeq[TransactionView]
) extends PrettyPrinting {

  def toProtoV1: v1.MerkleSeq = subviews.toProtoV1

  lazy val unblindedElementsWithIndex: Seq[(TransactionView, MerklePathElement)] =
    subviews.unblindedElementsWithIndex

  lazy val trees: Seq[MerkleTree[?]] = subviews.rootOrEmpty.toList

  def doBlind(policy: PartialFunction[RootHash, BlindingCommand]): TransactionSubviews =
    TransactionSubviews(subviews.doBlind(policy))

  def blindFully: TransactionSubviews =
    TransactionSubviews(subviews.blindFully)

  lazy val areFullyBlinded: Boolean = subviews.isFullyBlinded

  lazy val blindedElements: Seq[RootHash] = subviews.blindedElements

  /** Check that the provided subview hashes are consistent with the ones from the contained subviews. */
  def hashesConsistentWith(hashOps: HashOps)(subviewHashes: Seq[ViewHash]): Boolean = {
    val merkleSeqRepr = subviews.representativeProtocolVersion
    val merkleSeqElemRepr = subviews.tryMerkleSeqElementRepresentativeProtocolVersion

    // Rebuild a fully-blinded MerkleSeq from the given hashes and compare root hashes.
    val subviewsSeq = subviewHashes.map(h => BlindedNode(h.toRootHash))
    val subviewsToCheck = MerkleSeq.fromSeq(hashOps, merkleSeqRepr, merkleSeqElemRepr)(subviewsSeq)

    subviews.rootHashO == subviewsToCheck.rootHashO
  }

  /** Blind everything except the leaf at `viewPos`, which is transformed recursively. */
  def tryBlindForTransactionViewTree(
      viewPos: ViewPositionFromRoot
  ): TransactionSubviews = {
    viewPos.position match {
      case (head: MerkleSeqIndexFromRoot) +: tail =>
        TransactionSubviews(
          subviews.tryBlindAllButLeaf(
            head,
            _.tryBlindForTransactionViewTree(ViewPositionFromRoot(tail)),
          )
        )
      case other => throw new UnsupportedOperationException(s"Invalid path: $other")
    }
  }

  lazy val unblindedElements: Seq[TransactionView] = unblindedElementsWithIndex.map(_._1)

  /** Apply `f` to all the unblinded contained subviews */
  def mapUnblinded(f: TransactionView => TransactionView): TransactionSubviews = {
    TransactionSubviews(subviews.mapM(f))
  }

  // Fix: add the missing `override` modifier for consistency with the other PrettyPrinting
  // implementations in this code drop (e.g. Timestamp, SubmitterMetadata).
  override def pretty: Pretty[TransactionSubviews.this.type] = prettyOfClass(
    unnamedParam(_.subviews)
  )

  /** Return the view hashes of the contained subviews
    *
    * @throws java.lang.IllegalStateException if applied to a [[TransactionSubviews]] with blinded elements
    */
  lazy val trySubviewHashes: Seq[ViewHash] = {
    if (blindedElements.isEmpty) unblindedElements.map(_.viewHash)
    else
      throw new IllegalStateException(
        "Attempting to get subviewHashes from a TransactionSubviewsV1 with blinded elements"
      )
  }

  /** Assert that all contained subviews are unblinded
    *
    * @throws java.lang.IllegalStateException if there are blinded subviews, passing the first blinded subview hash
    *   to the provided function to generate the error message
    */
  def assertAllUnblinded(makeMessage: RootHash => String): Unit =
    blindedElements.headOption.foreach(hash => throw new IllegalStateException(makeMessage(hash)))
}

object TransactionSubviews {
  private[data] def fromProtoV1(
      context: (HashOps, ConfirmationPolicy),
      subviewsPO: Option[v1.MerkleSeq],
  ): ParsingResult[TransactionSubviews] = {
    val (hashOps, _) = context
    for {
      subviewsP <- ProtoConverter.required("ViewNode.subviews", subviewsPO)
      tvParser = TransactionView.fromByteString(ProtoVersion(1))(context)
      subviews <- MerkleSeq.fromProtoV1((hashOps, tvParser), subviewsP)
    } yield TransactionSubviews(subviews)
  }

  def apply(
      subviewsSeq: Seq[MerkleTree[TransactionView]]
  )(protocolVersion: ProtocolVersion, hashOps: HashOps): TransactionSubviews =
    TransactionSubviews(MerkleSeq.fromSeq(hashOps, protocolVersion)(subviewsSeq))

  def empty(protocolVersion: ProtocolVersion, hashOps: HashOps): TransactionSubviews =
    apply(Seq.empty)(protocolVersion, hashOps)

  /** Produce a sequence of indices for subviews.
    * When subviews are stored in a sequence, it is essentially (0, ..., size - 1).
    * When subviews are stored in a merkle tree, it gives the view paths in the tree. For example, a
    * balanced tree with 4 subviews will produce (LL, LR, RL, RR).
    *
    * @param nbOfSubviews total number of subviews
    * @return the sequence of indices for the subviews
    */
  def indices(nbOfSubviews: Int): Seq[MerklePathElement] =
    MerkleSeq.indicesFromSeq(nbOfSubviews)
}

// ---------------------------------------------------------------------------
// File: TransactionView.scala
// ---------------------------------------------------------------------------
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.data + +import cats.syntax.either.* +import cats.syntax.functor.* +import com.daml.lf.transaction.ContractStateMachine.{ActiveLedgerState, KeyMapping} +import com.digitalasset.canton.crypto.* +import com.digitalasset.canton.data.TransactionView.InvalidView +import com.digitalasset.canton.logging.pretty.Pretty +import com.digitalasset.canton.logging.{HasLoggerName, NamedLoggingContext} +import com.digitalasset.canton.protocol.{v1, *} +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.util.{ErrorUtil, MapsUtil, NamedLoggingLazyVal} +import com.digitalasset.canton.version.* +import com.digitalasset.canton.{LfPartyId, ProtoDeserializationError} +import com.google.common.annotations.VisibleForTesting +import monocle.Lens +import monocle.macros.GenLens + +/** Encapsulates a subaction of the underlying transaction. + * + * @param subviews the top-most subviews of this view + * @throws TransactionView$.InvalidView if the `viewCommonData` is unblinded and equals the `viewCommonData` of a direct subview + */ +final case class TransactionView private ( + viewCommonData: MerkleTree[ViewCommonData], + viewParticipantData: MerkleTree[ViewParticipantData], + subviews: TransactionSubviews, +)( + hashOps: HashOps, + override val representativeProtocolVersion: RepresentativeProtocolVersion[TransactionView.type], +) extends MerkleTreeInnerNode[TransactionView](hashOps) + with HasProtocolVersionedWrapper[TransactionView] + with HasLoggerName { + + @transient override protected lazy val companionObj: TransactionView.type = TransactionView + + if (viewCommonData.unwrap.isRight) { + subviews.unblindedElementsWithIndex + .find { case (view, _path) => view.viewCommonData == viewCommonData } + .foreach { case (_view, path) => + throw InvalidView( + s"The subview with index $path has an equal viewCommonData." 
+ ) + } + } + + def subviewHashesConsistentWith(subviewHashes: Seq[ViewHash]): Boolean = { + subviews.hashesConsistentWith(hashOps)(subviewHashes) + } + + override def subtrees: Seq[MerkleTree[_]] = + Seq[MerkleTree[_]](viewCommonData, viewParticipantData) ++ subviews.trees + + def tryUnblindViewParticipantData( + fieldName: String + )(implicit loggingContext: NamedLoggingContext): ViewParticipantData = + viewParticipantData.unwrap.getOrElse( + ErrorUtil.internalError( + new IllegalStateException( + s"$fieldName of view $viewHash can be computed only if the view participant data is unblinded" + ) + ) + ) + + override private[data] def withBlindedSubtrees( + blindingCommandPerNode: PartialFunction[RootHash, MerkleTree.BlindingCommand] + ): MerkleTree[TransactionView] = + TransactionView.tryCreate( + viewCommonData.doBlind(blindingCommandPerNode), // O(1) + viewParticipantData.doBlind(blindingCommandPerNode), // O(1) + subviews.doBlind(blindingCommandPerNode), // O(#subviews) + representativeProtocolVersion, + )(hashOps) + + private[data] def tryBlindForTransactionViewTree( + viewPos: ViewPositionFromRoot + ): TransactionView = { + val isMainView = viewPos.isEmpty + + if (isMainView) this + else { + TransactionView.tryCreate( + viewCommonData.blindFully, + viewParticipantData.blindFully, + subviews.tryBlindForTransactionViewTree(viewPos), + representativeProtocolVersion, + )(hashOps) + } + } + + val viewHash: ViewHash = ViewHash.fromRootHash(rootHash) + + /** Traverses all unblinded subviews `v1, v2, v3, ...` in pre-order and yields + * `f(...f(f(z, v1), v2)..., vn)` + */ + def foldLeft[A](z: A)(f: (A, TransactionView) => A): A = + subviews.unblindedElements + .to(LazyList) + .foldLeft(f(z, this))((acc, subView) => subView.foldLeft(acc)(f)) + + /** Yields all (direct and indirect) subviews of this view in pre-order. + * The first element is this view. 
+ */ + lazy val flatten: Seq[TransactionView] = + foldLeft(Seq.newBuilder[TransactionView])((acc, v) => acc += v).result() + + lazy val tryFlattenToParticipantViews: Seq[ParticipantTransactionView] = + flatten.map(ParticipantTransactionView.tryCreate) + + /** Yields all (direct and indirect) subviews of this view in pre-order, along with the subview position + * under the root view position `rootPos`. The first element is this view. + */ + def allSubviewsWithPosition(rootPos: ViewPosition): Seq[(TransactionView, ViewPosition)] = { + def helper( + view: TransactionView, + viewPos: ViewPosition, + ): Seq[(TransactionView, ViewPosition)] = { + (view, viewPos) +: view.subviews.unblindedElementsWithIndex.flatMap { + case (view, viewIndex) => helper(view, viewIndex +: viewPos) + } + } + + helper(this, rootPos) + } + + override def pretty: Pretty[TransactionView] = prettyOfClass( + param("root hash", _.rootHash), + param("view common data", _.viewCommonData), + param("view participant data", _.viewParticipantData), + param("subviews", _.subviews), + ) + + @VisibleForTesting + private[data] def copy( + viewCommonData: MerkleTree[ViewCommonData] = this.viewCommonData, + viewParticipantData: MerkleTree[ViewParticipantData] = this.viewParticipantData, + subviews: TransactionSubviews = this.subviews, + ) = + new TransactionView(viewCommonData, viewParticipantData, subviews)( + hashOps, + representativeProtocolVersion, + ) + + /** If the view with the given hash appears either as this view or one of its unblinded descendants, + * replace it by the given view. + * TODO(i12900): not stack safe unless we have limits on the depths of views. 
+ */ + def replace(h: ViewHash, v: TransactionView): TransactionView = + if (viewHash == h) v + else this.copy(subviews = subviews.mapUnblinded(_.replace(h, v))) + + protected def toProtoV1: v1.ViewNode = v1.ViewNode( + viewCommonData = Some(MerkleTree.toBlindableNodeV1(viewCommonData)), + viewParticipantData = Some(MerkleTree.toBlindableNodeV1(viewParticipantData)), + subviews = Some(subviews.toProtoV1), + ) + + /** The global key inputs that the [[com.daml.lf.transaction.ContractStateMachine]] computes + * while interpreting the root action of the view, enriched with the maintainers of the key and the + * [[com.digitalasset.canton.protocol.LfTransactionVersion]] to be used for serializing the key. + * + * @throws java.lang.IllegalStateException if the [[ViewParticipantData]] of this view or any subview is blinded + */ + def globalKeyInputs(implicit + loggingContext: NamedLoggingContext + ): Map[LfGlobalKey, KeyResolutionWithMaintainers] = + _globalKeyInputs.get + + private[this] val _globalKeyInputs + : NamedLoggingLazyVal[Map[LfGlobalKey, KeyResolutionWithMaintainers]] = + NamedLoggingLazyVal[Map[LfGlobalKey, KeyResolutionWithMaintainers]] { implicit loggingContext => + val viewParticipantData = tryUnblindViewParticipantData("Global key inputs") + + subviews.assertAllUnblinded(hash => + s"Global key inputs of view $viewHash can be computed only if all subviews are unblinded, but ${hash} is blinded" + ) + + subviews.unblindedElements.foldLeft(viewParticipantData.resolvedKeysWithMaintainers) { + (acc, subview) => + val subviewGki = subview.globalKeyInputs + MapsUtil.mergeWith(acc, subviewGki) { (accRes, _subviewRes) => accRes } + } + } + + /** The input contracts of the view (including subviews). 
  *
  * @throws java.lang.IllegalStateException if the [[ViewParticipantData]] of this view or any subview is blinded
  */
  def inputContracts(implicit
      loggingContext: NamedLoggingContext
  ): Map[LfContractId, InputContract] = _inputsAndCreated.get._1

  /** The contracts appearing in create nodes in the view (including subviews).
    *
    * @throws java.lang.IllegalStateException if the [[ViewParticipantData]] of this view or any subview is blinded
    */
  def createdContracts(implicit
      loggingContext: NamedLoggingContext
  ): Map[LfContractId, CreatedContractInView] = _inputsAndCreated.get._2

  // Lazily computed pair of (input contracts, created contracts), aggregated over this view's
  // participant data and all unblinded subviews. Both accessors above throw (via
  // ErrorUtil.internalError) if any required participant data is blinded.
  private[this] val _inputsAndCreated: NamedLoggingLazyVal[
    (Map[LfContractId, InputContract], Map[LfContractId, CreatedContractInView])
  ] = NamedLoggingLazyVal[
    (Map[LfContractId, InputContract], Map[LfContractId, CreatedContractInView])
  ] { implicit loggingContext =>
    val vpd = viewParticipantData.unwrap.getOrElse(
      ErrorUtil.internalError(
        new IllegalStateException(
          s"Inputs and created contracts of view $viewHash can be computed only if the view participant data is unblinded"
        )
      )
    )
    val currentRollbackScope = vpd.rollbackContext.rollbackScope
    subviews.assertAllUnblinded(hash =>
      s"Inputs and created contracts of view $viewHash can be computed only if all subviews are unblinded, but ${hash} is blinded"
    )
    // Recursively collect (inputs, created) from each subview, adjusting for rollback scope.
    val subviewInputsAndCreated = subviews.unblindedElements.map { subview =>
      val subviewVpd =
        subview.tryUnblindViewParticipantData("Inputs and created contracts")
      val created = subview.createdContracts
      val inputs = subview.inputContracts
      val subviewRollbackScope = subviewVpd.rollbackContext.rollbackScope
      // If the subview sits under a Rollback node in the view's core,
      // then the created contracts of the subview are all rolled back,
      // and all consuming inputs become non-consuming inputs.
      if (subviewRollbackScope != currentRollbackScope) {
        (
          inputs.fmap(_.copy(consumed = false)),
          created.fmap(_.copy(rolledBack = true)),
        )
      } else (inputs, created)
    }

    val createdCore = vpd.createdCore.map { contract =>
      contract.contract.contractId -> CreatedContractInView.fromCreatedContract(contract)
    }.toMap
    // Fold the subviews' contributions into this view's core inputs / core creates.
    subviewInputsAndCreated.foldLeft((vpd.coreInputs, createdCore)) {
      case ((accInputs, accCreated), (subviewInputs, subviewCreated)) =>
        // A contract created in a subview and archived in this view's core is consumed in the view.
        val subviewCreatedUpdated = subviewCreated.fmap { contract =>
          if (vpd.createdInSubviewArchivedInCore.contains(contract.contract.contractId))
            contract.copy(consumedInView = true)
          else contract
        }
        // A contract created earlier and consumed as an input of this subview is consumed in the view.
        val accCreatedUpdated = accCreated.fmap { contract =>
          if (subviewInputs.get(contract.contract.contractId).exists(_.consumed))
            contract.copy(consumedInView = true)
          else contract
        }
        val nextCreated = MapsUtil.mergeWith(accCreatedUpdated, subviewCreatedUpdated) {
          (fromAcc, _) =>
            // By the contract ID allocation scheme, the contract IDs in the subviews are pairwise distinct
            // and distinct from `createdCore`
            // TODO(i12901) Check this invariant somewhere
            ErrorUtil.internalError(
              new IllegalStateException(
                s"Contract ${fromAcc.contract.contractId} is created multiple times in view $viewHash"
              )
            )
        }

        // Inputs that are transient (created earlier within this view) are not inputs of the view.
        val subviewNontransientInputs = subviewInputs.filter { case (cid, _) =>
          !accCreated.contains(cid)
        }
        val nextInputs = MapsUtil.mergeWith(accInputs, subviewNontransientInputs) {
          (fromAcc, fromSubview) =>
            fromAcc.copy(consumed = fromAcc.consumed || fromSubview.consumed)
        }
        (nextInputs, nextCreated)
    }
  }

  /** The [[com.daml.lf.transaction.ContractStateMachine.ActiveLedgerState]]
    * the [[com.daml.lf.transaction.ContractStateMachine]] reaches after interpreting the root action of the view.
    *
    * Must only be used in mode [[com.daml.lf.transaction.ContractKeyUniquenessMode.Strict]]
    *
    * @throws java.lang.IllegalStateException if the [[ViewParticipantData]] of this view or any subview is blinded.
    */
  def activeLedgerState(implicit
      loggingContext: NamedLoggingContext
  ): ActiveLedgerState[Unit] =
    _activeLedgerStateAndUpdatedKeys.get._1

  /** The keys that this view updates (including reassigning the key), along with the maintainers of the key.
    *
    * Must only be used in mode [[com.daml.lf.transaction.ContractKeyUniquenessMode.Strict]]
    *
    * @throws java.lang.IllegalStateException if the [[ViewParticipantData]] of this view or any subview is blinded.
    */
  def updatedKeys(implicit loggingContext: NamedLoggingContext): Map[LfGlobalKey, Set[LfPartyId]] =
    _activeLedgerStateAndUpdatedKeys.get._2

  /** The keys that this view updates (including reassigning the key), along with the assignment of that key at the end of the transaction.
    *
    * Must only be used in mode [[com.daml.lf.transaction.ContractKeyUniquenessMode.Strict]]
    *
    * @throws java.lang.IllegalStateException if the [[ViewParticipantData]] of this view or any subview is blinded.
    */
  def updatedKeyValues(implicit
      loggingContext: NamedLoggingContext
  ): Map[LfGlobalKey, KeyMapping] = {
    val localActiveKeys = activeLedgerState.localActiveKeys
    // A key not locally (re)assigned resolves to its global input, unless that contract
    // was consumed within the view.
    def resolveKey(key: LfGlobalKey): KeyMapping =
      localActiveKeys.get(key) match {
        case None =>
          globalKeyInputs.get(key).map(_.resolution).flatten.filterNot(consumed.contains(_))
        case Some(mapping) => mapping
      }
    (localActiveKeys.keys ++ globalKeyInputs.keys).map(k => k -> resolveKey(k)).toMap
  }

  // Lazily computes the (ActiveLedgerState, updated keys with maintainers) pair backing
  // `activeLedgerState` / `updatedKeys`. Only meaningful in ContractKeyUniquenessMode.Strict.
  private[this] val _activeLedgerStateAndUpdatedKeys
      : NamedLoggingLazyVal[(ActiveLedgerState[Unit], Map[LfGlobalKey, Set[LfPartyId]])] =
    NamedLoggingLazyVal[(ActiveLedgerState[Unit], Map[LfGlobalKey, Set[LfPartyId]])] {
      implicit loggingContext =>
        val updatedKeysB = Map.newBuilder[LfGlobalKey, Set[LfPartyId]]
        @SuppressWarnings(Array("org.wartremover.warts.Var"))
        var localKeys: Map[LfGlobalKey, LfContractId] = Map.empty

        inputContracts.foreach { case (cid, inputContract) =>
          // Consuming exercises under a rollback node are rewritten to non-consuming exercises in the view inputs.
          // So here we are looking only at key usages that are outside of rollback nodes (inside the view).
          if (inputContract.consumed) {
            inputContract.contract.metadata.maybeKeyWithMaintainers.foreach { kWithM =>
              val key = kWithM.globalKey
              updatedKeysB += (key -> kWithM.maintainers)
            }
          }
        }
        createdContracts.foreach { case (cid, createdContract) =>
          if (!createdContract.rolledBack) {
            createdContract.contract.metadata.maybeKeyWithMaintainers.foreach { kWithM =>
              val key = kWithM.globalKey
              updatedKeysB += (key -> kWithM.maintainers)
              if (!createdContract.consumedInView) {
                // If we have an active contract, we use that mapping.
                localKeys += key -> cid
              } else {
                if (!localKeys.contains(key)) {
                  // If all contracts are inactive, we arbitrarily use the first in createdContracts
                  // (createdContracts is not ordered)
                  localKeys += key -> cid
                }
              }
            }
          }
        }

        val locallyCreatedThisTimeline = createdContracts.collect {
          case (contractId, createdContract) if !createdContract.rolledBack => contractId
        }.toSet

        ActiveLedgerState(
          locallyCreatedThisTimeline = locallyCreatedThisTimeline,
          consumedBy = consumed,
          localKeys = localKeys,
        ) ->
          updatedKeysB.result()
    }

  /** The contracts consumed within this view (non-rolled-back consuming exercises only). */
  def consumed(implicit loggingContext: NamedLoggingContext): Map[LfContractId, Unit] = {
    // In strict mode, every node involving a key updates the active ledger state
    // unless it is under a rollback node.
    // So it suffices to look at the created and input contracts
    // Contract consumption under a rollback is ignored.

    val consumedInputs = inputContracts.collect {
      // No need to check for contract.rolledBack because consumption under a rollback does not set the consumed flag
      case (cid, contract) if contract.consumed => cid -> ()
    }
    val consumedCreates = createdContracts.collect {
      // If the creation is rolled back, then so are all archivals
      // because a rolled-back create can only be used in the same or deeper rollback scopes,
      // as ensured by `WellformedTransaction.checkCreatedContracts`.
      case (cid, contract) if !contract.rolledBack && contract.consumedInView => cid -> ()
    }
    consumedInputs ++ consumedCreates
  }
}

object TransactionView
    extends HasProtocolVersionedWithContextCompanion[
      TransactionView,
      (HashOps, ConfirmationPolicy),
    ] {
  override def name: String = "TransactionView"
  override def supportedProtoVersions: SupportedProtoVersions =
    SupportedProtoVersions(
      ProtoVersion(1) -> VersionedProtoConverter(ProtocolVersion.v30)(v1.ViewNode)(
        supportedProtoVersion(_)(fromProtoV1),
        _.toProtoV1.toByteString,
      )
    )

  // Internal constructor taking an already-resolved representative protocol version.
  private def tryCreate(
      viewCommonData: MerkleTree[ViewCommonData],
      viewParticipantData: MerkleTree[ViewParticipantData],
      subviews: TransactionSubviews,
      representativeProtocolVersion: RepresentativeProtocolVersion[TransactionView.type],
  )(hashOps: HashOps): TransactionView =
    new TransactionView(viewCommonData, viewParticipantData, subviews)(
      hashOps,
      representativeProtocolVersion,
    )

  /** Creates a view.
    *
    * @throws InvalidView if the `viewCommonData` is unblinded and equals the `viewCommonData` of a direct subview
    */
  def tryCreate(hashOps: HashOps)(
      viewCommonData: MerkleTree[ViewCommonData],
      viewParticipantData: MerkleTree[ViewParticipantData],
      subviews: TransactionSubviews,
      protocolVersion: ProtocolVersion,
  ): TransactionView =
    tryCreate(
      viewCommonData,
      viewParticipantData,
      subviews,
      protocolVersionRepresentativeFor(protocolVersion),
    )(hashOps)

  // Like `create` below, but for callers that already hold a representative protocol version
  // (e.g. deserialization).
  private def createFromRepresentativePV(hashOps: HashOps)(
      viewCommonData: MerkleTree[ViewCommonData],
      viewParticipantData: MerkleTree[ViewParticipantData],
      subviews: TransactionSubviews,
      representativeProtocolVersion: RepresentativeProtocolVersion[TransactionView.type],
  ): Either[String, TransactionView] =
    Either
      .catchOnly[InvalidView](
        TransactionView.tryCreate(
          viewCommonData,
          viewParticipantData,
          subviews,
          representativeProtocolVersion,
        )(hashOps)
      )
      .leftMap(_.message)

  /** Creates a view.
    *
    * Yields `Left(...)` if the `viewCommonData` is unblinded and equals the `viewCommonData` of a direct subview
    */
  def create(hashOps: HashOps)(
      viewCommonData: MerkleTree[ViewCommonData],
      viewParticipantData: MerkleTree[ViewParticipantData],
      subviews: TransactionSubviews,
      protocolVersion: ProtocolVersion,
  ): Either[String, TransactionView] =
    Either
      .catchOnly[InvalidView](
        TransactionView.tryCreate(hashOps)(
          viewCommonData,
          viewParticipantData,
          subviews,
          protocolVersion,
        )
      )
      .leftMap(_.message)

  /** DO NOT USE IN PRODUCTION, as it does not necessarily check object invariants. */
  @VisibleForTesting
  val viewCommonDataUnsafe: Lens[TransactionView, MerkleTree[ViewCommonData]] =
    GenLens[TransactionView](_.viewCommonData)

  /** DO NOT USE IN PRODUCTION, as it does not necessarily check object invariants. */
  @VisibleForTesting
  val viewParticipantDataUnsafe: Lens[TransactionView, MerkleTree[ViewParticipantData]] =
    GenLens[TransactionView](_.viewParticipantData)

  private def fromProtoV1(
      context: (HashOps, ConfirmationPolicy),
      protoView: v1.ViewNode,
  ): ParsingResult[TransactionView] = {
    val (hashOps, _) = context
    for {
      commonData <- MerkleTree.fromProtoOptionV1(
        protoView.viewCommonData,
        ViewCommonData.fromByteString(context),
      )
      participantData <- MerkleTree.fromProtoOptionV1(
        protoView.viewParticipantData,
        ViewParticipantData.fromByteString(hashOps),
      )
      subViews <- TransactionSubviews.fromProtoV1(context, protoView.subviews)
      view <- createFromRepresentativePV(hashOps)(
        commonData,
        participantData,
        subViews,
        protocolVersionRepresentativeFor(ProtoVersion(1)),
      ).leftMap(e =>
        ProtoDeserializationError.OtherError(s"Unable to create transaction views: $e")
      )
    } yield view
  }

  /** Indicates an attempt to create an invalid view. */
  final case class InvalidView(message: String) extends RuntimeException(message)
}
diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/TransactionViewTree.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/TransactionViewTree.scala
new file mode 100644
index 0000000000..c8d6ac6970
--- /dev/null
+++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/TransactionViewTree.scala
@@ -0,0 +1,110 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.data

import cats.syntax.either.*
import com.digitalasset.canton.WorkflowId
import com.digitalasset.canton.data.ViewPosition.MerklePathElement
import com.digitalasset.canton.protocol.{ConfirmationPolicy, RootHash, TransactionId, ViewHash}
import com.digitalasset.canton.topology.{DomainId, MediatorRef}
import com.digitalasset.canton.util.EitherUtil

import java.util.UUID

/** A view tree for transaction requests: wraps a [[GenTransactionTree]] and exposes the single
  * unblinded view it represents, together with the (un)blinded metadata leaves.
  */
trait TransactionViewTree extends ViewTree {

  import TransactionViewTree.*

  def tree: GenTransactionTree

  // Implemented by concrete trees: locate "the" unblinded view among the root views.
  private[data] def findTheView(
      viewsWithIndex: Seq[(TransactionView, MerklePathElement)],
      viewPosition: ViewPosition = ViewPosition.root,
  ): Either[String, (TransactionView, ViewPosition)]

  private[data] lazy val viewAndPositionOrErr: Either[String, (TransactionView, ViewPosition)] =
    findTheView(tree.rootViews.unblindedElementsWithIndex)

  private[data] lazy val viewAndPosition: (TransactionView, ViewPosition) =
    viewAndPositionOrErr.valueOr(msg => throw InvalidTransactionViewTree(msg))

  /** The (top-most) unblinded view. */
  lazy val view: TransactionView = viewAndPosition._1

  override lazy val viewPosition: ViewPosition = viewAndPosition._2

  /** Determines whether `view` is top-level. */
  lazy val isTopLevel: Boolean = viewPosition.position.sizeCompare(1) == 0

  // Validates both the view lookup and the blinding state of the metadata leaves.
  def validated: Either[String, this.type] = for {
    _ <- viewAndPositionOrErr
    _ <- validateMetadata(tree, isTopLevel)
  } yield this

  override def rootHash: RootHash = tree.rootHash

  lazy val transactionId: TransactionId = TransactionId.fromRootHash(rootHash)

  override def toBeSigned: Option[RootHash] = if (isTopLevel) Some(rootHash) else None

  /** Returns the hashes of the direct subviews of the view represented by this tree.
    * By definition, all subviews are unblinded, therefore this will also work when the subviews
    * are stored in a MerkleSeq.
    */
  def subviewHashes: Seq[ViewHash] = view.subviews.trySubviewHashes

  override lazy val viewHash: ViewHash = ViewHash.fromRootHash(view.rootHash)

  override lazy val informees: Set[Informee] = view.viewCommonData.tryUnwrap.informees

  lazy val viewParticipantData: ViewParticipantData = view.viewParticipantData.tryUnwrap

  val submitterMetadataO: Option[SubmitterMetadata] = tree.submitterMetadata.unwrap.toOption

  private[data] lazy val commonMetadata: CommonMetadata = tree.commonMetadata.tryUnwrap

  lazy val transactionUuid: UUID = commonMetadata.uuid

  override def domainId: DomainId = commonMetadata.domainId

  override def mediator: MediatorRef = commonMetadata.mediator

  def confirmationPolicy: ConfirmationPolicy = commonMetadata.confirmationPolicy

  private[data] def participantMetadata: ParticipantMetadata = tree.participantMetadata.tryUnwrap

  lazy val ledgerTime: CantonTimestamp = participantMetadata.ledgerTime

  lazy val submissionTime: CantonTimestamp = participantMetadata.submissionTime

  lazy val workflowIdO: Option[WorkflowId] = participantMetadata.workflowIdO
}

object TransactionViewTree {

  // Checks that common/participant metadata are unblinded and that submitter metadata is
  // unblinded exactly for top-level views.
  private[data] def validateMetadata(
      tree: GenTransactionTree,
      isTopLevel: Boolean,
  ): Either[String, Unit] = for {
    _ <- tree.commonMetadata.unwrap.leftMap(_ =>
      "The common metadata of a transaction view tree must be unblinded."
    )

    _ <- tree.participantMetadata.unwrap.leftMap(_ =>
      "The participant metadata of a transaction view tree must be unblinded."
    )

    _ <- EitherUtil.condUnitE(
      isTopLevel == tree.submitterMetadata.isFullyUnblinded,
      "The submitter metadata must be unblinded if and only if the represented view is top-level. " +
        s"Submitter metadata: ${tree.submitterMetadata.unwrap.fold(_ => "blinded", _ => "unblinded")}, " +
        s"isTopLevel: $isTopLevel",
    )

  } yield ()

  /** Indicates an attempt to create an invalid [[TransactionViewTree]]. */
  final case class InvalidTransactionViewTree(message: String) extends RuntimeException(message) {}

}
diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/TransferInViewTree.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/TransferInViewTree.scala
new file mode 100644
index 0000000000..76ad3e8b6a
--- /dev/null
+++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/TransferInViewTree.scala
@@ -0,0 +1,536 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.data

import cats.syntax.either.*
import cats.syntax.traverse.*
import com.digitalasset.canton.ProtoDeserializationError.OtherError
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.crypto.*
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.protocol.messages.{
  DeliveredTransferOutResult,
  TransferInMediatorMessage,
}
import com.digitalasset.canton.protocol.{
  RootHash,
  SerializableContract,
  TargetDomainId,
  TransactionId,
  ViewHash,
  v1,
  v2,
}
import com.digitalasset.canton.sequencing.protocol.{SequencedEvent, SignedContent}
import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.digitalasset.canton.serialization.{ProtoConverter, ProtocolVersionedMemoizedEvidence}
import com.digitalasset.canton.topology.transaction.TrustLevel
import com.digitalasset.canton.topology.{DomainId, MediatorRef}
import com.digitalasset.canton.util.EitherUtil
import com.digitalasset.canton.version.Transfer.{SourceProtocolVersion, TargetProtocolVersion}
import com.digitalasset.canton.version.{
  HasMemoizedProtocolVersionedWithContextCompanion,
  HasProtocolVersionedWrapper,
  HasVersionedMessageWithContextCompanion,
  HasVersionedToByteString,
  ProtoVersion,
  ProtocolVersion,
  RepresentativeProtocolVersion,
}
import com.digitalasset.canton.{
  LedgerApplicationId,
  LedgerCommandId,
  LedgerParticipantId,
  LedgerSubmissionId,
  LfPartyId,
  LfWorkflowId,
  TransferCounter,
  TransferCounterO,
}
import com.google.protobuf.ByteString

import java.util.UUID

/** A blindable Merkle tree for transfer-in requests */

final case class TransferInViewTree(
    commonData: MerkleTree[TransferInCommonData],
    view: MerkleTree[TransferInView],
)(hashOps: HashOps)
    extends GenTransferViewTree[
      TransferInCommonData,
      TransferInView,
      TransferInViewTree,
      TransferInMediatorMessage,
    ](commonData, view)(hashOps) {

  override def createMediatorMessage(blindedTree: TransferInViewTree): TransferInMediatorMessage =
    TransferInMediatorMessage(blindedTree)

  // Rebuilds the tree with the given blinding policy applied to both subtrees.
  override private[data] def withBlindedSubtrees(
      optimizedBlindingPolicy: PartialFunction[RootHash, MerkleTree.BlindingCommand]
  ): MerkleTree[TransferInViewTree] =
    TransferInViewTree(
      commonData.doBlind(optimizedBlindingPolicy),
      view.doBlind(optimizedBlindingPolicy),
    )(hashOps)

  override def pretty: Pretty[TransferInViewTree] = prettyOfClass(
    param("common data", _.commonData),
    param("view", _.view),
  )
}

object TransferInViewTree
    extends HasVersionedMessageWithContextCompanion[TransferInViewTree, HashOps] {
  override val name: String = "TransferInViewTree"

  val supportedProtoVersions: SupportedProtoVersions = SupportedProtoVersions(
    ProtoVersion(1) -> ProtoCodec(
      ProtocolVersion.v30,
      supportedProtoVersion(v1.TransferViewTree)(fromProtoV1),
      _.toProtoV1.toByteString,
    )
  )

  def fromProtoV1(
      hashOps: HashOps,
      transferInViewTreeP: v1.TransferViewTree,
  ): ParsingResult[TransferInViewTree] =
    GenTransferViewTree.fromProtoV1(
      TransferInCommonData.fromByteString(hashOps),
      TransferInView.fromByteString(hashOps),
    )((commonData, view) => new TransferInViewTree(commonData, view)(hashOps))(transferInViewTreeP)
}

/** Aggregates the data of a transfer-in request that is sent to the mediator and the involved participants.
  *
  * @param salt Salt for blinding the Merkle hash
  * @param targetDomain The domain on which the contract is transferred in
  * @param targetMediator The mediator that coordinates the transfer-in request on the target domain
  * @param stakeholders The stakeholders of the transferred contract
  * @param uuid The uuid of the transfer-in request
  */
final case class TransferInCommonData private (
    override val salt: Salt,
    targetDomain: TargetDomainId,
    targetMediator: MediatorRef,
    stakeholders: Set[LfPartyId],
    uuid: UUID,
)(
    hashOps: HashOps,
    val targetProtocolVersion: TargetProtocolVersion,
    override val deserializedFrom: Option[ByteString],
) extends MerkleTreeLeaf[TransferInCommonData](hashOps)
    with HasProtocolVersionedWrapper[TransferInCommonData]
    with ProtocolVersionedMemoizedEvidence {

  override val representativeProtocolVersion
      : RepresentativeProtocolVersion[TransferInCommonData.type] =
    TransferInCommonData.protocolVersionRepresentativeFor(targetProtocolVersion.v)

  // Every stakeholder is a confirming party of a transfer-in request.
  def confirmingParties: Set[Informee] =
    stakeholders.map(ConfirmingParty(_, PositiveInt.one, TrustLevel.Ordinary))

  @transient override protected lazy val companionObj: TransferInCommonData.type =
    TransferInCommonData

  protected def toProtoV1: v1.TransferInCommonData =
    v1.TransferInCommonData(
      salt = Some(salt.toProtoV0),
      targetDomain = targetDomain.toProtoPrimitive,
      targetMediator = targetMediator.toProtoPrimitive,
      stakeholders = stakeholders.toSeq,
      uuid = ProtoConverter.UuidConverter.toProtoPrimitive(uuid),
      targetProtocolVersion = targetProtocolVersion.v.toProtoPrimitive,
    )

  override def hashPurpose: HashPurpose = HashPurpose.TransferInCommonData

  override protected[this] def toByteStringUnmemoized: ByteString =
    super[HasProtocolVersionedWrapper].toByteString

  override def pretty: Pretty[TransferInCommonData] = prettyOfClass(
    param("target domain", _.targetDomain),
    param("target mediator", _.targetMediator),
    param("stakeholders", _.stakeholders),
    param("uuid", _.uuid),
    param("salt", _.salt),
  )
}

object TransferInCommonData
    extends HasMemoizedProtocolVersionedWithContextCompanion[TransferInCommonData, HashOps] {
  override val name: String = "TransferInCommonData"

  val supportedProtoVersions = SupportedProtoVersions(
    ProtoVersion(1) -> VersionedProtoConverter(ProtocolVersion.v30)(v1.TransferInCommonData)(
      supportedProtoVersionMemoized(_)(fromProtoV1),
      _.toProtoV1.toByteString,
    )
  )

  def create(hashOps: HashOps)(
      salt: Salt,
      targetDomain: TargetDomainId,
      targetMediator: MediatorRef,
      stakeholders: Set[LfPartyId],
      uuid: UUID,
      targetProtocolVersion: TargetProtocolVersion,
  ): TransferInCommonData = TransferInCommonData(
    salt,
    targetDomain,
    targetMediator,
    stakeholders,
    uuid,
  )(hashOps, targetProtocolVersion, None)

  private[this] def fromProtoV1(hashOps: HashOps, transferInCommonDataP: v1.TransferInCommonData)(
      bytes: ByteString
  ): ParsingResult[TransferInCommonData] = {
    // NOTE(review): positional destructuring — the binder order (stakeholders before uuid before
    // targetMediator) must match the field order declared in v1.TransferInCommonData; it differs
    // from the named-argument order used in toProtoV1 above — confirm against the .proto file.
    val v1.TransferInCommonData(
      saltP,
      targetDomainP,
      stakeholdersP,
      uuidP,
      targetMediatorP,
      protocolVersionP,
    ) =
      transferInCommonDataP
    for {
      salt <- ProtoConverter.parseRequired(Salt.fromProtoV0, "salt", saltP)
      targetDomain <- TargetDomainId.fromProtoPrimitive(targetDomainP, "target_domain")
      targetMediator <- MediatorRef.fromProtoPrimitive(targetMediatorP, "target_mediator")
      stakeholders <- stakeholdersP.traverse(ProtoConverter.parseLfPartyId)
      uuid <- ProtoConverter.UuidConverter.fromProtoPrimitive(uuidP)
      protocolVersion <- ProtocolVersion.fromProtoPrimitive(protocolVersionP)
    } yield TransferInCommonData(
      salt,
      targetDomain,
      targetMediator,
      stakeholders.toSet,
      uuid,
    )(
      hashOps,
      TargetProtocolVersion(protocolVersion), // TODO(#12626)
      Some(bytes),
    )
  }
}

// TODO(#15159) For transfer counter, remove the note that it is defined iff...
/** Aggregates the data of a transfer-in request that is only sent to the involved participants
  *
  * @param salt The salt to blind the Merkle hash
  * @param submitterMetadata The submitter (and surrounding ledger-API metadata) of the transfer-in request
  * @param contract The contract to be transferred including the instance
  * @param creatingTransactionId The id of the transaction that created the contract
  * @param transferOutResultEvent The signed deliver event of the transfer-out result message
  * @param transferCounter The [[com.digitalasset.canton.TransferCounter]] of the contract.
  */
final case class TransferInView private (
    override val salt: Salt,
    submitterMetadata: TransferSubmitterMetadata,
    contract: SerializableContract,
    creatingTransactionId: TransactionId,
    transferOutResultEvent: DeliveredTransferOutResult,
    sourceProtocolVersion: SourceProtocolVersion,
    // TODO(#15179) Remove the option
    transferCounter: TransferCounterO,
)(
    hashOps: HashOps,
    override val representativeProtocolVersion: RepresentativeProtocolVersion[TransferInView.type],
    override val deserializedFrom: Option[ByteString],
) extends MerkleTreeLeaf[TransferInView](hashOps)
    with HasProtocolVersionedWrapper[TransferInView]
    with ProtocolVersionedMemoizedEvidence {

  override def hashPurpose: HashPurpose = HashPurpose.TransferInView

  @transient override protected lazy val companionObj: TransferInView.type = TransferInView

  // Convenience projections of the submitter metadata.
  val submitter: LfPartyId = submitterMetadata.submitter
  val submittingParticipant: LedgerParticipantId = submitterMetadata.submittingParticipant
  val applicationId: LedgerApplicationId = submitterMetadata.applicationId
  val submissionId: Option[LedgerSubmissionId] = submitterMetadata.submissionId
  val commandId: LedgerCommandId = submitterMetadata.commandId
  val workflowId: Option[LfWorkflowId] = submitterMetadata.workflowId

  protected def toProtoV2: v2.TransferInView =
    v2.TransferInView(
      salt = Some(salt.toProtoV0),
      submitter = submitter,
      contract = Some(contract.toProtoV1),
      creatingTransactionId = creatingTransactionId.toProtoPrimitive,
      transferOutResultEvent = Some(transferOutResultEvent.result.toProtoV1),
      sourceProtocolVersion = sourceProtocolVersion.v.toProtoPrimitive,
      submittingParticipant = submittingParticipant,
      applicationId = applicationId,
      submissionId = submissionId.getOrElse(""),
      workflowId = workflowId.getOrElse(""),
      commandId = commandId,
      // Proto v2 requires the transfer counter; absence here is a programming error.
      transferCounter = transferCounter
        .getOrElse(
          throw new IllegalStateException(
            s"Transfer counter must be defined at representative protocol version ${representativeProtocolVersion}"
          )
        )
        .toProtoPrimitive,
    )

  override protected[this] def toByteStringUnmemoized: ByteString =
    super[HasProtocolVersionedWrapper].toByteString

  override def pretty: Pretty[TransferInView] = prettyOfClass(
    param("submitter", _.submitter),
    param("contract", _.contract), // TODO(#3269) this may contain confidential data
    paramIfDefined("transfer counter", _.transferCounter),
    param("creating transaction id", _.creatingTransactionId),
    param("transfer out result", _.transferOutResultEvent),
    param("submitting participant", _.submittingParticipant),
    param("application id", _.applicationId),
    paramIfDefined("submission id", _.submissionId),
    paramIfDefined("workflow id", _.workflowId),
    param("salt", _.salt),
  )
}

object TransferInView
    extends HasMemoizedProtocolVersionedWithContextCompanion[TransferInView, HashOps] {
  override val name: String = "TransferInView"

  // Fields shared by all proto versions of the view, grouped for deserialization.
  private[TransferInView] final case class CommonData(
      salt: Salt,
      submitter: LfPartyId,
      creatingTransactionId: TransactionId,
      transferOutResultEvent: DeliveredTransferOutResult,
      sourceProtocolVersion: SourceProtocolVersion,
  )

  private[TransferInView] object CommonData {
    def fromProto(
        hashOps: HashOps,
        saltP: Option[com.digitalasset.canton.crypto.v0.Salt],
        submitterP: String,
        transferOutResultEventPO: Option[v1.SignedContent],
        creatingTransactionIdP: ByteString,
        sourceProtocolVersion: ProtocolVersion,
    ): ParsingResult[CommonData] = {
      for {
        salt <- ProtoConverter.parseRequired(Salt.fromProtoV0, "salt", saltP)
        submitter <- ProtoConverter.parseLfPartyId(submitterP)
        // TransferOutResultEvent deserialization
        transferOutResultEventP <- ProtoConverter
          .required("TransferInView.transferOutResultEvent", transferOutResultEventPO)

        transferOutResultEventMC <- SignedContent
          .fromProtoV1(transferOutResultEventP)
          .flatMap(
            _.deserializeContent(SequencedEvent.fromByteStringOpen(hashOps, sourceProtocolVersion))
          )
        transferOutResultEvent <- DeliveredTransferOutResult
          .create(Right(transferOutResultEventMC))
          .leftMap(err => OtherError(err.toString))
        creatingTransactionId <- TransactionId.fromProtoPrimitive(creatingTransactionIdP)
      } yield CommonData(
        salt,
        submitter,
        creatingTransactionId,
        transferOutResultEvent,
        SourceProtocolVersion(sourceProtocolVersion),
      )
    }
  }

  val supportedProtoVersions = SupportedProtoVersions(
    ProtoVersion(2) -> VersionedProtoConverter(ProtocolVersion.v30)(v2.TransferInView)(
      supportedProtoVersionMemoized(_)(fromProtoV2),
      _.toProtoV2.toByteString,
    )
  )

  private lazy val rpvMultidomain: RepresentativeProtocolVersion[TransferInView.type] =
    protocolVersionRepresentativeFor(ProtocolVersion.v30)

  // Default values used for submitter metadata fields that were absent before `rpvMultidomain`.
  lazy val submittingParticipantDefaultValue: DefaultValueUntilExclusive[LedgerParticipantId] =
    DefaultValueUntilExclusive(
      _.submitterMetadata.submittingParticipant,
      "submitterMetadata.submittingParticipant",
      rpvMultidomain,
      LedgerParticipantId.assertFromString("no-participant-id"),
    )

  lazy val commandIdDefaultValue: DefaultValueUntilExclusive[LedgerCommandId] =
    DefaultValueUntilExclusive(
      _.submitterMetadata.commandId,
      "submitterMetadata.commandId",
      rpvMultidomain,
      LedgerCommandId.assertFromString("no-command-id"),
    )

  lazy val applicationIdDefaultValue: DefaultValueUntilExclusive[LedgerApplicationId] =
    DefaultValueUntilExclusive(
      _.submitterMetadata.applicationId,
      "submitterMetadata.applicationId",
      rpvMultidomain,
      LedgerApplicationId.assertFromString("no-application-id"),
    )

  lazy val submissionIdDefaultValue: DefaultValueUntilExclusive[Option[LedgerSubmissionId]] =
    DefaultValueUntilExclusive(
      _.submitterMetadata.submissionId,
      "submitterMetadata.submissionId",
      rpvMultidomain,
      None,
    )

  lazy val workflowIdDefaultValue: DefaultValueUntilExclusive[Option[LfWorkflowId]] =
    DefaultValueUntilExclusive(
      _.submitterMetadata.workflowId,
      // Fixed typo: was "submitterMetadata.worfklowId"; the label must name the actual field,
      // consistent with the sibling default-value declarations above.
      "submitterMetadata.workflowId",
      rpvMultidomain,
      None,
    )

  /** Creates a transfer-in view; yields `Left` with the message of any
    * `IllegalArgumentException` thrown by the constructor's validation.
    */
  def create(hashOps: HashOps)(
      salt: Salt,
      submitterMetadata: TransferSubmitterMetadata,
      contract: SerializableContract,
      creatingTransactionId: TransactionId,
      transferOutResultEvent: DeliveredTransferOutResult,
      sourceProtocolVersion: SourceProtocolVersion,
      targetProtocolVersion: TargetProtocolVersion,
      transferCounter: TransferCounterO,
  ): Either[String, TransferInView] = Either
    .catchOnly[IllegalArgumentException](
      TransferInView(
        salt,
        submitterMetadata,
        contract,
        creatingTransactionId,
        transferOutResultEvent,
        sourceProtocolVersion,
        transferCounter,
      )(hashOps, protocolVersionRepresentativeFor(targetProtocolVersion.v), None)
    )
    .leftMap(_.getMessage)

  private[this] def fromProtoV2(hashOps: HashOps, transferInViewP: v2.TransferInView)(
      bytes: ByteString
  ): ParsingResult[TransferInView] = {
    val v2.TransferInView(
      saltP,
      submitterP,
      contractP,
      transferOutResultEventPO,
      creatingTransactionIdP,
      sourceProtocolVersionP,
      submittingParticipantP,
      applicationIdP,
      submissionIdP,
      workflowIdP,
      commandIdP,
      transferCounterP,
    ) =
      transferInViewP
    for {
      protocolVersion <- ProtocolVersion.fromProtoPrimitive(sourceProtocolVersionP)
      commonData <- CommonData.fromProto(
        hashOps,
        saltP,
        submitterP,
        transferOutResultEventPO,
        creatingTransactionIdP,
        protocolVersion,
      )
      contract <- ProtoConverter
        .required("contract", contractP)
        .flatMap(SerializableContract.fromProtoV1)
      submittingParticipantId <-
        ProtoConverter.parseLfParticipantId(submittingParticipantP)
      applicationId <- ProtoConverter.parseLFApplicationId(applicationIdP)
      submissionId <- ProtoConverter.parseLFSubmissionIdO(submissionIdP)
      workflowId <- ProtoConverter.parseLFWorkflowIdO(workflowIdP)
      commandId <- ProtoConverter.parseCommandId(commandIdP)
    } yield TransferInView(
      commonData.salt,
      TransferSubmitterMetadata(
        commonData.submitter,
        applicationId,
        submittingParticipantId,
        commandId,
        submissionId,
        workflowId,
      ),
      contract,
      commonData.creatingTransactionId,
      commonData.transferOutResultEvent,
      commonData.sourceProtocolVersion,
      Some(TransferCounter(transferCounterP)),
    )(hashOps, protocolVersionRepresentativeFor(ProtoVersion(2)), Some(bytes))
  }
}

/** A fully unblinded [[TransferInViewTree]]
  *
  * @throws java.lang.IllegalArgumentException if the [[tree]] is not fully unblinded
  */
final case class FullTransferInTree(tree: TransferInViewTree)
    extends TransferViewTree
    with HasVersionedToByteString
    with PrettyPrinting {
  require(tree.isFullyUnblinded, "A transfer-in request must be fully unblinded")

  private[this] val commonData = tree.commonData.tryUnwrap
  private[this] val view = tree.view.tryUnwrap

  def submitter: LfPartyId = view.submitter

  def submitterMetadata: TransferSubmitterMetadata = view.submitterMetadata

  def workflowId: Option[LfWorkflowId] = view.workflowId

  def stakeholders: Set[LfPartyId] = commonData.stakeholders

  def contract: SerializableContract = view.contract

  def transferCounter: TransferCounterO = view.transferCounter

  def creatingTransactionId: TransactionId = view.creatingTransactionId

  def transferOutResultEvent: DeliveredTransferOutResult = view.transferOutResultEvent

  def mediatorMessage: TransferInMediatorMessage = tree.mediatorMessage

  override def domainId: DomainId = commonData.targetDomain.unwrap

  def targetDomain: TargetDomainId = commonData.targetDomain

  override def mediator: MediatorRef = commonData.targetMediator

  override def informees: Set[Informee] = commonData.confirmingParties

  override def toBeSigned: Option[RootHash] = Some(tree.rootHash)

  override def viewHash: ViewHash = tree.viewHash

  override def toByteString(version: ProtocolVersion): ByteString = tree.toByteString(version)

  override def rootHash: RootHash = tree.rootHash

  override def pretty: Pretty[FullTransferInTree] = prettyOfClass(unnamedParam(_.tree))
}

object FullTransferInTree {
  /** Deserializes and validates that the resulting tree is fully unblinded. */
  def fromByteString(
      crypto: CryptoPureApi
  )(bytes: ByteString): ParsingResult[FullTransferInTree] =
    for {
      tree <- TransferInViewTree.fromByteString(crypto)(bytes)
      _ <- EitherUtil.condUnitE(
        tree.isFullyUnblinded,
        OtherError(s"Transfer-in request ${tree.rootHash} is not fully unblinded"),
      )
    } yield FullTransferInTree(tree)
}
diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/TransferOutViewTree.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/TransferOutViewTree.scala
new file mode 100644
index 0000000000..cd9529c813
--- /dev/null
+++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/TransferOutViewTree.scala
@@ -0,0 +1,470 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.data + +import cats.syntax.traverse.* +import com.digitalasset.canton.ProtoDeserializationError.OtherError +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.crypto.* +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.protocol.* +import com.digitalasset.canton.protocol.messages.TransferOutMediatorMessage +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.serialization.{ProtoConverter, ProtocolVersionedMemoizedEvidence} +import com.digitalasset.canton.time.TimeProof +import com.digitalasset.canton.topology.transaction.TrustLevel +import com.digitalasset.canton.topology.{DomainId, MediatorRef} +import com.digitalasset.canton.util.EitherUtil +import com.digitalasset.canton.version.Transfer.{SourceProtocolVersion, TargetProtocolVersion} +import com.digitalasset.canton.version.* +import com.digitalasset.canton.{ + LedgerApplicationId, + LedgerCommandId, + LedgerParticipantId, + LedgerSubmissionId, + LfPartyId, + LfWorkflowId, + TransferCounter, + TransferCounterO, +} +import com.google.protobuf.ByteString + +import java.util.UUID + +/** A blindable Merkle tree for transfer-out requests */ +final case class TransferOutViewTree private ( + commonData: MerkleTree[TransferOutCommonData], + view: MerkleTree[TransferOutView], +)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + TransferOutViewTree.type + ], + hashOps: HashOps, +) extends GenTransferViewTree[ + TransferOutCommonData, + TransferOutView, + TransferOutViewTree, + TransferOutMediatorMessage, + ](commonData, view)(hashOps) + with HasRepresentativeProtocolVersion { + + override private[data] def withBlindedSubtrees( + optimizedBlindingPolicy: PartialFunction[RootHash, MerkleTree.BlindingCommand] + ): MerkleTree[TransferOutViewTree] = + TransferOutViewTree( + 
commonData.doBlind(optimizedBlindingPolicy), + view.doBlind(optimizedBlindingPolicy), + )(representativeProtocolVersion, hashOps) + + protected[this] override def createMediatorMessage( + blindedTree: TransferOutViewTree + ): TransferOutMediatorMessage = + TransferOutMediatorMessage(blindedTree) + + override def pretty: Pretty[TransferOutViewTree] = prettyOfClass( + param("common data", _.commonData), + param("view", _.view), + ) + + @transient override protected lazy val companionObj: TransferOutViewTree.type = + TransferOutViewTree +} + +object TransferOutViewTree + extends HasProtocolVersionedWithContextCompanion[TransferOutViewTree, HashOps] { + + override val name: String = "TransferOutViewTree" + + val supportedProtoVersions = SupportedProtoVersions( + ProtoVersion(1) -> VersionedProtoConverter(ProtocolVersion.v30)(v1.TransferViewTree)( + supportedProtoVersion(_)((hashOps, proto) => fromProtoV1(hashOps)(proto)), + _.toProtoV1.toByteString, + ) + ) + + def apply( + commonData: MerkleTree[TransferOutCommonData], + view: MerkleTree[TransferOutView], + protocolVersion: ProtocolVersion, + hashOps: HashOps, + ): TransferOutViewTree = + TransferOutViewTree(commonData, view)( + TransferOutViewTree.protocolVersionRepresentativeFor(protocolVersion), + hashOps, + ) + + def fromProtoV1(hashOps: HashOps)( + transferOutViewTreeP: v1.TransferViewTree + ): ParsingResult[TransferOutViewTree] = + GenTransferViewTree.fromProtoV1( + TransferOutCommonData.fromByteString(hashOps), + TransferOutView.fromByteString(hashOps), + )((commonData, view) => + TransferOutViewTree(commonData, view)( + protocolVersionRepresentativeFor(ProtoVersion(1)), + hashOps, + ) + )(transferOutViewTreeP) +} + +/** Aggregates the data of a transfer-out request that is sent to the mediator and the involved participants. 
+ * + * @param salt Salt for blinding the Merkle hash + * @param sourceDomain The domain to which the transfer-out request is sent + * @param sourceMediator The mediator that coordinates the transfer-out request on the source domain + * @param stakeholders The stakeholders of the contract to be transferred + * @param adminParties The admin parties of transferring transfer-out participants + * @param uuid The request UUID of the transfer-out + */ +final case class TransferOutCommonData private ( + override val salt: Salt, + sourceDomain: SourceDomainId, + sourceMediator: MediatorRef, + stakeholders: Set[LfPartyId], + adminParties: Set[LfPartyId], + uuid: UUID, +)( + hashOps: HashOps, + val protocolVersion: SourceProtocolVersion, + override val deserializedFrom: Option[ByteString], +) extends MerkleTreeLeaf[TransferOutCommonData](hashOps) + with HasProtocolVersionedWrapper[TransferOutCommonData] + with ProtocolVersionedMemoizedEvidence { + + @transient override protected lazy val companionObj: TransferOutCommonData.type = + TransferOutCommonData + + override val representativeProtocolVersion + : RepresentativeProtocolVersion[TransferOutCommonData.type] = + TransferOutCommonData.protocolVersionRepresentativeFor(protocolVersion.v) + + protected def toProtoV1: v1.TransferOutCommonData = + v1.TransferOutCommonData( + salt = Some(salt.toProtoV0), + sourceDomain = sourceDomain.toProtoPrimitive, + sourceMediator = sourceMediator.toProtoPrimitive, + stakeholders = stakeholders.toSeq, + adminParties = adminParties.toSeq, + uuid = ProtoConverter.UuidConverter.toProtoPrimitive(uuid), + sourceProtocolVersion = protocolVersion.v.toProtoPrimitive, + ) + + override protected[this] def toByteStringUnmemoized: ByteString = + super[HasProtocolVersionedWrapper].toByteString + + override def hashPurpose: HashPurpose = HashPurpose.TransferOutCommonData + + def confirmingParties: Set[Informee] = + (stakeholders ++ adminParties).map(ConfirmingParty(_, PositiveInt.one, TrustLevel.Ordinary)) 
+ + override def pretty: Pretty[TransferOutCommonData] = prettyOfClass( + param("source domain", _.sourceDomain), + param("source mediator", _.sourceMediator), + param("stakeholders", _.stakeholders), + param("admin parties", _.adminParties), + param("uuid", _.uuid), + param("salt", _.salt), + ) +} + +object TransferOutCommonData + extends HasMemoizedProtocolVersionedWithContextCompanion[ + TransferOutCommonData, + HashOps, + ] { + override val name: String = "TransferOutCommonData" + + val supportedProtoVersions = SupportedProtoVersions( + ProtoVersion(1) -> VersionedProtoConverter(ProtocolVersion.v30)(v1.TransferOutCommonData)( + supportedProtoVersionMemoized(_)(fromProtoV1), + _.toProtoV1.toByteString, + ) + ) + + def create(hashOps: HashOps)( + salt: Salt, + sourceDomain: SourceDomainId, + sourceMediator: MediatorRef, + stakeholders: Set[LfPartyId], + adminParties: Set[LfPartyId], + uuid: UUID, + protocolVersion: SourceProtocolVersion, + ): TransferOutCommonData = TransferOutCommonData( + salt, + sourceDomain, + sourceMediator, + stakeholders, + adminParties, + uuid, + )(hashOps, protocolVersion, None) + + private[this] def fromProtoV1(hashOps: HashOps, transferOutCommonDataP: v1.TransferOutCommonData)( + bytes: ByteString + ): ParsingResult[TransferOutCommonData] = { + val v1.TransferOutCommonData( + saltP, + sourceDomainP, + stakeholdersP, + adminPartiesP, + uuidP, + sourceMediatorP, + protocolVersionP, + ) = transferOutCommonDataP + + for { + salt <- ProtoConverter.parseRequired(Salt.fromProtoV0, "salt", saltP) + sourceDomain <- SourceDomainId.fromProtoPrimitive(sourceDomainP, "source_domain") + sourceMediator <- MediatorRef.fromProtoPrimitive(sourceMediatorP, "source_mediator") + stakeholders <- stakeholdersP.traverse(ProtoConverter.parseLfPartyId) + adminParties <- adminPartiesP.traverse(ProtoConverter.parseLfPartyId) + uuid <- ProtoConverter.UuidConverter.fromProtoPrimitive(uuidP) + protocolVersion <- ProtocolVersion.fromProtoPrimitive(protocolVersionP) + 
+ } yield TransferOutCommonData( + salt, + sourceDomain, + sourceMediator, + stakeholders.toSet, + adminParties.toSet, + uuid, + )(hashOps, SourceProtocolVersion(protocolVersion), Some(bytes)) + } +} + +/** Aggregates the data of a transfer-out request that is only sent to the involved participants + */ +/** @param salt The salt used to blind the Merkle hash. + * @param submitterMetadata Metadata of the submitter + * @param creatingTransactionId Id of the transaction that created the contract + * @param contract Contract being transferred + * @param targetDomain The domain to which the contract is transferred. + * @param targetTimeProof The sequenced event from the target domain whose timestamp defines + * the baseline for measuring time periods on the target domain + * @param targetProtocolVersion Protocol version of the target domain + */ +final case class TransferOutView private ( + override val salt: Salt, + submitterMetadata: TransferSubmitterMetadata, + creatingTransactionId: TransactionId, + contract: SerializableContract, + targetDomain: TargetDomainId, + targetTimeProof: TimeProof, + targetProtocolVersion: TargetProtocolVersion, + transferCounter: TransferCounter, +)( + hashOps: HashOps, + override val representativeProtocolVersion: RepresentativeProtocolVersion[TransferOutView.type], + override val deserializedFrom: Option[ByteString], +) extends MerkleTreeLeaf[TransferOutView](hashOps) + with HasProtocolVersionedWrapper[TransferOutView] + with ProtocolVersionedMemoizedEvidence { + + @transient override protected lazy val companionObj: TransferOutView.type = TransferOutView + + override protected[this] def toByteStringUnmemoized: ByteString = + super[HasProtocolVersionedWrapper].toByteString + + def hashPurpose: HashPurpose = HashPurpose.TransferOutView + + def submitter: LfPartyId = submitterMetadata.submitter + def submittingParticipant: LedgerParticipantId = submitterMetadata.submittingParticipant + def applicationId: LedgerApplicationId = 
submitterMetadata.applicationId + def submissionId: Option[LedgerSubmissionId] = submitterMetadata.submissionId + def commandId: LedgerCommandId = submitterMetadata.commandId + def workflowId: Option[LfWorkflowId] = submitterMetadata.workflowId + + def templateId: LfTemplateId = + contract.rawContractInstance.contractInstance.unversioned.template + + protected def toProtoV2: v2.TransferOutView = + v2.TransferOutView( + salt = Some(salt.toProtoV0), + submitter = submitter, + targetDomain = targetDomain.toProtoPrimitive, + targetTimeProof = Some(targetTimeProof.toProtoV0), + targetProtocolVersion = targetProtocolVersion.v.toProtoPrimitive, + submittingParticipant = submittingParticipant, + applicationId = applicationId, + submissionId = submissionId.getOrElse(""), + workflowId = workflowId.getOrElse(""), + commandId = commandId, + transferCounter = transferCounter.toProtoPrimitive, + creatingTransactionId = creatingTransactionId.toProtoPrimitive, + contract = Some(contract.toProtoV1), + ) + + override def pretty: Pretty[TransferOutView] = prettyOfClass( + param("submitterMetadata", _.submitterMetadata), + param("template id", _.templateId), + param("creatingTransactionId", _.creatingTransactionId), + param("contract", _.contract), + param("target domain", _.targetDomain), + param("target time proof", _.targetTimeProof), + param("target protocol version", _.targetProtocolVersion.v), + param("salt", _.salt), + ) +} + +object TransferOutView + extends HasMemoizedProtocolVersionedWithContextCompanion[TransferOutView, HashOps] { + override val name: String = "TransferOutView" + + val supportedProtoVersions = SupportedProtoVersions( + ProtoVersion(2) -> VersionedProtoConverter(ProtocolVersion.v30)(v2.TransferOutView)( + supportedProtoVersionMemoized(_)(fromProtoV2), + _.toProtoV2.toByteString, + ) + ) + + def create(hashOps: HashOps)( + salt: Salt, + submitterMetadata: TransferSubmitterMetadata, + creatingTransactionId: TransactionId, + contract: SerializableContract, + 
targetDomain: TargetDomainId, + targetTimeProof: TimeProof, + sourceProtocolVersion: SourceProtocolVersion, + targetProtocolVersion: TargetProtocolVersion, + transferCounter: TransferCounterO, + ): TransferOutView = + TransferOutView( + salt, + submitterMetadata, + creatingTransactionId, + contract, + targetDomain, + targetTimeProof, + targetProtocolVersion, + transferCounter.getOrElse(throw new IllegalArgumentException("Missing transfer counter.")), + )(hashOps, protocolVersionRepresentativeFor(sourceProtocolVersion.v), None) + + private[this] def fromProtoV2(hashOps: HashOps, transferOutViewP: v2.TransferOutView)( + bytes: ByteString + ): ParsingResult[TransferOutView] = { + val v2.TransferOutView( + saltP, + submitterP, + targetDomainP, + targetTimeProofP, + targetProtocolVersionP, + submittingParticipantP, + applicationIdP, + submissionIdP, + workflowIdP, + commandIdP, + transferCounter, + creatingTransactionIdP, + contractPO, + ) = transferOutViewP + + for { + salt <- ProtoConverter.parseRequired(Salt.fromProtoV0, "salt", saltP) + submitter <- ProtoConverter.parseLfPartyId(submitterP) + targetDomain <- DomainId.fromProtoPrimitive(targetDomainP, "targetDomain") + targetProtocolVersion <- ProtocolVersion.fromProtoPrimitive(targetProtocolVersionP) + targetTimeProof <- ProtoConverter + .required("targetTimeProof", targetTimeProofP) + .flatMap(TimeProof.fromProtoV0(targetProtocolVersion, hashOps)) + submittingParticipant <- ProtoConverter.parseLfParticipantId(submittingParticipantP) + applicationId <- ProtoConverter.parseLFApplicationId(applicationIdP) + submissionId <- ProtoConverter.parseLFSubmissionIdO(submissionIdP) + workflowId <- ProtoConverter.parseLFWorkflowIdO(workflowIdP) + commandId <- ProtoConverter.parseCommandId(commandIdP) + creatingTransactionId <- TransactionId.fromProtoPrimitive(creatingTransactionIdP) + contract <- ProtoConverter + .required("TransferOutViewTree.contract", contractPO) + .flatMap(SerializableContract.fromProtoV1) + + } yield 
TransferOutView( + salt, + TransferSubmitterMetadata( + submitter, + applicationId, + submittingParticipant, + commandId, + submissionId, + workflowId, + ), + creatingTransactionId, + contract, + TargetDomainId(targetDomain), + targetTimeProof, + TargetProtocolVersion(targetProtocolVersion), + TransferCounter(transferCounter), + )( + hashOps, + protocolVersionRepresentativeFor(ProtoVersion(2)), // TODO(#12626) + Some(bytes), + ) + + } +} + +/** A fully unblinded [[TransferOutViewTree]] + * + * @throws java.lang.IllegalArgumentException if the [[tree]] is not fully unblinded + */ +final case class FullTransferOutTree(tree: TransferOutViewTree) + extends TransferViewTree + with HasVersionedToByteString + with PrettyPrinting { + require(tree.isFullyUnblinded, "A transfer-out request must be fully unblinded") + + private[this] val commonData = tree.commonData.tryUnwrap + private[this] val view = tree.view.tryUnwrap + + def submitter: LfPartyId = view.submitter + + def submitterMetadata: TransferSubmitterMetadata = view.submitterMetadata + def workflowId: Option[LfWorkflowId] = view.workflowId + + def stakeholders: Set[LfPartyId] = commonData.stakeholders + + def adminParties: Set[LfPartyId] = commonData.adminParties + + def contractId: LfContractId = view.contract.contractId + + def templateId: LfTemplateId = view.templateId + + def transferCounter: TransferCounter = view.transferCounter + + def sourceDomain: SourceDomainId = commonData.sourceDomain + + def targetDomain: TargetDomainId = view.targetDomain + + def targetDomainPV: TargetProtocolVersion = view.targetProtocolVersion + + def targetTimeProof: TimeProof = view.targetTimeProof + + def mediatorMessage: TransferOutMediatorMessage = tree.mediatorMessage + + override def domainId: DomainId = sourceDomain.unwrap + + override def mediator: MediatorRef = commonData.sourceMediator + + override def informees: Set[Informee] = commonData.confirmingParties + + override def toBeSigned: Option[RootHash] = 
Some(tree.rootHash) + + override def viewHash: ViewHash = tree.viewHash + + override def rootHash: RootHash = tree.rootHash + + override def pretty: Pretty[FullTransferOutTree] = prettyOfClass(unnamedParam(_.tree)) + + override def toByteString(version: ProtocolVersion): ByteString = tree.toByteString(version) +} + +object FullTransferOutTree { + def fromByteString( + crypto: CryptoPureApi + )(bytes: ByteString): ParsingResult[FullTransferOutTree] = + for { + tree <- TransferOutViewTree.fromByteString(crypto)(bytes) + _ <- EitherUtil.condUnitE( + tree.isFullyUnblinded, + OtherError(s"Transfer-out request ${tree.rootHash} is not fully unblinded"), + ) + } yield FullTransferOutTree(tree) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/TransferSubmitterMetadata.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/TransferSubmitterMetadata.scala new file mode 100644 index 0000000000..65c221b2cc --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/TransferSubmitterMetadata.scala @@ -0,0 +1,29 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.data + +import com.digitalasset.canton.* +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} + +/** Information about the submitters of the transaction in the case of a Transfer. + * This data structure is quite similar to [[com.digitalasset.canton.data.SubmitterMetadata]] + * but differ on a small number of fields. 
+ */ +final case class TransferSubmitterMetadata( + submitter: LfPartyId, + applicationId: LedgerApplicationId, + submittingParticipant: LedgerParticipantId, + commandId: LedgerCommandId, + submissionId: Option[LedgerSubmissionId], + workflowId: Option[LfWorkflowId], +) extends PrettyPrinting { + + override def pretty: Pretty[TransferSubmitterMetadata] = prettyOfClass( + param("submitter", _.submitter), + param("application id", _.applicationId), + param("submitter participant", _.submittingParticipant), + param("command id", _.commandId), + paramIfDefined("submission id", _.submissionId), + ) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/ViewCommonData.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/ViewCommonData.scala new file mode 100644 index 0000000000..ad3db9d103 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/ViewCommonData.scala @@ -0,0 +1,143 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.data + +import cats.syntax.either.* +import cats.syntax.traverse.* +import com.digitalasset.canton.ProtoDeserializationError.InvariantViolation +import com.digitalasset.canton.config.RequireTypes.NonNegativeInt +import com.digitalasset.canton.crypto.* +import com.digitalasset.canton.logging.pretty.Pretty +import com.digitalasset.canton.protocol.{ConfirmationPolicy, v1} +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.serialization.{ProtoConverter, ProtocolVersionedMemoizedEvidence} +import com.digitalasset.canton.version.* +import com.google.common.annotations.VisibleForTesting +import com.google.protobuf.ByteString + +/** Information concerning every '''member''' involved in processing the underlying view. 
+ * + * @param threshold If the sum of the weights of the parties approving the view attains the threshold, + * the view is considered approved. + */ +// This class is a reference example of serialization best practices, demonstrating: +// - memoized serialization, which is required if we need to compute a signature or cryptographic hash of a class +// - use of an UntypedVersionedMessage wrapper when serializing to an anonymous binary format +// Please consult the team if you intend to change the design of serialization. +// +// The constructor and `fromProto...` methods are private to ensure that clients cannot create instances with an incorrect `deserializedFrom` field. +// +// Optional parameters are strongly discouraged, as each parameter needs to be consciously set in a production context. +final case class ViewCommonData private ( + informees: Set[Informee], + threshold: NonNegativeInt, + salt: Salt, +)( + hashOps: HashOps, + override val representativeProtocolVersion: RepresentativeProtocolVersion[ViewCommonData.type], + override val deserializedFrom: Option[ByteString], +) extends MerkleTreeLeaf[ViewCommonData](hashOps) + // The class needs to implement ProtocolVersionedMemoizedEvidence, because we want that serialize always yields the same ByteString. + // This is to ensure that different participants compute the same hash after receiving a ViewCommonData over the network. + // (Recall that serialization is in general not guaranteed to be deterministic.) + with ProtocolVersionedMemoizedEvidence + // The class implements `HasProtocolVersionedWrapper` because we serialize it to an anonymous binary format and need to encode + // the version of the serialized Protobuf message + with HasProtocolVersionedWrapper[ViewCommonData] { + + // The toProto... methods are deliberately protected, as they could otherwise be abused to bypass memoization. 
+ // + // If another serializable class contains a ViewCommonData, it has to include it as a ByteString + // (and not as "message ViewCommonData") in its ProtoBuf representation. + + @transient override protected lazy val companionObj: ViewCommonData.type = ViewCommonData + + // We use named parameters, because then the code remains correct even when the ProtoBuf code generator + // changes the order of parameters. + def toProtoV1: v1.ViewCommonData = + v1.ViewCommonData( + informees = informees.map(_.toProtoV1).toSeq, + threshold = threshold.unwrap, + salt = Some(salt.toProtoV0), + ) + + // When serializing the class to an anonymous binary format, we serialize it to an UntypedVersionedMessage version of the + // corresponding Protobuf message + override protected[this] def toByteStringUnmemoized: ByteString = toByteString + + override val hashPurpose: HashPurpose = HashPurpose.ViewCommonData + + override def pretty: Pretty[ViewCommonData] = prettyOfClass( + param("informees", _.informees), + param("threshold", _.threshold), + param("salt", _.salt), + ) + + @VisibleForTesting + def copy( + informees: Set[Informee] = this.informees, + threshold: NonNegativeInt = this.threshold, + salt: Salt = this.salt, + ): ViewCommonData = + ViewCommonData(informees, threshold, salt)(hashOps, representativeProtocolVersion, None) +} + +object ViewCommonData + extends HasMemoizedProtocolVersionedWithContextCompanion[ + ViewCommonData, + (HashOps, ConfirmationPolicy), + ] { + override val name: String = "ViewCommonData" + + val supportedProtoVersions = SupportedProtoVersions( + ProtoVersion(1) -> VersionedProtoConverter(ProtocolVersion.v30)(v1.ViewCommonData)( + supportedProtoVersionMemoized(_)(fromProtoV1), + _.toProtoV1.toByteString, + ) + ) + + /** Creates a fresh [[ViewCommonData]]. */ + // The "create" method has the following advantages over the auto-generated "apply" method: + // - The parameter lists have been flipped to facilitate curried usages. 
+ // - The deserializedFrom field cannot be set; so it cannot be set incorrectly. + // + // The method is called "create" instead of "apply" + // to not confuse the Idea compiler by overloading "apply". + // (This is not a problem with this particular class, but it has been a problem with other classes.) + def create(hashOps: HashOps)( + informees: Set[Informee], + threshold: NonNegativeInt, + salt: Salt, + protocolVersion: ProtocolVersion, + ): ViewCommonData = + // The deserializedFrom field is set to "None" as this is for creating "fresh" instances. + new ViewCommonData(informees, threshold, salt)( + hashOps, + protocolVersionRepresentativeFor(protocolVersion), + None, + ) + + private def fromProtoV1( + context: (HashOps, ConfirmationPolicy), + viewCommonDataP: v1.ViewCommonData, + )(bytes: ByteString): ParsingResult[ViewCommonData] = { + val (hashOps, _confirmationPolicy) = context + for { + informees <- viewCommonDataP.informees.traverse(Informee.fromProtoV1) + + salt <- ProtoConverter + .parseRequired(Salt.fromProtoV0, "salt", viewCommonDataP.salt) + .leftMap(_.inField("salt")) + + threshold <- (NonNegativeInt + .create(viewCommonDataP.threshold) + .leftMap(InvariantViolation.toProtoDeserializationError)) + .leftMap(_.inField("threshold")) + } yield new ViewCommonData(informees.toSet, threshold, salt)( + hashOps, + protocolVersionRepresentativeFor(ProtoVersion(1)), + Some(bytes), + ) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/ViewParticipantData.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/ViewParticipantData.scala new file mode 100644 index 0000000000..b65ecb8a57 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/ViewParticipantData.scala @@ -0,0 +1,483 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.data + +import cats.syntax.either.* +import cats.syntax.functor.* +import cats.syntax.traverse.* +import com.digitalasset.canton.crypto.* +import com.digitalasset.canton.data.ActionDescription.{ + CreateActionDescription, + ExerciseActionDescription, + FetchActionDescription, + LookupByKeyActionDescription, +} +import com.digitalasset.canton.data.ViewParticipantData.{InvalidViewParticipantData, RootAction} +import com.digitalasset.canton.logging.pretty.Pretty +import com.digitalasset.canton.protocol.ContractIdSyntax.* +import com.digitalasset.canton.protocol.* +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.serialization.{ + ProtoConverter, + ProtocolVersionedMemoizedEvidence, + SerializationCheckFailed, +} +import com.digitalasset.canton.version.* +import com.digitalasset.canton.{ + LfCommand, + LfCreateCommand, + LfExerciseByKeyCommand, + LfExerciseCommand, + LfFetchByKeyCommand, + LfFetchCommand, + LfLookupByKeyCommand, + LfPartyId, + ProtoDeserializationError, + checked, +} +import com.google.common.annotations.VisibleForTesting +import com.google.protobuf.ByteString + +/** Information concerning every '''participant''' involved in processing the underlying view. + * + * @param coreInputs [[LfContractId]] used by the core of the view and not assigned by a Create node in the view or its subviews, + * independently of whether the creation is rolled back. + * Every contract id is mapped to its contract instances and their meta-information. + * Contracts are marked as being [[InputContract.consumed]] iff + * they are consumed in the core of the view. + * @param createdCore associates contract ids of Create nodes in the core of the view to the corresponding contract + * instance. The elements are ordered in execution order. 
+ * @param createdInSubviewArchivedInCore + * The contracts that are created in subviews and archived in the core. + * The archival has the same rollback scope as the view. + * For [[com.digitalasset.canton.protocol.WellFormedTransaction]]s, the creation therefore is not rolled + * back either as the archival can only refer to non-rolled back creates. + * @param resolvedKeys + * Specifies how to resolve [[com.daml.lf.engine.ResultNeedKey]] requests from DAMLe (resulting from e.g., fetchByKey, + * lookupByKey) when interpreting the view. The resolved contract IDs must be in the [[coreInputs]]. + * Stores only the resolution difference between this view's global key inputs + * [[com.digitalasset.canton.data.TransactionView.globalKeyInputs]] + * and the aggregated global key inputs from the subviews + * (see [[com.digitalasset.canton.data.TransactionView.globalKeyInputs]] for the aggregation algorithm). + * In [[com.daml.lf.transaction.ContractKeyUniquenessMode.Strict]], + * the [[com.digitalasset.canton.data.FreeKey]] resolutions must be checked during conflict detection. + * @param actionDescription The description of the root action of the view + * @param rollbackContext The rollback context of the root action of the view. 
+ * @throws ViewParticipantData$.InvalidViewParticipantData + * if [[createdCore]] contains two elements with the same contract id, + * if [[coreInputs]]`(id).contractId != id` + * if [[createdInSubviewArchivedInCore]] overlaps with [[createdCore]]'s ids or [[coreInputs]] + * if [[coreInputs]] does not contain the resolved contract ids of [[resolvedKeys]] + * if the [[actionDescription]] is a [[com.digitalasset.canton.data.ActionDescription.CreateActionDescription]] + * and the created id is not the first contract ID in [[createdCore]] + * if the [[actionDescription]] is a [[com.digitalasset.canton.data.ActionDescription.ExerciseActionDescription]] + * or [[com.digitalasset.canton.data.ActionDescription.FetchActionDescription]] and the input contract is not in [[coreInputs]] + * if the [[actionDescription]] is a [[com.digitalasset.canton.data.ActionDescription.LookupByKeyActionDescription]] + * and the key is not in [[resolvedKeys]]. + * @throws com.digitalasset.canton.serialization.SerializationCheckFailed if this instance cannot be serialized + */ +final case class ViewParticipantData private ( + coreInputs: Map[LfContractId, InputContract], + createdCore: Seq[CreatedContract], + createdInSubviewArchivedInCore: Set[LfContractId], + resolvedKeys: Map[LfGlobalKey, SerializableKeyResolution], + actionDescription: ActionDescription, + rollbackContext: RollbackContext, + salt: Salt, +)( + hashOps: HashOps, + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + ViewParticipantData.type + ], + override val deserializedFrom: Option[ByteString], +) extends MerkleTreeLeaf[ViewParticipantData](hashOps) + with HasProtocolVersionedWrapper[ViewParticipantData] + with ProtocolVersionedMemoizedEvidence { + { + def requireDistinct[A](vals: Seq[A])(message: A => String): Unit = { + val set = scala.collection.mutable.Set[A]() + vals.foreach { v => + if (set(v)) throw InvalidViewParticipantData(message(v)) + else set += v + } + } + + val createdIds = 
createdCore.map(_.contract.contractId) + requireDistinct(createdIds) { id => + val indices = createdIds.zipWithIndex.collect { + case (createdId, idx) if createdId == id => idx + } + s"createdCore contains the contract id $id multiple times at indices ${indices.mkString(", ")}" + } + + coreInputs.foreach { case (id, usedContract) => + if (id != usedContract.contractId) + throw InvalidViewParticipantData( + s"Inconsistent ids for used contract: $id and ${usedContract.contractId}" + ) + + if (createdInSubviewArchivedInCore.contains(id)) + throw InvalidViewParticipantData( + s"Contracts created in a subview overlap with core inputs: $id" + ) + } + + val transientOverlap = createdInSubviewArchivedInCore intersect createdIds.toSet + if (transientOverlap.nonEmpty) + throw InvalidViewParticipantData( + s"Contract created in a subview are also created in the core: $transientOverlap" + ) + + def isAssignedKeyInconsistent( + keyWithResolution: (LfGlobalKey, SerializableKeyResolution) + ): Boolean = { + val (key, resolution) = keyWithResolution + resolution.resolution.fold(false) { cid => + val inconsistent = for { + inputContract <- coreInputs.get(cid) + declaredKey <- inputContract.contract.metadata.maybeKey + } yield declaredKey != key + inconsistent.getOrElse(true) + } + } + val keyInconsistencies = resolvedKeys.filter(isAssignedKeyInconsistent) + + if (keyInconsistencies.nonEmpty) { + throw InvalidViewParticipantData(show"Inconsistencies for resolved keys: $keyInconsistencies") + } + } + + def rootAction(enableContractUpgrading: Boolean): RootAction = + actionDescription match { + case CreateActionDescription(contractId, _seed, _version) => + val createdContract = createdCore.headOption.getOrElse( + throw InvalidViewParticipantData( + show"No created core contracts declared for a view that creates contract $contractId at the root" + ) + ) + if (createdContract.contract.contractId != contractId) + throw InvalidViewParticipantData( + show"View with root action Create 
$contractId declares ${createdContract.contract.contractId} as first created core contract." + ) + val metadata = createdContract.contract.metadata + val contractInst = createdContract.contract.rawContractInstance.contractInstance + + RootAction( + LfCreateCommand( + templateId = contractInst.unversioned.template, + argument = contractInst.unversioned.arg, + ), + metadata.signatories, + failed = false, + ) + + case ExerciseActionDescription( + inputContractId, + commandTemplateId, + choice, + interfaceId, + chosenValue, + actors, + byKey, + _seed, + _version, + failed, + ) => + val inputContract = coreInputs.getOrElse( + inputContractId, + throw InvalidViewParticipantData( + show"Input contract $inputContractId of the Exercise root action is not declared as core input." + ), + ) + + // commandTemplateId is not populated prior to ProtocolVersion.v5 + val templateId = commandTemplateId match { + case Some(templateId) if enableContractUpgrading => templateId + case _ => inputContract.contract.contractInstance.unversioned.template + } + + val cmd = if (byKey) { + val key = inputContract.contract.metadata.maybeKey + .map(_.key) + .getOrElse( + throw InvalidViewParticipantData( + "Flag byKey set on an exercise of a contract without key." + ) + ) + LfExerciseByKeyCommand( + templateId = templateId, + contractKey = key, + choiceId = choice, + argument = chosenValue, + ) + } else { + LfExerciseCommand( + templateId = templateId, + interfaceId = interfaceId, + contractId = inputContractId, + choiceId = choice, + argument = chosenValue, + ) + } + RootAction(cmd, actors, failed) + + case FetchActionDescription(inputContractId, actors, byKey, _version) => + val inputContract = coreInputs.getOrElse( + inputContractId, + throw InvalidViewParticipantData( + show"Input contract $inputContractId of the Fetch root action is not declared as core input." 
+ ), + ) + val templateId = inputContract.contract.contractInstance.unversioned.template + val cmd = if (byKey) { + val key = inputContract.contract.metadata.maybeKey + .map(_.key) + .getOrElse( + throw InvalidViewParticipantData( + "Flag byKey set on a fetch of a contract without key." + ) + ) + LfFetchByKeyCommand(templateId = templateId, key = key) + } else { + LfFetchCommand(templateId = templateId, coid = inputContractId) + } + RootAction(cmd, actors, failed = false) + + case LookupByKeyActionDescription(key, _version) => + val keyResolution = resolvedKeys.getOrElse( + key, + throw InvalidViewParticipantData( + show"Key $key of LookupByKey root action is not resolved." + ), + ) + val maintainers = keyResolution match { + case AssignedKey(contractId) => checked(coreInputs(contractId)).maintainers + case FreeKey(maintainers) => maintainers + } + + RootAction( + LfLookupByKeyCommand(templateId = key.templateId, contractKey = key.key), + maintainers, + failed = false, + ) + } + + @transient override protected lazy val companionObj: ViewParticipantData.type = + ViewParticipantData + + private[ViewParticipantData] def toProtoV3: v3.ViewParticipantData = v3.ViewParticipantData( + coreInputs = coreInputs.values.map(_.toProtoV1).toSeq, + createdCore = createdCore.map(_.toProtoV1), + createdInSubviewArchivedInCore = createdInSubviewArchivedInCore.toSeq.map(_.toProtoPrimitive), + resolvedKeys = resolvedKeys.toList.map { case (k, res) => ResolvedKey(k, res).toProtoV3 }, + actionDescription = Some(actionDescription.toProtoV2), + rollbackContext = if (rollbackContext.isEmpty) None else Some(rollbackContext.toProtoV3), + salt = Some(salt.toProtoV0), + ) + + override protected[this] def toByteStringUnmemoized: ByteString = + super[HasProtocolVersionedWrapper].toByteString + + override def hashPurpose: HashPurpose = HashPurpose.ViewParticipantData + + override def pretty: Pretty[ViewParticipantData] = prettyOfClass( + paramIfNonEmpty("core inputs", _.coreInputs), + 
paramIfNonEmpty("created core", _.createdCore), + paramIfNonEmpty("created in subview, archived in core", _.createdInSubviewArchivedInCore), + paramIfNonEmpty("resolved keys", _.resolvedKeys), + param("action description", _.actionDescription), + param("rollback context", _.rollbackContext), + param("salt", _.salt), + ) + + /** Extends [[resolvedKeys]] with the maintainers of assigned keys */ + val resolvedKeysWithMaintainers: Map[LfGlobalKey, KeyResolutionWithMaintainers] = + resolvedKeys.fmap { + case assigned @ AssignedKey(contractId) => + val maintainers = + // checked by `isAssignedKeyInconsistent` above + checked( + coreInputs.getOrElse( + contractId, + throw InvalidViewParticipantData( + s"No input contract $contractId for a resolved key found" + ), + ) + ).maintainers + AssignedKeyWithMaintainers(contractId, maintainers)(assigned.version) + case free @ FreeKey(_) => free + } + + @VisibleForTesting + def copy( + coreInputs: Map[LfContractId, InputContract] = this.coreInputs, + createdCore: Seq[CreatedContract] = this.createdCore, + createdInSubviewArchivedInCore: Set[LfContractId] = this.createdInSubviewArchivedInCore, + resolvedKeys: Map[LfGlobalKey, SerializableKeyResolution] = this.resolvedKeys, + actionDescription: ActionDescription = this.actionDescription, + rollbackContext: RollbackContext = this.rollbackContext, + salt: Salt = this.salt, + ): ViewParticipantData = + ViewParticipantData( + coreInputs, + createdCore, + createdInSubviewArchivedInCore, + resolvedKeys, + actionDescription, + rollbackContext, + salt, + )(hashOps, representativeProtocolVersion, None) +} + +object ViewParticipantData + extends HasMemoizedProtocolVersionedWithContextCompanion[ViewParticipantData, HashOps] { + override val name: String = "ViewParticipantData" + + val supportedProtoVersions: SupportedProtoVersions = SupportedProtoVersions( + ProtoVersion(3) -> VersionedProtoConverter(ProtocolVersion.v30)(v3.ViewParticipantData)( + supportedProtoVersionMemoized(_)(fromProtoV3), 
_.toProtoV3.toByteString, + ) + ) + + /** Creates a view participant data. + * + * @throws InvalidViewParticipantData + * if [[ViewParticipantData.createdCore]] contains two elements with the same contract id, + * if [[ViewParticipantData.coreInputs]]`(id).contractId != id` + * if [[ViewParticipantData.createdInSubviewArchivedInCore]] overlaps with [[ViewParticipantData.createdCore]]'s ids or [[ViewParticipantData.coreInputs]] + * if [[ViewParticipantData.coreInputs]] does not contain the resolved contract ids in [[ViewParticipantData.resolvedKeys]] + * if [[ViewParticipantData.createdCore]] creates a contract with a key that is not in [[ViewParticipantData.resolvedKeys]] + * if the [[ViewParticipantData.actionDescription]] is a [[com.digitalasset.canton.data.ActionDescription.CreateActionDescription]] + * and the created id is not the first contract ID in [[ViewParticipantData.createdCore]] + * if the [[ViewParticipantData.actionDescription]] is a [[com.digitalasset.canton.data.ActionDescription.ExerciseActionDescription]] + * or [[com.digitalasset.canton.data.ActionDescription.FetchActionDescription]] and the input contract is not in [[ViewParticipantData.coreInputs]] + * if the [[ViewParticipantData.actionDescription]] is a [[com.digitalasset.canton.data.ActionDescription.LookupByKeyActionDescription]] + * and the key is not in [[ViewParticipantData.resolvedKeys]]. 
+ * @throws com.digitalasset.canton.serialization.SerializationCheckFailed if this instance cannot be serialized + */ + @throws[SerializationCheckFailed[com.daml.lf.value.ValueCoder.EncodeError]] + def tryCreate(hashOps: HashOps)( + coreInputs: Map[LfContractId, InputContract], + createdCore: Seq[CreatedContract], + createdInSubviewArchivedInCore: Set[LfContractId], + resolvedKeys: Map[LfGlobalKey, SerializableKeyResolution], + actionDescription: ActionDescription, + rollbackContext: RollbackContext, + salt: Salt, + protocolVersion: ProtocolVersion, + ): ViewParticipantData = + ViewParticipantData( + coreInputs, + createdCore, + createdInSubviewArchivedInCore, + resolvedKeys, + actionDescription, + rollbackContext, + salt, + )(hashOps, protocolVersionRepresentativeFor(protocolVersion), None) + + /** Creates a view participant data. + * + * Yields `Left(...)` + * if [[ViewParticipantData.createdCore]] contains two elements with the same contract id, + * if [[ViewParticipantData.coreInputs]]`(id).contractId != id` + * if [[ViewParticipantData.createdInSubviewArchivedInCore]] overlaps with [[ViewParticipantData.createdCore]]'s ids or [[ViewParticipantData.coreInputs]] + * if [[ViewParticipantData.coreInputs]] does not contain the resolved contract ids in [[ViewParticipantData.resolvedKeys]] + * if [[ViewParticipantData.createdCore]] creates a contract with a key that is not in [[ViewParticipantData.resolvedKeys]] + * if the [[ViewParticipantData.actionDescription]] is a [[com.digitalasset.canton.data.ActionDescription.CreateActionDescription]] + * and the created id is not the first contract ID in [[ViewParticipantData.createdCore]] + * if the [[ViewParticipantData.actionDescription]] is a [[com.digitalasset.canton.data.ActionDescription.ExerciseActionDescription]] + * or [[com.digitalasset.canton.data.ActionDescription.FetchActionDescription]] and the input contract is not in [[ViewParticipantData.coreInputs]] + * if the [[ViewParticipantData.actionDescription]] is a 
[[com.digitalasset.canton.data.ActionDescription.LookupByKeyActionDescription]] + * and the key is not in [[ViewParticipantData.resolvedKeys]]. + * if this instance cannot be serialized. + */ + def create(hashOps: HashOps)( + coreInputs: Map[LfContractId, InputContract], + createdCore: Seq[CreatedContract], + createdInSubviewArchivedInCore: Set[LfContractId], + resolvedKeys: Map[LfGlobalKey, SerializableKeyResolution], + actionDescription: ActionDescription, + rollbackContext: RollbackContext, + salt: Salt, + protocolVersion: ProtocolVersion, + ): Either[String, ViewParticipantData] = + returnLeftWhenInitializationFails( + ViewParticipantData.tryCreate(hashOps)( + coreInputs, + createdCore, + createdInSubviewArchivedInCore, + resolvedKeys, + actionDescription, + rollbackContext, + salt, + protocolVersion, + ) + ) + + private[this] def returnLeftWhenInitializationFails[A](initialization: => A): Either[String, A] = + try { + Right(initialization) + } catch { + case InvalidViewParticipantData(message) => Left(message) + case SerializationCheckFailed(err) => Left(err.toString) + } + + private def fromProtoV3(hashOps: HashOps, dataP: v3.ViewParticipantData)( + bytes: ByteString + ): ParsingResult[ViewParticipantData] = { + val v3.ViewParticipantData( + saltP, + coreInputsP, + createdCoreP, + createdInSubviewArchivedInCoreP, + resolvedKeysP, + actionDescriptionP, + rbContextP, + ) = dataP + + for { + coreInputsSeq <- coreInputsP.traverse(InputContract.fromProtoV1) + coreInputs = coreInputsSeq.view + .map(inputContract => inputContract.contract.contractId -> inputContract) + .toMap + createdCore <- createdCoreP.traverse(CreatedContract.fromProtoV1) + createdInSubviewArchivedInCore <- createdInSubviewArchivedInCoreP + .traverse(ProtoConverter.parseLfContractId) + resolvedKeys <- resolvedKeysP.traverse( + ResolvedKey.fromProtoV3(_).map(rk => rk.key -> rk.resolution) + ) + resolvedKeysMap = resolvedKeys.toMap + actionDescription <- ProtoConverter + 
.required("action_description", actionDescriptionP) + .flatMap(ActionDescription.fromProtoV2) + + salt <- ProtoConverter + .parseRequired(Salt.fromProtoV0, "salt", saltP) + .leftMap(_.inField("salt")) + + rollbackContext <- RollbackContext + .fromProtoV0(rbContextP) + .leftMap(_.inField("rollbackContext")) + + viewParticipantData <- returnLeftWhenInitializationFails( + ViewParticipantData( + coreInputs = coreInputs, + createdCore = createdCore, + createdInSubviewArchivedInCore = createdInSubviewArchivedInCore.toSet, + resolvedKeys = resolvedKeysMap, + actionDescription = actionDescription, + rollbackContext = rollbackContext, + salt = salt, + )(hashOps, protocolVersionRepresentativeFor(ProtoVersion(3)), Some(bytes)) + ).leftMap(ProtoDeserializationError.OtherError) + } yield viewParticipantData + } + + final case class RootAction(command: LfCommand, authorizers: Set[LfPartyId], failed: Boolean) + + /** Indicates an attempt to create an invalid [[ViewParticipantData]]. */ + final case class InvalidViewParticipantData(message: String) extends RuntimeException(message) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/ViewPosition.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/ViewPosition.scala new file mode 100644 index 0000000000..2b410cd3dc --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/ViewPosition.scala @@ -0,0 +1,191 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.data + +import cats.Order +import cats.instances.list.* +import com.digitalasset.canton.data.ViewPosition.MerklePathElement +import com.digitalasset.canton.data.ViewPosition.MerkleSeqIndex.Direction +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.protocol.v2 +import com.digitalasset.canton.serialization.DeterministicEncoding +import com.google.protobuf.ByteString + +/** A position encodes the path from a view in a transaction tree to its root. + * The encoding must not depend on the hashes of the nodes. + * + * @param position The path from the view to the root as a singly-linked list. + * The path starts at the view rather than the root so that paths to the root can + * be shared. + */ +final case class ViewPosition(position: List[MerklePathElement]) extends PrettyPrinting { + + /** Adds a [[ViewPosition.MerklePathElement]] at the start of the path. */ + def +:(index: MerklePathElement): ViewPosition = new ViewPosition(index :: position) + + def encodeDeterministically: ByteString = + DeterministicEncoding.encodeSeqWith(position)(_.encodeDeterministically) + + /** Reverse the position, as well as all contained MerkleSeqIndex path elements */ + def reverse: ViewPositionFromRoot = ViewPositionFromRoot(position.reverse.map(_.reverse)) + + def toProtoV2: v2.ViewPosition = v2.ViewPosition(position = position.map(_.toProtoV2)) + + override def pretty: Pretty[ViewPosition] = prettyOfClass(unnamedParam(_.position.mkShow())) +} + +/** Same as [[ViewPosition]], with the position directed from the root to the leaf */ +final case class ViewPositionFromRoot(position: List[MerklePathElement]) extends AnyVal { + def isTopLevel: Boolean = position.size == 1 + def isEmpty: Boolean = position.isEmpty +} + +object ViewPosition { + + /** The root [[ViewPosition]] has an empty path. 
*/ + val root: ViewPosition = new ViewPosition(List.empty[MerklePathElement]) + + implicit def prettyViewPosition: Pretty[ViewPosition] = { + import com.digitalasset.canton.logging.pretty.Pretty.* + prettyOfClass(unnamedParam(_.position)) + } + + /** Will fail with an exception, if used to compare `ListIndex` with `MerkleSeqIndex` or `MerkleSeqIndexFromRoot`. + */ + private[canton] val orderViewPosition: Order[ViewPosition] = + Order.by((_: ViewPosition).position.reverse)( + catsKernelStdOrderForList(MerklePathElement.orderMerklePathElement) + ) + + def fromProtoV2(viewPositionP: v2.ViewPosition): ViewPosition = { + val v2.ViewPosition(positionP) = viewPositionP + val position = positionP.map(MerkleSeqIndex.fromProtoV2).toList + ViewPosition(position) + } + + /** A single element on a path through a Merkle tree. */ + sealed trait MerklePathElement extends Product with Serializable with PrettyPrinting { + def encodeDeterministically: ByteString + def reverse: MerklePathElement + + def toProtoV2: v2.MerkleSeqIndex + } + + /** For [[MerkleTreeInnerNode]]s which branch to a list of subviews, + * the subtree is identified by the index in the list of subviews. + */ + final case class ListIndex(index: Int) extends MerklePathElement { + override def encodeDeterministically: ByteString = + DeterministicEncoding + .encodeByte(MerklePathElement.ListIndexPrefix) + .concat(DeterministicEncoding.encodeInt(index)) + + override def pretty: Pretty[ListIndex] = prettyOfString(_.index.toString) + + override lazy val reverse: ListIndex = this + + override def toProtoV2: v2.MerkleSeqIndex = + throw new UnsupportedOperationException( + "ListIndex is for legacy use only and should not be serialized" + ) + } + + /** A leaf position in a [[MerkleSeq]], encodes as a path of directions from the leaf to the root. + * The path is directed from the leaf to the root such that common subpaths can be shared. 
+ */ + final case class MerkleSeqIndex(index: List[Direction]) extends MerklePathElement { + override def encodeDeterministically: ByteString = + DeterministicEncoding + .encodeByte(MerklePathElement.MerkleSeqIndexPrefix) + .concat(DeterministicEncoding.encodeSeqWith(index)(_.encodeDeterministically)) + + override def pretty: Pretty[MerkleSeqIndex] = + prettyOfString(_ => index.reverse.map(_.show).mkString("")) + + override lazy val reverse: MerkleSeqIndexFromRoot = MerkleSeqIndexFromRoot(index.reverse) + + override def toProtoV2: v2.MerkleSeqIndex = + v2.MerkleSeqIndex(isRight = index.map(_ == Direction.Right)) + } + + /** Same as [[MerkleSeqIndex]], with the position directed from the root to the leaf */ + final case class MerkleSeqIndexFromRoot(index: List[Direction]) extends MerklePathElement { + override def encodeDeterministically: ByteString = + throw new UnsupportedOperationException( + "MerkleSeqIndexFromRoot is for internal use only and should not be encoded" + ) + + override def pretty: Pretty[MerkleSeqIndexFromRoot] = + prettyOfString(_ => index.map(_.show).mkString("")) + + override lazy val reverse: MerkleSeqIndex = MerkleSeqIndex(index.reverse) + + def toProtoV2: v2.MerkleSeqIndex = throw new UnsupportedOperationException( + "MerkleSeqIndexFromRoot is for internal use only and should not be serialized" + ) + } + + object MerklePathElement { + // Prefixes for the deterministic encoding of Merkle child indices. + // Must be unique to prevent collisions of view position encodings + private[ViewPosition] val ListIndexPrefix: Byte = 1 + private[ViewPosition] val MerkleSeqIndexPrefix: Byte = 2 + + /** Will throw if used to compare `ListIndex` with `MerkleSeqIndex` or `MerkleSeqIndexFromRoot`. 
+ */ + private[data] val orderMerklePathElement: Order[MerklePathElement] = Order.from { + case (ListIndex(index1), ListIndex(index2)) => + implicitly[Order[Int]].compare(index1, index2) + + case (MerkleSeqIndexFromRoot(index1), MerkleSeqIndexFromRoot(index2)) => + implicitly[Order[List[Direction]]].compare(index1, index2) + + case (MerkleSeqIndex(index1), element2) => + orderMerklePathElement.compare(MerkleSeqIndexFromRoot(index1.reverse), element2) + + case (element1, MerkleSeqIndex(index2)) => + orderMerklePathElement.compare(element1, MerkleSeqIndexFromRoot(index2.reverse)) + + case (element1, element2) => + throw new UnsupportedOperationException( + s"Unable to compare ${element1.getClass.getSimpleName} with ${element2.getClass.getSimpleName}." + ) + } + } + + object MerkleSeqIndex { + sealed trait Direction extends Product with Serializable with PrettyPrinting { + def encodeDeterministically: ByteString + } + object Direction { + + implicit val orderDirection: Order[Direction] = Order.by { + case Left => 0 + case Right => 1 + } + + case object Left extends Direction { + override def encodeDeterministically: ByteString = DeterministicEncoding.encodeByte(0) + + override def pretty: Pretty[Left.type] = prettyOfString(_ => "L") + } + + case object Right extends Direction { + override def encodeDeterministically: ByteString = DeterministicEncoding.encodeByte(1) + + override def pretty: Pretty[Right.type] = prettyOfString(_ => "R") + } + } + + def fromProtoV2(merkleSeqIndexP: v2.MerkleSeqIndex): MerkleSeqIndex = { + val v2.MerkleSeqIndex(isRightP) = merkleSeqIndexP + MerkleSeqIndex(isRightP.map(if (_) Direction.Right else Direction.Left).toList) + } + } + + def isDescendant(descendant: ViewPosition, ancestor: ViewPosition): Boolean = { + descendant.position.size >= ancestor.position.size && + descendant.position.drop(descendant.position.size - ancestor.position.size) == ancestor.position + } +} diff --git 
a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/ViewTree.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/ViewTree.scala new file mode 100644 index 0000000000..2d575701df --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/ViewTree.scala @@ -0,0 +1,47 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.data + +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.protocol.{RootHash, ViewHash} +import com.digitalasset.canton.topology.{DomainId, MediatorRef} + +/** Common supertype of all view trees that are sent as [[com.digitalasset.canton.protocol.messages.EncryptedViewMessage]]s */ +trait ViewTree extends PrettyPrinting { + + /** The informees of the view in the tree */ + def informees: Set[Informee] + + /** Return the hash whose signature is to be included in the [[com.digitalasset.canton.protocol.messages.EncryptedViewMessage]] */ + def toBeSigned: Option[RootHash] + + /** The hash of the view */ + def viewHash: ViewHash + + def viewPosition: ViewPosition + + /** The root hash of the view tree. + * + * Two view trees with the same [[rootHash]] must also have the same [[domainId]] and [[mediator]] + * (except for hash collisions). 
+ */ + def rootHash: RootHash + + /** The domain to which the [[com.digitalasset.canton.protocol.messages.EncryptedViewMessage]] should be sent */ + def domainId: DomainId + + /** The mediator that is responsible for coordinating this request */ + def mediator: MediatorRef + + override def pretty: Pretty[this.type] +} + +/** Supertype of [[FullTransferOutTree]] and [[FullTransferInTree]] + */ +trait TransferViewTree extends ViewTree { + def submitterMetadata: TransferSubmitterMetadata + + val viewPosition: ViewPosition = + ViewPosition.root // Use a dummy value, as there is only one view. +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/ViewType.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/ViewType.scala new file mode 100644 index 0000000000..882681a042 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/ViewType.scala @@ -0,0 +1,67 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.data + +import com.digitalasset.canton.ProtoDeserializationError.{FieldNotSet, ValueConversionError} +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.protocol.{RequestProcessor, v0} +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.version.HasVersionedToByteString + +/** Reifies the subclasses of [[ViewTree]] as values */ +// This trait does not extend ProtoSerializable because v0.EncryptedViewMessage.ViewType is an enum, not a message. +sealed trait ViewType extends Product with Serializable with PrettyPrinting { + + /** The subclass of [[ViewTree]] that is reified. 
*/ + type View <: ViewTree with HasVersionedToByteString + + type FullView <: ViewTree + + type Processor = RequestProcessor[this.type] + + def toProtoEnum: v0.ViewType + + override def pretty: Pretty[ViewType.this.type] = prettyOfObject[ViewType.this.type] +} + +// This trait is not sealed so that we can extend it for unit testing +trait ViewTypeTest extends ViewType + +object ViewType { + + def fromProtoEnum: v0.ViewType => ParsingResult[ViewType] = { + case v0.ViewType.TransactionViewType => Right(TransactionViewType) + case v0.ViewType.TransferOutViewType => Right(TransferOutViewType) + case v0.ViewType.TransferInViewType => Right(TransferInViewType) + case v0.ViewType.MissingViewType => Left(FieldNotSet("viewType")) + case v0.ViewType.Unrecognized(value) => + Left(ValueConversionError("viewType", s"Unrecognized value $value")) + } + + case object TransactionViewType extends ViewType { + override type View = LightTransactionViewTree + + override type FullView = FullTransactionViewTree + + override def toProtoEnum: v0.ViewType = v0.ViewType.TransactionViewType + } + type TransactionViewType = TransactionViewType.type + + sealed trait TransferViewType extends ViewType { + type View <: TransferViewTree with HasVersionedToByteString + type FullView = View + } + + case object TransferOutViewType extends TransferViewType { + override type View = FullTransferOutTree + override def toProtoEnum: v0.ViewType = v0.ViewType.TransferOutViewType + } + type TransferOutViewType = TransferOutViewType.type + + case object TransferInViewType extends TransferViewType { + override type View = FullTransferInTree + override def toProtoEnum: v0.ViewType = v0.ViewType.TransferInViewType + } + type TransferInViewType = TransferInViewType.type +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/Witnesses.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/Witnesses.scala new file mode 100644 index 0000000000..48a11dad8c --- 
/dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/data/Witnesses.scala @@ -0,0 +1,89 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.data + +import cats.data.EitherT +import cats.syntax.foldable.* +import com.daml.nonempty.{NonEmpty, NonEmptyUtil} +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.sequencing.protocol.{ + MemberRecipient, + ParticipantsOfParty, + Recipients, + RecipientsTree, +} +import com.digitalasset.canton.topology.* +import com.digitalasset.canton.topology.client.PartyTopologySnapshotClient + +import scala.concurrent.{ExecutionContext, Future} + +/** Encodes the hierarchy of the witnesses of a view. + * + * By convention, the order is: the view's informees are at the head of the list, then the parent's views informees, + * then the grandparent's, etc. + */ +final case class Witnesses(unwrap: NonEmpty[Seq[Set[Informee]]]) { + import Witnesses.* + + def prepend(informees: Set[Informee]) = Witnesses(informees +: unwrap) + + /** Derive a recipient tree that mirrors the given hierarchy of witnesses. 
*/ + def toRecipients( + topology: PartyTopologySnapshotClient + )(implicit ec: ExecutionContext): EitherT[Future, InvalidWitnesses, Recipients] = + for { + recipientsList <- unwrap.forgetNE.foldLeftM(Seq.empty[RecipientsTree]) { + (children, informees) => + val parties = informees.map(_.party).toList + for { + informeeParticipants <- EitherT + .right[InvalidWitnesses]( + topology + .activeParticipantsOfParties(parties) + ) + _ <- { + val informeesWithNoActiveParticipants = + informeeParticipants + .collect { + case (party, participants) if participants.isEmpty => party + } + EitherT.cond[Future]( + informeesWithNoActiveParticipants.isEmpty, + (), + InvalidWitnesses( + s"Found no active participants for informees: $informeesWithNoActiveParticipants" + ), + ) + } + partiesWithGroupAddressing <- EitherT.right( + topology.partiesWithGroupAddressing(parties) + ) + recipients = informeeParticipants.toList.flatMap { case (party, participants) => + if (partiesWithGroupAddressing.contains(party)) + Seq(ParticipantsOfParty(PartyId.tryFromLfParty(party))) + else + participants.map(MemberRecipient) + }.toSet + + informeeRecipientSet <- EitherT.fromOption[Future]( + NonEmpty.from(recipients), + InvalidWitnesses(s"Empty set of witnesses given"), + ) + } yield Seq( + RecipientsTree(informeeRecipientSet, children) + ) + } + // recipientsList is non-empty, because unwrap is. 
+ recipients = Recipients(NonEmptyUtil.fromUnsafe(recipientsList)) + } yield recipients + + def flatten: Set[Informee] = unwrap.foldLeft(Set.empty[Informee])(_ union _) + +} + +case object Witnesses { + final case class InvalidWitnesses(message: String) extends PrettyPrinting { + override def pretty: Pretty[InvalidWitnesses] = prettyOfClass(unnamedParam(_.message.unquoted)) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/environment/CantonNodeParameters.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/environment/CantonNodeParameters.scala new file mode 100644 index 0000000000..9d770bb4e7 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/environment/CantonNodeParameters.scala @@ -0,0 +1,101 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.environment + +import com.digitalasset.canton.config.{ + BatchingConfig, + CachingConfigs, + LoggingConfig, + ProcessingTimeout, + QueryCostMonitoringConfig, +} +import com.digitalasset.canton.sequencing.client.SequencerClientConfig +import com.digitalasset.canton.time.NonNegativeFiniteDuration +import com.digitalasset.canton.tracing.TracingConfig +import com.digitalasset.canton.version.ProtocolVersion + +trait CantonNodeParameters extends CantonNodeParameters.General with CantonNodeParameters.Protocol + +object CantonNodeParameters { + trait General { + def tracing: TracingConfig + def delayLoggingThreshold: NonNegativeFiniteDuration + def logQueryCost: Option[QueryCostMonitoringConfig] + def loggingConfig: LoggingConfig + def enableAdditionalConsistencyChecks: Boolean + def enablePreviewFeatures: Boolean + def processingTimeouts: ProcessingTimeout + def sequencerClient: SequencerClientConfig + def cachingConfigs: CachingConfigs + def batchingConfig: BatchingConfig + def nonStandardConfig: Boolean + def 
dbMigrateAndStart: Boolean + def skipTopologyManagerSignatureValidation: Boolean + + } + object General { + final case class Impl( + tracing: TracingConfig, + delayLoggingThreshold: NonNegativeFiniteDuration, + logQueryCost: Option[QueryCostMonitoringConfig], + loggingConfig: LoggingConfig, + enableAdditionalConsistencyChecks: Boolean, + enablePreviewFeatures: Boolean, + processingTimeouts: ProcessingTimeout, + sequencerClient: SequencerClientConfig, + cachingConfigs: CachingConfigs, + batchingConfig: BatchingConfig, + nonStandardConfig: Boolean, + dbMigrateAndStart: Boolean, + skipTopologyManagerSignatureValidation: Boolean, + ) extends CantonNodeParameters.General + } + trait Protocol { + def devVersionSupport: Boolean + def dontWarnOnDeprecatedPV: Boolean + + /** The initial protocol version before connected to any domain, e.g., when creating the initial topology transactions. */ + def initialProtocolVersion: ProtocolVersion + + } + object Protocol { + final case class Impl( + devVersionSupport: Boolean, + dontWarnOnDeprecatedPV: Boolean, + initialProtocolVersion: ProtocolVersion, + ) extends CantonNodeParameters.Protocol + } +} + +trait HasGeneralCantonNodeParameters extends CantonNodeParameters.General { + + protected def general: CantonNodeParameters.General + + override def tracing: TracingConfig = general.tracing + override def delayLoggingThreshold: NonNegativeFiniteDuration = general.delayLoggingThreshold + override def logQueryCost: Option[QueryCostMonitoringConfig] = general.logQueryCost + override def loggingConfig: LoggingConfig = general.loggingConfig + override def enableAdditionalConsistencyChecks: Boolean = + general.enableAdditionalConsistencyChecks + override def enablePreviewFeatures: Boolean = general.enablePreviewFeatures + override def processingTimeouts: ProcessingTimeout = general.processingTimeouts + override def sequencerClient: SequencerClientConfig = general.sequencerClient + override def cachingConfigs: CachingConfigs = 
general.cachingConfigs + override def batchingConfig: BatchingConfig = general.batchingConfig + override def nonStandardConfig: Boolean = general.nonStandardConfig + override def dbMigrateAndStart: Boolean = general.dbMigrateAndStart + override def skipTopologyManagerSignatureValidation: Boolean = + general.skipTopologyManagerSignatureValidation + +} + +trait HasProtocolCantonNodeParameters extends CantonNodeParameters.Protocol { + + protected def protocol: CantonNodeParameters.Protocol + + def devVersionSupport: Boolean = protocol.devVersionSupport + def dontWarnOnDeprecatedPV: Boolean = protocol.dontWarnOnDeprecatedPV + def initialProtocolVersion: ProtocolVersion = protocol.initialProtocolVersion + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/error/CantonError.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/error/CantonError.scala new file mode 100644 index 0000000000..a1d880b23a --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/error/CantonError.scala @@ -0,0 +1,272 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.error + +import com.daml.error.* +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.logging.ErrorLoggingContext +import com.google.rpc.error_details.ErrorInfo +import io.grpc.StatusRuntimeException + +import scala.util.Try +import scala.util.matching.Regex + +object ErrorCodeUtils { + + import cats.syntax.either.* + + /** regex suitable to parse an error code string and extract the error recoverability code + * the (?s) supports multi-line matches + */ + lazy val errorCodeCategoryRegexp: Regex = "(?s)^[0-9A-Z_]+\\(([0-9]+),[A-Za-z0-9]+\\).*".r + + def errorCategoryFromString(str: String): Option[ErrorCategory] = { + str match { + case errorCodeCategoryRegexp(retryability, _*) => + Either + .catchOnly[NumberFormatException](retryability.toInt) + .toOption + .flatMap(ErrorCategory.fromInt) + case _ => None + } + } + + def isError(str: String, errorCode: ErrorCode): Boolean = + str.startsWith(errorCode.id) + +} + +/** The main Canton error for everything that should be logged and notified + * + * PREFER [[CantonError]] OVER [[BaseCantonError]] IN ORDER TO LOG THE ERROR IMMEDIATELY UPON CREATION + * TO ENSURE WE DON'T LOSE THE ERROR MESSAGE. + * + * In many cases, we return errors that are communicated to clients as a Left. For such cases, + * we should use CantonError to report them. + * + * For an actual error instance, you should extend one of the given abstract error classes such as [[CantonError.Impl]] + * further below (or transaction error). + * + * There are two ways to communicate such an error: write it into a log or send it as a string to the user. + * In most cases, we'll do both: log the error appropriately locally and communicate it to the user + * by failing the api call with an error string. 
+ * + * When we log the error, then we write: + * 1) ErrorCode + * 2) ErrorName (name of the class defining the error code) + * 3) The cause + * 4) The context + * + * The context is given by the following: + * 1) All arguments of the error case class turned into strings (which invokes pretty printing of the arguments) + * EXCEPT: we ignore arguments that have the following RESERVED name: cause, loggingContext, throwable. + * 2) The context of the logger (e.g. participant=participant1, domain=da) + * 3) The trace id. + */ +trait BaseCantonError extends BaseError { + + override def context: Map[String, String] = + super.context ++ BaseError.extractContext(this) + + // note that all of the following arguments must be constructor arguments, not body values + // as otherwise we won't be able to log on creation (parent class is initialized before derived class, + // but constructor arguments are initialized first). + // so anything using def, lay val and constructor arguments works. just not val. 
but best, just use + // [[CantonUserError]] or [[CantonInternalError]] + + def rpcStatusWithoutLoggingContext(): com.google.rpc.status.Status = rpcStatus()(NoLogging) + + def log()(implicit loggingContext: ErrorLoggingContext): Unit = logWithContext()(loggingContext) + + def asGrpcError(implicit loggingContext: ErrorLoggingContext): StatusRuntimeException = + code.asGrpcError(this)(loggingContext) + + def asGoogleGrpcStatus(implicit loggingContext: ErrorLoggingContext): com.google.rpc.Status = + code.asGrpcStatus(this)(loggingContext) + +} + +object CantonErrorResource { + + private lazy val all = + Seq(ContractId, ContractKey, DalfPackage, LedgerId, DomainId, DomainAlias, CommandId) + + def fromString(str: String): Option[ErrorResource] = all.find(_.asString == str) + + object ContractId extends ErrorResource { + def asString: String = "CONTRACT_ID" + } + object ContractKey extends ErrorResource { + def asString: String = "CONTRACT_KEY" + } + object DalfPackage extends ErrorResource { + def asString: String = "PACKAGE" + } + object LedgerId extends ErrorResource { + def asString: String = "LEDGER_ID" + } + object DomainId extends ErrorResource { + def asString: String = "DOMAIN_ID" + } + object DomainAlias extends ErrorResource { + def asString: String = "DOMAIN_ALIAS" + } + object CommandId extends ErrorResource { + def asString: String = "COMMAND_ID" + } +} + +/** [[CantonError]]s are logged immediately when they are created. Therefore, they usually expect + * an implicit [[com.digitalasset.canton.logging.ErrorLoggingContext]] to be around when they are created. + * If you are creating such an error in a class extending [[com.digitalasset.canton.logging.NamedLogging]], + * then the implicit function will provide you with such a context. If you don't have that context, then you can + * also use [[BaseCantonError]] and invoke the logging yourself at a later point in time (which is what we do, + * for example, with [[TransactionError]]). 
+ */ +trait CantonError extends BaseCantonError { + + /** The logging context obtained when we created the error, usually passed in as implicit via [[com.digitalasset.canton.logging.NamedLogging]] */ + def loggingContext: ErrorLoggingContext + + /** Flag to control if an error should be logged at creation + * + * Generally, we do want to log upon creation, except in the case of "nested" or combined errors, + * where we just nest the error but don't want it to be logged twice. + * See [[com.digitalasset.canton.error.ParentCantonError]] as an example. + */ + def logOnCreation: Boolean = true + + def log(): Unit = logWithContext()(loggingContext) + + def asGrpcError: StatusRuntimeException = + code.asGrpcError(this)(loggingContext) + + // automatically log the error on generation + if (logOnCreation) { + log() + } + +} + +object BaseCantonError { + abstract class Impl( + override val cause: String, + override val throwableO: Option[Throwable] = None, + )(implicit override val code: ErrorCode) + extends BaseCantonError {} + + /** Custom matcher to extract [[com.google.rpc.error_details.ErrorInfo]] from [[com.google.protobuf.any.Any]] */ + object AnyToErrorInfo { + def unapply(any: com.google.protobuf.any.Any): Option[ErrorInfo] = + if (any.is(ErrorInfo)) { + Try(any.unpack(ErrorInfo)).toOption + } else None + } + + def statusErrorCodes(status: com.google.rpc.status.Status): Seq[String] = + status.details.collect { case AnyToErrorInfo(errorInfo) => errorInfo.reason } + + def isStatusErrorCode(errorCode: ErrorCode, status: com.google.rpc.status.Status): Boolean = + extractStatusErrorCodeMessage(errorCode, status).isDefined + + def extractStatusErrorCodeMessage( + errorCode: ErrorCode, + status: com.google.rpc.status.Status, + ): Option[String] = { + val code = errorCode.category.grpcCode.getOrElse( + throw new IllegalArgumentException(s"Error code $errorCode does not have a gRPC code") + ) + Option.when(status.code == code.value() && 
statusErrorCodes(status).contains(errorCode.id))( + status.message + ) + } +} + +object CantonError { + + abstract class Impl( + override val cause: String, + override val throwableO: Option[Throwable] = None, + )(implicit override val code: ErrorCode) + extends CantonError {} + + def stringFromContext(error: BaseError)(implicit loggingContext: ErrorLoggingContext): String = + error match { + case error: CombinedError[_] => + (if (error.errors.length > 1) error.cause + ": " else "") + error.orderedErrors + .map(stringFromContext(_)(loggingContext)) + .toList + .mkString(", ") + + case error => + val contextMap = error.context ++ loggingContext.properties + val errorCodeMsg = error.code.toMsg(error.cause, loggingContext.traceContext.traceId) + if (contextMap.nonEmpty) { + errorCodeMsg + "; " + ContextualizedErrorLogger.formatContextAsString(contextMap) + } else { + errorCodeMsg + } + } +} + +/** Mixing trait for nested errors + * + * The classic situation when we re-wrap errors: + * + * sealed trait CryptoError extends CantonError + * + * sealed trait ProcessingError extends CantonError + * + * // NOTE, this error is NOT created within an ErrorCode, as we just inherit the parent error + * case class CryptoNoBueno(someArgs: String, parent: CryptoError) extends ProcessingError + * with ParentCantonError[CryptoError] { + * // we can mixin our context variables + * override def mixinContext: Map[String, String] = Map("someArgs" -> someArgs) + * } + * + * Now in the following situation, the someCryptoOp method would generate the CryptoError. + * This CryptoError would be logged already (on creation) and therefore, the ParentCantonError + * disabled logging on creation. 
+ * + * for { + * _ <- someCryptoOp(..).leftMap(CryptoNoBueno("oh nooo", _)) + * } yields () + */ +trait ParentCantonError[+T <: BaseCantonError] extends BaseCantonError { + + /** The parent error that we want to nest */ + def parent: T + + /** The context we want to mix-in */ + def mixinContext: Map[String, String] = Map() + + override def code: ErrorCode = parent.code + override def cause: String = parent.cause + override def context: Map[String, String] = parent.context ++ mixinContext + +} + +/** Combine several errors into one + * + * This is a rare case but can happen. In some cases, we don't have a single + * parent error like [[ParentCantonError]], but many of them. This trait can + * be used for such situations. + * + * Useful for situations with [[com.digitalasset.canton.util.CheckedT]] collecting + * several user errors. + */ +trait CombinedError[+T <: BaseCantonError] { + this: BaseCantonError => + + def loggingContext: ErrorLoggingContext + + def errors: NonEmpty[Seq[T]] + + lazy val orderedErrors: NonEmpty[Seq[T]] = errors.sortBy(_.code.category.rank) + + override def cause: String = s"A series of ${errors.length} failures occurred" + + override def code: ErrorCode = orderedErrors.head1.code + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/error/CantonErrorGroups.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/error/CantonErrorGroups.scala new file mode 100644 index 0000000000..5af9e789e5 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/error/CantonErrorGroups.scala @@ -0,0 +1,67 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.error + +import com.daml.error.{ErrorClass, ErrorGroup} + +object CantonErrorGroups { + + private implicit val errorClass: ErrorClass = ErrorClass.root() + + abstract class ConfigErrorGroup extends ErrorGroup() + + abstract class CommandErrorGroup extends ErrorGroup() + + abstract class HandshakeErrorGroup extends ErrorGroup + + abstract class ProtoDeserializationErrorGroup extends ErrorGroup + + abstract class SequencerErrorGroup extends ErrorGroup() + + abstract class SequencerSubscriptionErrorGroup extends ErrorGroup() + + abstract class MediatorErrorGroup extends ErrorGroup() + + abstract class GrpcErrorGroup extends ErrorGroup() + + object ParticipantErrorGroup extends ErrorGroup() { + + abstract class DomainConnectivityErrorGroup extends ErrorGroup() + abstract class SyncServiceErrorGroup extends ErrorGroup() + abstract class PackageServiceErrorGroup extends ErrorGroup() + abstract class PruningServiceErrorGroup extends ErrorGroup() + abstract class RepairServiceErrorGroup extends ErrorGroup() + + object TransactionErrorGroup extends ErrorGroup() { + // Errors emitted by Ledger Api server + abstract class LedgerApiErrorGroup extends ErrorGroup() + // TransactionInjectErrors are initial injection errors resulting from the canton sync service + abstract class InjectionErrorGroup extends ErrorGroup() + // TransactionRoutingErrors are routing errors resulting from the domain router + abstract class RoutingErrorGroup extends ErrorGroup() + // TransactionSubmissionErrors are routing errors resulting from the transaction processor + abstract class SubmissionErrorGroup extends ErrorGroup() + // local rejections made by participants during transaction processing + abstract class LocalRejectionGroup extends ErrorGroup() + } + + // replicated participant errors + abstract class ReplicationErrorGroup extends ErrorGroup() + + abstract class AcsCommitmentErrorGroup extends ErrorGroup() + + abstract 
class AdminWorkflowServicesErrorGroup extends ErrorGroup() + + } + + object TopologyManagementErrorGroup extends ErrorGroup() { + abstract class TopologyManagerErrorGroup extends ErrorGroup() + abstract class TopologyDispatchingErrorGroup extends ErrorGroup() + } + + abstract class StorageErrorGroup extends ErrorGroup() + + abstract class ClockErrorGroup extends ErrorGroup() {} + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/error/DecodedRpcStatus.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/error/DecodedRpcStatus.scala new file mode 100644 index 0000000000..a3b1ebd176 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/error/DecodedRpcStatus.scala @@ -0,0 +1,147 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.error + +import cats.syntax.either.* +import com.daml.error.{ErrorCategory, ErrorResource} +import io.grpc.StatusRuntimeException + +import java.util.concurrent.TimeUnit +import scala.concurrent.duration.Duration + +/** Decoded form of com.google.rpc.Status as generated by our errors + * + * We use com.google.rpc.Status (java) and com.google.rpc.status.Status (scala) to store + * and ship error information via GRPC. However, extracting this information from the + * object is a bit cumbersome. + * + * Therefore, we support the UX by providing a set of conversion utilities to make the information + * stored better accessible. 
+ */ +final case class DecodedRpcStatus( + id: String, + category: ErrorCategory, + correlationId: Option[String], + retryIn: Option[Duration], + context: Map[String, String], + resources: Map[ErrorResource, Seq[String]], +) { + + def isRetryable: Boolean = retryIn.nonEmpty + +} + +object DecodedRpcStatus { + + private val empty = ( + None: Option[ErrorCategory], + DecodedRpcStatus("", ErrorCategory.SystemInternalAssumptionViolated, None, None, Map(), Map()), + ) + + private def accumulateRetryInfo(categoryO: Option[ErrorCategory], acc: DecodedRpcStatus)( + retrySecsAndNanos: Option[(Long, Int)] + ): (Option[ErrorCategory], DecodedRpcStatus) = + retrySecsAndNanos match { + case Some((seconds, nanos)) => + val dr = Duration.fromNanos(TimeUnit.SECONDS.toNanos(seconds) + nanos.toLong) + (categoryO, acc.copy(retryIn = Some(dr))) + case None => (categoryO, acc) + } + + private def accumulateResourceInfo( + categoryO: Option[ErrorCategory], + acc: DecodedRpcStatus, + )(resourceType: String, resourceName: String): (Option[ErrorCategory], DecodedRpcStatus) = + CantonErrorResource.fromString(resourceType).fold((categoryO, acc)) { resourceType => + val resourceItems = acc.resources.getOrElse(resourceType, Seq()) :+ resourceName + (categoryO, acc.copy(resources = acc.resources + (resourceType -> resourceItems))) + } + + private def accumulateErrorInfo( + categoryO: Option[ErrorCategory], + acc: DecodedRpcStatus, + )(reason: String, metadata: Map[String, String]): (Option[ErrorCategory], DecodedRpcStatus) = { + if (reason.nonEmpty) { + val category = (for { + catS <- metadata.get("category") + catI <- Either.catchOnly[NumberFormatException](catS.toInt).toOption + cat <- ErrorCategory.fromInt(catI) + } yield cat) + (category, acc.copy(id = reason, context = metadata.filterNot(_._1 == "category"))) + } else (categoryO, acc) + } + + private def finaliseDecoding( + acc: (Option[ErrorCategory], DecodedRpcStatus) + ): Option[DecodedRpcStatus] = + (acc match { + case (categoryO, 
status) => + categoryO.map(ct => status.copy(category = ct)) + }).filter(_.id.nonEmpty) + + def fromScalaStatus(status: com.google.rpc.status.Status): Option[DecodedRpcStatus] = + finaliseDecoding( + status.details + .foldLeft(empty) { + case ((categoryO, acc), e) if e.is(com.google.rpc.error_details.ResourceInfo) => + val ri = e.unpack(com.google.rpc.error_details.ResourceInfo) + accumulateResourceInfo(categoryO, acc)(ri.resourceType, ri.resourceName) + case ((categoryO, acc), e) if e.is(com.google.rpc.error_details.ErrorInfo) => + val ei = e.unpack(com.google.rpc.error_details.ErrorInfo) + accumulateErrorInfo(categoryO, acc)(ei.reason, ei.metadata) + case ((categoryO, acc), e) if e.is(com.google.rpc.error_details.RetryInfo) => + val ei = e.unpack(com.google.rpc.error_details.RetryInfo) + accumulateRetryInfo(categoryO, acc)(ei.retryDelay.map(x => (x.seconds, x.nanos))) + case ((categoryO, acc), e) if e.is(com.google.rpc.error_details.RequestInfo) => + val ri = e.unpack(com.google.rpc.error_details.RequestInfo) + (categoryO, acc.copy(correlationId = Some(ri.requestId))) + case (acc, _) => acc + } + ) + + def fromStatusRuntimeException(ex: StatusRuntimeException): Option[DecodedRpcStatus] = { + + Either + .catchOnly[IllegalArgumentException](io.grpc.protobuf.StatusProto.fromThrowable(ex)) + .toOption + // fromThrowable might return null + .flatMap(s => Option(s)) + .flatMap { status => + import scala.jdk.CollectionConverters.* + finaliseDecoding(status.getDetailsList.asScala.foldLeft(empty) { + + // extract retry interval + case ((categoryO, acc), e) if e.is(classOf[com.google.rpc.RetryInfo]) => + val retryInfo = e.unpack(classOf[com.google.rpc.RetryInfo]) + val retryDelay = if (retryInfo.hasRetryDelay) { + val dl = retryInfo.getRetryDelay + Some((dl.getSeconds, dl.getNanos)) + } else None + accumulateRetryInfo(categoryO, acc)(retryDelay) + + // extract error info + case ((categoryO, acc), e) if e.is(classOf[com.google.rpc.ErrorInfo]) => + val errorInfo = 
e.unpack(classOf[com.google.rpc.ErrorInfo]) + accumulateErrorInfo(categoryO, acc)( + errorInfo.getReason, + errorInfo.getMetadataMap.asScala.toMap, + ) + + // extract correlation-id + case ((categoryO, acc), e) if e.is(classOf[com.google.rpc.RequestInfo]) => + val ri = e.unpack(classOf[com.google.rpc.RequestInfo]) + (categoryO, acc.copy(correlationId = Some(ri.getRequestId))) + + // extract resource info + case ((categoryO, acc), e) if e.is(classOf[com.google.rpc.ResourceInfo]) => + val ri = e.unpack(classOf[com.google.rpc.ResourceInfo]) + accumulateResourceInfo(categoryO, acc)(ri.getResourceType, ri.getResourceName) + case (acc, _) => acc + }) + + } + + } + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/error/MediatorError.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/error/MediatorError.scala new file mode 100644 index 0000000000..b590e685c5 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/error/MediatorError.scala @@ -0,0 +1,114 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.error + +import com.daml.error.* +import com.digitalasset.canton.error.CantonErrorGroups.MediatorErrorGroup +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.protocol.v0 +import org.slf4j.event.Level + +sealed trait MediatorError extends Product with Serializable with PrettyPrinting + +object MediatorError extends MediatorErrorGroup { + + @Explanation( + """This rejection indicates that the transaction has been rejected by the mediator as it didn't receive enough confirmations within the participant response timeout. + The field "unresponsiveParties" in the error info contains the comma-separated list of parties that failed to send a response within the participant response timeout. 
This field is only present since protocol version 6""" + ) + @Resolution( + "Check that all involved participants are available and not overloaded." + ) + object Timeout + extends ErrorCode( + id = "MEDIATOR_SAYS_TX_TIMED_OUT", + ErrorCategory.ContentionOnSharedResources, + ) { + final case class Reject( + override val cause: String = Reject.defaultCause, + unresponsiveParties: String = "", + ) extends BaseCantonError.Impl(cause) + with MediatorError { + override def pretty: Pretty[Reject] = prettyOfClass( + param("code", _.code.id.unquoted), + param("cause", _.cause.unquoted), + param( + "unresponsive parties", + _.unresponsiveParties.unquoted, + _.unresponsiveParties.nonEmpty, + ), + ) + } + object Reject { + val defaultCause: String = + "Rejected transaction as the mediator did not receive sufficient confirmations within the expected timeframe." + } + } + + @Explanation( + """The mediator has received an invalid message (request or response). + |The message will be discarded. As a consequence, the underlying request may be rejected. + |No corruption of the ledger is to be expected. + |This error is to be expected after a restart or failover of a mediator.""" + ) + @Resolution("Address the cause of the error. Let the submitter retry the command.") + object InvalidMessage + extends ErrorCode( + "MEDIATOR_INVALID_MESSAGE", + ErrorCategory.InvalidGivenCurrentSystemStateOther, + ) { + + override def logLevel: Level = Level.WARN + + final case class Reject( + override val cause: String, + _v0CodeP: v0.MediatorRejection.Code = v0.MediatorRejection.Code.Timeout, + ) extends BaseCantonError.Impl(cause) + with MediatorError { + override def pretty: Pretty[Reject] = prettyOfClass( + param("code", _.code.id.unquoted), + param("cause", _.cause.unquoted), + ) + } + } + + @Explanation( + """The mediator has received a malformed message. This may occur due to a bug at the sender of the message. + |The message will be discarded. 
As a consequence, the underlying request may be rejected. + |No corruption of the ledger is to be expected.""" + ) + @Resolution("Contact support.") + object MalformedMessage extends AlarmErrorCode("MEDIATOR_RECEIVED_MALFORMED_MESSAGE") { + + final case class Reject( + override val cause: String, + _v0CodeP: v0.MediatorRejection.Code = v0.MediatorRejection.Code.Timeout, + ) extends Alarm(cause) + with MediatorError + with BaseCantonError { + override def pretty: Pretty[Reject] = prettyOfClass( + param("code", _.code.id.unquoted), + param("cause", _.cause.unquoted), + ) + } + } + + @Explanation( + "Request processing failed due to a violation of internal invariants. It indicates a bug at the mediator." + ) + @Resolution("Contact support.") + object InternalError + extends ErrorCode( + "MEDIATOR_INTERNAL_ERROR", + ErrorCategory.SystemInternalAssumptionViolated, + ) { + + /** @param throwableO optional throwable that will not be serialized and is therefore not delivered to clients. + */ + final case class Reject( + override val cause: String, + override val throwableO: Option[Throwable] = None, + ) extends BaseCantonError.Impl(cause) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/error/TransactionError.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/error/TransactionError.scala new file mode 100644 index 0000000000..e454fc3a2a --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/error/TransactionError.scala @@ -0,0 +1,51 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.error + +import com.daml.error.ErrorCode +import com.digitalasset.canton.ledger.participant.state.v2.SubmissionResult +import com.google.rpc.code.Code +import com.google.rpc.status.Status as RpcStatus + +trait TransactionError extends BaseCantonError { + + // Determines the value of the `definite_answer` key in the error details + def definiteAnswer: Boolean = false + + /** Parameter has no effect at the moment, as submission ranks are not supported. + * Setting to false for the time being. + */ + final override def definiteAnswerO: Option[Boolean] = Some(definiteAnswer) +} + +/** Transaction errors are derived from BaseCantonError and need to be logged explicitly */ +abstract class TransactionErrorImpl( + override val cause: String, + override val throwableO: Option[Throwable] = None, + override val definiteAnswer: Boolean = false, +)(implicit override val code: ErrorCode) + extends TransactionError + +trait TransactionParentError[T <: TransactionError] + extends TransactionError + with ParentCantonError[T] + +object TransactionError { + val NoErrorDetails = Seq.empty[com.google.protobuf.any.Any] + val NotSupported: SubmissionResult.SynchronousError = SubmissionResult.SynchronousError( + RpcStatus.of(Code.UNIMPLEMENTED.value, "Not supported", TransactionError.NoErrorDetails) + ) + val PassiveNode: SubmissionResult.SynchronousError = SubmissionResult.SynchronousError( + RpcStatus.of(Code.UNAVAILABLE.value, "Node is passive", TransactionError.NoErrorDetails) + ) + + def internalError(reason: String): SubmissionResult.SynchronousError = + SubmissionResult.SynchronousError(RpcStatus.of(Code.INTERNAL.value, reason, NoErrorDetails)) + + val shutdownError: SubmissionResult.SynchronousError = + SubmissionResult.SynchronousError( + RpcStatus.of(Code.CANCELLED.value, "Node is shutting down", NoErrorDetails) + ) + +} diff --git 
a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/external/BackgroundRunner.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/external/BackgroundRunner.scala new file mode 100644 index 0000000000..a2b634f6d5 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/external/BackgroundRunner.scala @@ -0,0 +1,393 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.external + +import better.files.File +import com.digitalasset.canton.DiscardOps +import com.digitalasset.canton.concurrent.Threading +import com.digitalasset.canton.config.{ProcessingTimeout, RequireTypes} +import com.digitalasset.canton.lifecycle.FlagCloseable +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.tracing.{NoTracing, TraceContext} +import com.digitalasset.canton.util.ErrorUtil +import com.digitalasset.canton.util.ShowUtil.* +import org.slf4j.event.Level + +import java.io.{IOException, InputStream, StringWriter} +import java.nio.BufferOverflowException +import java.util.concurrent.TimeUnit +import scala.annotation.tailrec +import scala.collection.concurrent.TrieMap +import scala.jdk.CollectionConverters.* + +/** Handler that exposes lifecycle methods for managing a background process. 
+ * @tparam ProcessInfo type of information about the process that will show up in error messages + */ +class BackgroundRunnerHandler[ProcessInfo]( + timeouts: ProcessingTimeout, + val loggerFactory: NamedLoggerFactory, +) extends NamedLogging + with AutoCloseable + with NoTracing { + + private sealed trait ProcessHandle { + def info: ProcessInfo + } + private case class Configured( + name: String, + command: Seq[String], + addEnvironment: Map[String, String], + info: ProcessInfo, + ) extends ProcessHandle { + def start(): Running = + Running( + name, + runner = new BackgroundRunner(name, command, addEnvironment, timeouts, loggerFactory), + info, + ) + } + private case class Running(name: String, runner: BackgroundRunner, info: ProcessInfo) + extends ProcessHandle { + def kill(force: Boolean = false): Configured = { + runner.kill(force) + Configured(name, runner.command, runner.addEnvironment, info) + } + def restart(): Running = { + Running(name, runner.restart(), info) + } + } + + private val external = new TrieMap[String, ProcessHandle]() + + def tryAdd( + instanceName: String, + command: Seq[String], + addEnvironment: Map[String, String], + info: ProcessInfo, + manualStart: Boolean, + )(implicit + traceContext: TraceContext + ): Unit = { + ErrorUtil.requireArgument( + !external.contains(instanceName), + s"key already exists ${instanceName}", + ) + ErrorUtil.requireArgument( + command.nonEmpty, + s"you've supplied empty commands for ${instanceName}", + ) + val configured = Configured(instanceName, command, addEnvironment, info) + external.put(instanceName, if (!manualStart) configured.start() else configured).discard + } + + /** Stop and remove a background process. Idempotent as it doesn't require that the background process was + * previously added. 
+ */ + def stopAndRemove(instanceName: String): Unit = { + val prev = external.remove(instanceName) + prev match { + case Some(processHandle: Running) => processHandle.kill().discard + case _ => () + } + } + + def tryIsRunning(instanceName: String): Boolean = { + external.get(instanceName) match { + case Some(_: Configured) => false + case Some(_: Running) => true + case None => + ErrorUtil.internalError(new IllegalStateException(s"${instanceName} is not registered")) + } + } + + def tryStart(instanceName: String): Unit = { + perform( + instanceName, + { + case a: Configured => + noTracingLogger.info(s"Starting external process for ${instanceName}") + a.start() + case Running(_, _, _) => + ErrorUtil.internalError( + new IllegalStateException( + s"can not start ${instanceName} as instance is already running" + ) + ) + }, + ) + } + + private def perform(instanceName: String, func: ProcessHandle => ProcessHandle): Unit = { + external.get(instanceName) match { + case Some(item) => + external.update(instanceName, func(item)) + case None => + ErrorUtil.internalError( + new IllegalArgumentException( + s"no such instance ${instanceName} configured as remote running instance. 
have ${external.keys}" + ) + ) + } + } + + def tryKill(instanceName: String, force: Boolean = true): Unit = { + perform( + instanceName, + { + case x: Running => + noTracingLogger.info(s"Stopping external process for ${instanceName} (force=${force})") + x.kill(force) + case a: Configured => + ErrorUtil.internalError( + new IllegalStateException(s"can not kill ${instanceName} as instance is not running") + ) + }, + ) + } + + def tryRestart(instanceName: String): Unit = { + perform( + instanceName, + { + case x: Running => + noTracingLogger.info(s"Restarting external process for ${instanceName}") + x.restart() + case Configured(_, _, _, _) => + ErrorUtil.internalError( + new IllegalStateException(s"can not kill ${instanceName} as instance is not running") + ) + }, + ) + } + + def tryInfo(instanceName: String): ProcessInfo = { + external + .getOrElse( + instanceName, + ErrorUtil.internalError(new IllegalArgumentException(s"no such instance ${instanceName}")), + ) + .info + } + + def exists(instanceName: String): Boolean = external.keySet.contains(instanceName) + + def killAndRemove(): Unit = { + logger.info("Killing background processes due to shutdown") + external.values.foreach { + case Configured(_, _, _, _) => () + case Running(_, runner, _) => + runner.kill() + } + external.clear() + } + + override def close(): Unit = killAndRemove() +} + +class BackgroundRunner( + val name: String, + val command: Seq[String], + val addEnvironment: Map[String, String], + override protected val timeouts: ProcessingTimeout, + val loggerFactory: NamedLoggerFactory, + waitBeforeRestartMs: Int = 250, +) extends NamedLogging + with FlagCloseable { + + import BackgroundRunner.* + + private def dumpOutputToLogger(parent: InputStream, level: Level): Unit = { + @SuppressWarnings(Array("org.wartremover.warts.Var", "org.wartremover.warts.While")) + class CopyOutput() extends NoTracing with Runnable { + val buf = new StringWriter() + + override def run(): Unit = { + try { + var b = 
parent.read() + while (b != -1) { + if (b == '\n' || buf.getBuffer.length() >= MaxLineLength) { + // strip the ansi color commands from the string + val msg = s"Output of ${name}: ${buf.toString}" + level match { + case Level.ERROR => logger.error(msg) + case Level.WARN => logger.warn(msg) + case Level.INFO => logger.info(msg) + case Level.DEBUG => logger.debug(msg) + case Level.TRACE => logger.trace(msg) + } + buf.getBuffer.setLength(0) + } else { // if this if-condition is taken 2^30 times in a row, a buffer overflow error occurs + buf.write(b) + } + b = parent.read() + } + } catch { + case e: IOException => + logger.debug(s"External process was closed ${e.getMessage}") + case e: BufferOverflowException => + logger.debug( + "A BufferOverflowException occurred when writing to the external log file. " + + "The cause is likely that there is a configuration error that leads to the external process to fail," + + " and indefinitely output non-sense data to the output, leading to the buffer overflow exception. 
" + + s"To find the root cause error, you will likely need to check the logs of the external process $name" + + s"Error message of the exception: ${e.getMessage}" + ) + } + } + } + val tr = new Thread(new CopyOutput(), s"output-copy-${name}-${level}") + tr.setDaemon(true) + tr.start() + } + + private val pb = new ProcessBuilder(command.toList.asJava) + + pb.redirectOutput() + pb.redirectErrorStream() + addEnvironment.foreach { case (k, v) => + Option(pb.environment().put(k, v)) match { + case Some(prev) => noTracingLogger.debug(s"Changed ${k} to ${v} from ${prev}") + case None => noTracingLogger.debug(s"Set ${k} to ${v}") + } + } + + noTracingLogger.info(s"Starting command $name ${command.map(_.limit(160)).toString}") + private val rt = pb.start() + + dumpOutputToLogger(rt.getInputStream, Level.DEBUG) + dumpOutputToLogger(rt.getErrorStream, Level.INFO) + + def kill(force: Boolean = true): Unit = { + if (rt.isAlive) { + try { + if (!force) { + noTracingLogger.debug(s"Killing process $name normally") + val _ = rt.destroy() + } + if ( + force || !rt.waitFor(timeouts.shutdownProcessing.unwrap.toMillis, TimeUnit.MILLISECONDS) + ) { + noTracingLogger.info(s"Killing process $name forcibly") + val _ = rt.destroyForcibly() + } + } catch { + case ex: Throwable => noTracingLogger.error(s"Failed to kill forcibly: ${command}", ex) + } + } else { + noTracingLogger.warn(s"Process $name is already shut down") + } + } + + def restart(): BackgroundRunner = { + kill() + Threading.sleep(waitBeforeRestartMs.toLong) + noTracingLogger.info(s"Restarting background runner with ${command}") + new BackgroundRunner(name, command, addEnvironment, timeouts, loggerFactory) + } + + override protected def onClosed(): Unit = { + if (rt.isAlive) { + noTracingLogger.debug("Shutting down external process") + rt.destroy() + } + } + +} + +object BackgroundRunner { + private val MaxLineLength = 8192 +} + +object BackgroundRunnerHelpers { + + /** Yields the jvm params specifying the current classpath, 
e.g., `Seq("-cp", myClassPath)`. + * Excludes sbt dependencies. + * @throws java.lang.IllegalStateException if there is no file `classpath.txt` in the working directory. + */ + def extractClassPathParams(): Seq[String] = { + loadIntelliJClasspath() match { + case Some(cp) => + Seq("-cp", cp) + case None => + val cpFile = tryGetClasspathFile() + Seq(s"@${cpFile.name}") + } + } + + private def loadIntelliJClasspath(): Option[String] = + Some(System.getProperty("java.class.path")).filter(!_.matches(".*sbt-launch.*\\.jar")) + + private def tryGetClasspathFile(): File = { + val cpFile = File(s"classpath.txt") + if (cpFile.exists()) { + cpFile + } else { + throw new IllegalStateException( + "Process is started using sbt, however you need to run `sbt dumpClassPath` before running external processes." + ) + } + } + + /** Yields a sequence with the elements of the current classpath. + * Excludes sbt dependencies. + * @throws java.lang.IllegalStateException if there is no file `classpath.txt` in the working directory. 
+ */ + def extractClassPath(): Seq[String] = { + loadIntelliJClasspath() match { + case Some(cp) => cp.split(":").toSeq + case None => + val cpFile = tryGetClasspathFile() + cpFile.contentAsString.stripPrefix("-cp ").split(":").toSeq + } + } + + def createParticipantStartupFile( + targetFilename: String, + dar: Option[String], + urls: Seq[String], + ): Unit = { + val fw = new java.io.FileWriter(targetFilename) + val upload = dar + .map { filename => + s""" + | participant.dars.upload("$filename") + | """.stripMargin + } + .getOrElse("") + fw.write(s""" + |participants.local.foreach { participant => + | participant.start() + | if(participant.domains.list_registered().length == 0) { + | """.stripMargin) + urls.zipWithIndex.foreach { case (url, index) => + val dn = s"domain${index}" + fw.write(" participant.domains.connect(\"" + dn + "\", \"" + url + "\")\n") + } + fw.write(s""" + | } else + | participant.domains.reconnect_all() + | ${upload} + |} + """.stripMargin) + fw.close() + } + @tailrec + def waitUntilUp(port: RequireTypes.Port, retries: Int): Unit = { + try { + Threading.sleep(2000) + val socket = new java.net.Socket("localhost", port.unwrap) + socket.getInputStream.close() + socket.close() + println(s"process at port ${port} is active") + } catch { + case _: java.io.IOException => + if (retries > 0) { + waitUntilUp(port, retries - 1) + } else { + throw new RuntimeException(s"Unable to connect to ${port}") + } + } + } + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/AtomicHealthElement.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/AtomicHealthElement.scala new file mode 100644 index 0000000000..4c921c6a84 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/AtomicHealthElement.scala @@ -0,0 +1,55 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.health + +import cats.Eval +import com.daml.error.BaseError +import com.digitalasset.canton.health.ComponentHealthState.UnhealthyState +import com.digitalasset.canton.tracing.TraceContext + +/** Trait for [[HealthElement]] at the leaves of the health dependency tree. + * Maintains its own state instead of aggregating other components. + * + * @see CompositeHealthElement for the aggregating counterpart + */ +trait AtomicHealthElement extends HealthElement { + + /** Sets the state of this component and notifies its listeners */ + def reportHealthState(state: State)(implicit tc: TraceContext): Unit = + refreshState(Eval.now(state)) +} + +/** An [[AtomicHealthElement]] whose state is a [[ComponentHealthState]] */ +trait AtomicHealthComponent extends AtomicHealthElement with HealthComponent { + + /** Set the health state to Ok and if the previous state was unhealthy, log a message to inform about the resolution + * of the ongoing issue. + */ + def resolveUnhealthy()(implicit traceContext: TraceContext): Unit = + reportHealthState(ComponentHealthState.Ok()) + + /** Report that the component is now degraded. + * Note that this will override the component state, even if it is currently failed! + */ + def degradationOccurred(error: BaseError)(implicit tc: TraceContext): Unit = + reportHealthState(ComponentHealthState.Degraded(UnhealthyState(None, Some(error)))) + + /** Report that the component is now failed + */ + def failureOccurred(error: BaseError)(implicit tc: TraceContext): Unit = + reportHealthState(ComponentHealthState.Failed(UnhealthyState(None, Some(error)))) + + /** Report that the component is now degraded. + * Note that this will override the component state, even if it is currently failed! 
+ */ + def degradationOccurred(error: String)(implicit tc: TraceContext): Unit = + reportHealthState(ComponentHealthState.degraded(error)) + + /** Report that the component is now failed + */ + def failureOccurred(error: String)(implicit tc: TraceContext): Unit = + reportHealthState(ComponentHealthState.failed(error)) +} + +trait CloseableAtomicHealthComponent extends CloseableHealthComponent with AtomicHealthComponent diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/CloseableHealthElement.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/CloseableHealthElement.scala new file mode 100644 index 0000000000..1bc46e8091 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/CloseableHealthElement.scala @@ -0,0 +1,22 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.health + +import com.digitalasset.canton.lifecycle.FlagCloseable + +/** A [[HealthElement]] that is its own [[com.digitalasset.canton.lifecycle.FlagCloseable]]. + * Use this when the health reporting shall use inheritance over composition. + * + * When combining different [[HealthElement]] traits, mix in this one first + * so that the [[com.digitalasset.canton.lifecycle.FlagCloseable]] gets initialized first. 
+ */ +trait CloseableHealthElement extends FlagCloseable with HealthElement { + final override protected def associatedOnShutdownRunner: FlagCloseable = this +} + +/** Refines the state of a [[CloseableHealthElement]] to something convertible to a [[ComponentHealthState]] */ +trait CloseableHealthQuasiComponent extends CloseableHealthElement with HealthQuasiComponent + +/** Fixes the state of a [[CloseableHealthQuasiComponent]] to [[ComponentHealthState]] */ +trait CloseableHealthComponent extends CloseableHealthQuasiComponent with HealthComponent diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/ComponentHealthState.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/ComponentHealthState.scala new file mode 100644 index 0000000000..ea8e82e10e --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/ComponentHealthState.scala @@ -0,0 +1,151 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.health + +import com.daml.error.BaseError +import com.digitalasset.canton.health.ComponentHealthState.{Degraded, Failed, Ok} +import com.digitalasset.canton.health.admin.v0 as proto +import com.digitalasset.canton.logging.ErrorLoggingContext +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting, PrettyUtil} +import com.digitalasset.canton.util.ShowUtil +import io.circe.Encoder +import io.circe.generic.semiauto.deriveEncoder + +import scala.annotation.nowarn + +/** Generic State implementation of a component + * This can be used as a base health state for most component. 
+ * However ComponentHealth (below) does not enforce the use of this class and a custom State class can be used instead + */ +sealed trait ComponentHealthState extends ToComponentHealthState with PrettyPrinting { + def isOk: Boolean = this match { + case ComponentHealthState.Ok(_) => true + case _ => false + } + def isFailed: Boolean = this match { + case ComponentHealthState.Failed(_) => true + case _ => false + } + override def toComponentHealthState: ComponentHealthState = this + override def pretty: Pretty[ComponentHealthState] = + ComponentHealthState.prettyComponentHealthState + + def toComponentStatusV0: proto.NodeStatus.ComponentStatus.Status = this match { + case Ok(description) => + proto.NodeStatus.ComponentStatus.Status + .Ok(proto.NodeStatus.ComponentStatus.StatusData(description)) + case Degraded(degraded) => + proto.NodeStatus.ComponentStatus.Status.Degraded(degraded.toComponentStatusDataV0) + case Failed(failed) => + proto.NodeStatus.ComponentStatus.Status.Failed(failed.toComponentStatusDataV0) + } +} + +object ComponentHealthState extends ShowUtil { + import PrettyUtil.* + + val ShutdownState: ComponentHealthState = + ComponentHealthState.failed("Component is closed") + val NotInitializedState: ComponentHealthState = + ComponentHealthState.failed("Not Initialized") + + // Json encoder implicits + + @nowarn("cat=lint-byname-implicit") // https://github.com/scala/bug/issues/12072 + implicit val componentHealthStateEncoder: Encoder[ComponentHealthState] = + deriveEncoder[ComponentHealthState] + + implicit val prettyComponentHealthState: Pretty[ComponentHealthState] = { + case ok: Ok => + prettyOfClass[Ok](unnamedParamIfDefined(_.description.map(_.unquoted))).treeOf(ok) + case notInitialized if notInitialized == NotInitializedState => + prettyOfString[ComponentHealthState](_ => "Not Initialized").treeOf(notInitialized) + case unhealthy: HasUnhealthyState => HasUnhealthyState.prettyHasUnhealthyState.treeOf(unhealthy) + } + + /** Ok state + */ + final 
case class Ok(description: Option[String] = None) extends ComponentHealthState + + object Ok { + implicit val okEncoder: Encoder[Ok.type] = Encoder.encodeString.contramap(_ => "ok") + } + + def failed(description: String): Failed = Failed(UnhealthyState(Some(description))) + + def degraded(description: String): Degraded = Degraded(UnhealthyState(Some(description))) + + /** Degraded state, as in not fully but still functional. A degraded component will NOT cause a service + * to report NOT_SERVING + * + * @param state data + */ + final case class Degraded(state: UnhealthyState = UnhealthyState()) + extends ComponentHealthState + with HasUnhealthyState + + object Degraded { + @nowarn("cat=lint-byname-implicit") // https://github.com/scala/bug/issues/12072 + implicit val degradedEncoder: Encoder[Degraded] = deriveEncoder[Degraded] + } + + /** The component has failed, any service that depends on it will report NOT_SERVING + * + * @param state data + */ + final case class Failed(state: UnhealthyState = UnhealthyState()) + extends ComponentHealthState + with HasUnhealthyState + + object Failed { + @nowarn("cat=lint-byname-implicit") // https://github.com/scala/bug/issues/12072 + implicit val failedEncoder: Encoder[Failed] = deriveEncoder[Failed] + } + + /** Unhealthy state data + * + * @param description description of the state + * @param error associated canton error + */ + final case class UnhealthyState( + description: Option[String] = None, + error: Option[BaseError] = None, + elc: Option[ErrorLoggingContext] = None, + ) { + val errorAsStringOpt: Option[String] = error.map { error => + s"${error.code.codeStr(elc.flatMap(_.traceContext.traceId))}: ${error.cause}" + } + + def toComponentStatusDataV0: proto.NodeStatus.ComponentStatus.StatusData = + proto.NodeStatus.ComponentStatus.StatusData(Some(this.show)) + } + + object UnhealthyState { + implicit val unhealthyStateEncoder: Encoder[UnhealthyState] = + Encoder.encodeString.contramap(_.show) + implicit val 
prettyUnhealthyState: Pretty[UnhealthyState] = prettyOfString[UnhealthyState] { + state => + s"${state.description.getOrElse("")}${state.errorAsStringOpt.map(e => s", error: $e").getOrElse("")}" + } + } + + object Unhealthy { + def unapply(state: ComponentHealthState): Option[UnhealthyState] = state match { + case _: Ok => None + case Degraded(degraded) => Some(degraded) + case Failed(failed) => Some(failed) + } + } + + object HasUnhealthyState { + // Use a separate pretty instance for HasUnhealthyState objects to slim down the Tree structure and avoid + // too many layers of wrapping + implicit val prettyHasUnhealthyState: Pretty[HasUnhealthyState] = + prettyOfClass[HasUnhealthyState]( + unnamedParamIfDefined(_.state.description.map(_.unquoted)), + paramIfDefined("error", _.state.errorAsStringOpt.map(_.unquoted)), + ) + } + trait HasUnhealthyState { def state: UnhealthyState } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/ComponentStatus.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/ComponentStatus.scala new file mode 100644 index 0000000000..081044121c --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/ComponentStatus.scala @@ -0,0 +1,59 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.health + +import cats.implicits.catsSyntaxEitherId +import com.digitalasset.canton.* +import com.digitalasset.canton.health.ComponentHealthState.* +import com.digitalasset.canton.health.admin.v0 as proto +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import io.circe.Encoder +import io.circe.generic.semiauto.deriveEncoder + +import scala.annotation.nowarn + +/** Simple representation of the health state of a component, easily (de)serializable (from)to protobuf or JSON + */ +final case class ComponentStatus(name: String, state: ComponentHealthState) extends PrettyPrinting { + def toProtoV0: proto.NodeStatus.ComponentStatus = + proto.NodeStatus.ComponentStatus( + name = name, + status = state.toComponentStatusV0, + ) + + override val pretty: Pretty[ComponentStatus] = ComponentStatus.componentStatusPretty +} + +object ComponentStatus { + def fromProtoV0(dependency: proto.NodeStatus.ComponentStatus): ParsingResult[ComponentStatus] = + dependency.status match { + case proto.NodeStatus.ComponentStatus.Status.Ok(value) => + ComponentStatus( + dependency.name, + ComponentHealthState.Ok(value.description), + ).asRight + case proto.NodeStatus.ComponentStatus.Status + .Degraded(value: proto.NodeStatus.ComponentStatus.StatusData) => + ComponentStatus( + dependency.name, + Degraded(UnhealthyState(value.description)), + ).asRight + case proto.NodeStatus.ComponentStatus.Status.Failed(value) => + ComponentStatus( + dependency.name, + Failed(UnhealthyState(value.description)), + ).asRight + case _ => + ProtoDeserializationError.UnrecognizedField("Unknown state").asLeft + } + + @nowarn("cat=lint-byname-implicit") // https://github.com/scala/bug/issues/12072 + implicit val componentStatusEncoder: Encoder[ComponentStatus] = deriveEncoder[ComponentStatus] + + implicit val componentStatusPretty: 
Pretty[ComponentStatus] = { + import Pretty.* + prettyInfix[ComponentStatus](_.name.unquoted, ":", _.state) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/CompositeHealthElement.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/CompositeHealthElement.scala new file mode 100644 index 0000000000..fa47fa3dff --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/CompositeHealthElement.scala @@ -0,0 +1,110 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.health + +import cats.Eval +import com.digitalasset.canton.DiscardOps +import com.digitalasset.canton.lifecycle.RunOnShutdown +import com.digitalasset.canton.tracing.TraceContext + +import scala.collection.concurrent.TrieMap + +/** Defines a [[HealthElement]] that merely aggregates the state of other (dependent) [[HealthElement]]s. + * The dependencies need not be reported anywhere. + * + * If you need to manage a state separately for this component, + * add a dedicated dependency on [[AtomicHealthElement]]. + * + * @tparam ID The identifier type for the dependent health elements. + */ +trait CompositeHealthElement[ID, HE <: HealthElement] extends HealthElement { + + /** Fetch the current states from the relevant dependencies + * and combine them into the new state to report for this element. 
+ */ + protected def combineDependentStates: State + + protected def refreshFromDependencies()(implicit traceContext: TraceContext): Unit = + refreshState(Eval.always(combineDependentStates)) + + private val dependencies: TrieMap[ID, HE] = TrieMap.empty[ID, HE] + private val dependencyListener: HealthListener = new HealthListener { + override def name: String = CompositeHealthElement.this.name + + override def poke()(implicit traceContext: TraceContext): Unit = + refreshFromDependencies() + } + + // Unregister all dependencies when this element is closed. + locally { + import TraceContext.Implicits.Empty.* + associatedOnShutdownRunner.runOnShutdown_(new RunOnShutdown { + override def name: String = s"unregister-$name-from-dependencies" + override def done: Boolean = false + override def run(): Unit = unregisterFromAll() + }) + } + + private def unregisterFromAll(): Unit = { + dependencies.foreachEntry((_, element) => + element.unregisterOnHealthChange(dependencyListener).discard[Boolean] + ) + } + + protected def getDependencies: Map[ID, HE] = dependencies.readOnlySnapshot().toMap + + protected def setDependency(id: ID, dependency: HE): Unit = + alterDependencies(add = Map(id -> dependency), remove = Set.empty) + + protected def removeDependency(id: ID): Unit = + alterDependencies(add = Map.empty, remove = Set(id)) + + /** First removes all dependencies in `remove`, then adds all those in `add`. + * If an `ID` appears in `remove` and `add`, then the `ID` is replaced. + * Refreshes the state if any of the dependencies was changed. + * + * Updates of `dependencies` are not atomic: If this method is called concurrently + * multiple times, the resulting dependencies may not correspond to a serializable execution. + * + * If an dependency triggers a concurrent state refresh, then the state refresh may see + * an inconsistent set of dependencies and therefore derive an inconsistent state. 
+ * This however is only temporary as in this case another state refresh will be triggered at the end. + */ + protected def alterDependencies(remove: Set[ID], add: Map[ID, HE]): Unit = { + def removeId(id: ID): Boolean = + if (add.contains(id)) false + else + dependencies.remove(id) match { + case None => false + case Some(removed) => + removed.unregisterOnHealthChange(dependencyListener).discard[Boolean] + true + } + + def addOrReplace(id: ID, dependency: HE): Boolean = + dependencies.put(id, dependency) match { + case Some(`dependency`) => false + case other => + other.foreach(_.unregisterOnHealthChange(dependencyListener).discard[Boolean]) + dependency.registerOnHealthChange(dependencyListener).discard[Boolean] + true + } + + if (!associatedOnShutdownRunner.isClosing) { + val removedAtLeastOne = remove.map(removeId).exists(Predef.identity) + val addedAtLeastOne = + add.map { case (id, dependency) => addOrReplace(id, dependency) }.exists(Predef.identity) + val dependenciesChanged = addedAtLeastOne || removedAtLeastOne + // Since the associatedOnShutdownRunner may have started closing while we've been modifying the dependencies, + // query the closing flag again and repeat the unregistration + if (associatedOnShutdownRunner.isClosing) { + unregisterFromAll() + } else if (dependenciesChanged) refreshFromDependencies()(TraceContext.empty) + } + } +} + +trait CompositeHealthComponent[ID, HE <: HealthElement] + extends CompositeHealthElement[ID, HE] + with HealthComponent diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/HealthComponent.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/HealthComponent.scala new file mode 100644 index 0000000000..196b2b6fac --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/HealthComponent.scala @@ -0,0 +1,38 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.health + +import com.digitalasset.canton.lifecycle.OnShutdownRunner +import com.digitalasset.canton.logging.TracedLogger +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} + +/** Refines the state of a [[HealthElement]] to [[ToComponentHealthState]] */ +trait HealthQuasiComponent extends HealthElement { + override type State <: ToComponentHealthState & PrettyPrinting + override protected def prettyState: Pretty[State] = Pretty[State] + + def isFailed: Boolean = getState.toComponentHealthState.isFailed + def toComponentStatus: ComponentStatus = ComponentStatus(name, getState.toComponentHealthState) + + override def closingState: State +} + +/** Refines the state of a [[HealthElement]] to [[ComponentHealthState]] */ +trait HealthComponent extends HealthQuasiComponent { + override type State = ComponentHealthState + + override def closingState: ComponentHealthState = ComponentHealthState.ShutdownState +} + +object HealthComponent { + class AlwaysHealthyComponent( + override val name: String, + override protected val logger: TracedLogger, + ) extends HealthComponent { + override protected def initialHealthState: ComponentHealthState = ComponentHealthState.Ok() + override def closingState: ComponentHealthState = ComponentHealthState.Ok() + override protected def associatedOnShutdownRunner: OnShutdownRunner = + new OnShutdownRunner.PureOnShutdownRunner(logger) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/HealthElement.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/HealthElement.scala new file mode 100644 index 0000000000..c4892ed8b7 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/HealthElement.scala @@ -0,0 +1,226 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.health + +import cats.Eval +import com.digitalasset.canton.lifecycle.{OnShutdownRunner, RunOnShutdown} +import com.digitalasset.canton.logging.pretty.Pretty +import com.digitalasset.canton.logging.{ErrorLoggingContext, TracedLogger} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.ShowUtil.* +import com.digitalasset.canton.util.TryUtil.* +import com.digitalasset.canton.util.{ErrorUtil, LoggerUtil} + +import java.util.ConcurrentModificationException +import java.util.concurrent.atomic.AtomicReference +import scala.annotation.tailrec +import scala.collection.concurrent +import scala.collection.concurrent.TrieMap +import scala.concurrent.duration.{Duration, DurationInt} +import scala.util.Try + +/** A [[HealthElement]] maintains a health state and notifies [[HealthListener]]s whenever the state has changed. + * + * [[HealthElement]]s are refined in three dimensions + * - They can be atomic maintaining their own state ([[AtomicHealthElement]]) or + * composite ([[CompositeHealthElement]]) aggregating the states of their dependencies + * - The state can be refined to [[ToComponentHealthState]] with [[HealthQuasiComponent]] + * and further to [[ComponentHealthState]] with [[HealthComponent]]. + * - Whether they need to be closed on their own ([[CloseableHealthElement]]). + * + * The traits from each dimension can be mixed together to create the appropriate combination. + * Do not mix several traits from the same dimension! + */ +trait HealthElement { + import HealthElement.* + import HealthElement.RefreshingState.* + + /** Name of the health element. Used for logging. */ + def name: String + + /** The set of currently registered listeners */ + private val listeners: concurrent.Map[HealthListener, Unit] = + TrieMap.empty[HealthListener, Unit] + + /** Registers a listener that gets poked upon each change of this element's health state. 
+ * + * @return Whether the listener was not registered before + */ + def registerOnHealthChange(listener: HealthListener): Boolean = { + val isNew = listeners.putIfAbsent(listener, ()).isEmpty + if (isNew) listener.poke()(TraceContext.empty) + isNew + } + + /** Unregisters a listener. + * + * @return Whether the listener was registered before. + */ + def unregisterOnHealthChange(listener: HealthListener): Boolean = + listeners.remove(listener).isDefined + + /** The type of health states exposed by this component */ + type State + protected def prettyState: Pretty[State] + + private lazy val internalState: AtomicReference[InternalState[State]] = + new AtomicReference[InternalState[State]](InternalState(initialHealthState, Idle)) + + /** Returns the current state */ + final def getState: State = internalState.get().state + + /** The initial state upon creation */ + protected def initialHealthState: State + + /** The state set when the [[associatedOnShutdownRunner]] closes */ + protected def closingState: State + + /** The [[com.digitalasset.canton.lifecycle.OnShutdownRunner]] associated with this object. + * + * When this [[com.digitalasset.canton.lifecycle.OnShutdownRunner]] closes, the health state permanently becomes [[closingState]] + * and all listeners are notified about this. + */ + protected def associatedOnShutdownRunner: OnShutdownRunner + + locally { + import TraceContext.Implicits.Empty.* + associatedOnShutdownRunner.runOnShutdown_(new RunOnShutdown { + override def name: String = s"set-closing-state-of-${HealthElement.this.name}" + override def done: Boolean = false + override def run(): Unit = refreshState(Eval.now(closingState)) + }) + } + + protected def logger: TracedLogger + + /** Triggers a refresh of the component's state, using `newState` to determine the new state. + * May return before the `newState` has been evaluated and the listeners have been poked. 
+ * + * Note that listeners need not be poked about every state change; + * it suffices that they are poked eventually after each state change. + * So if there are frequent updates to the state, possibly from concurrent calls, + * then the listeners may never see some intermediate states. + */ + protected def refreshState( + newState: Eval[State] + )(implicit traceContext: TraceContext): Unit = { + val previous = internalState.getAndUpdate { + case InternalState(s, Idle) => InternalState(s, Refreshing) + case InternalState(s, Refreshing) => InternalState(s, Poked(newState)) + case InternalState(s, Poked(_)) => InternalState(s, Poked(newState)) + } + previous.refreshing match { + case Idle => doRefresh(previous.state, newState) + case Refreshing | Poked(_) => + } + } + + @tailrec private def doRefresh( + oldState: State, + newState: Eval[State], + )(implicit traceContext: TraceContext): Unit = { + def errorOnIdle: Nothing = { + implicit val errorLoggingContext: ErrorLoggingContext = + ErrorLoggingContext.fromTracedLogger(logger) + ErrorUtil.internalError( + new ConcurrentModificationException(s"State changed to $Idle while $Refreshing") + ) + } + // When we're closing, force the value to `closingState`. + // This ensures that `closingState` is sticky. 
+ val newStateValue = if (associatedOnShutdownRunner.isClosing) closingState else newState.value + logger.debug(s"Refreshing state of $name from $oldState to $newStateValue") + + val previous = internalState.getAndUpdate { + case InternalState(_, Idle) => errorOnIdle + case InternalState(_, Refreshing) => InternalState(newStateValue, Idle) + case InternalState(_, Poked(_)) => InternalState(newStateValue, Refreshing) + } + if (previous.state != oldState) { + implicit val errorLoggingContext: ErrorLoggingContext = + ErrorLoggingContext.fromTracedLogger(logger) + ErrorUtil.internalError( + new ConcurrentModificationException( + s"State changed from $oldState to ${previous.state} while doRefresh was running" + ) + ) + } + if (newStateValue != oldState) { + logStateChange(oldState, newStateValue) + notifyListeners + } + previous.refreshing match { + case Idle => errorOnIdle + case Refreshing => () + case Poked(eval) => doRefresh(newStateValue, eval) + } + } + + private def logIfLongPokeTime(listener: HealthListener, start: Long)(implicit + traceContext: TraceContext + ): Unit = { + val dur = Duration.fromNanos(System.nanoTime() - start) + lazy val durationStr = LoggerUtil.roundDurationForHumans(dur) + if (dur > 1.second) logger.warn(s"Listener ${listener.name} took $durationStr to run") + } + + private def notifyListeners(implicit traceContext: TraceContext): Unit = { + listeners.foreachEntry { (listener, _) => + logger.debug(s"Notifying listener ${listener.name} of health state change from $name") + val start = System.nanoTime() + Try(listener.poke()).forFailed { exception => + logger.error(s"Notification of ${listener.name} failed", exception) + } + logIfLongPokeTime(listener, start) + } + } + + private def logStateChange( + oldState: State, + newState: State, + )(implicit traceContext: TraceContext): Unit = { + implicit val prettyS: Pretty[State] = prettyState + logger.info(show"${name.singleQuoted} is now in state $newState. 
Previous state was $oldState.") + } +} + +object HealthElement { + + /** The internal state of a [[HealthElement]] consists of the current health state `state` and state of the refreshing state machine */ + private final case class InternalState[+S](state: S, refreshing: RefreshingState[S]) + + /** The states of the refreshing state machine implemented by [[HealthElement.refreshState]] + * and [[HealthElement.doRefresh]]. + * - In [[RefreshingState.Idle]], nothing is happening. + * - In [[RefreshingState.Refreshing]], the one thread that caused the transition + * from [[RefreshingState.Idle]] to [[RefreshingState.Refreshing]] is updating the state + * using the given `newState` method to obtain the new state. + * - If another call to [[HealthElement.refreshState]] happens concurrently, + * the update is queued in state [[RefreshingState.Poked]] with the given `newState` method. + * - Further calls to [[HealthElement.refreshState]] in state [[RefreshingState.Poked]] + * replace the previous [[RefreshingState.Poked]] state. + * - When the thread performing the state update has finished and finds + * that there are queued updates ([[RefreshingState.Poked]]), + * it runs another update with the queued `newState` method. + * Otherwise, the state returns back to [[RefreshingState.Idle]]. + * + *
+    * ┌──────┐    refreshState       ┌────────────┐     refreshState   ┌──────────┐
+    * │      ├───────────────────────►            ├────────────────────►          ├───────┐
+    * │ Idle │                       │ Refreshing │                    │  Poked   │       │refresh
+    * │      │                       │            │                    │ newState │       │State
+    * │      ◄───────────────────────┤            ◄────────────────────┤          ◄───────┘
+    * └──────┘    done refreshing    └────────────┘  done refreshing   └──────────┘
+    * 
+ * + * Listeners are notified after each state update that does change the state, even if further state updates are queued. + * This ensures that continuous state changes propagate to the listeners timely. + */ + private[HealthElement] sealed trait RefreshingState[+S] extends Product with Serializable + private[HealthElement] object RefreshingState { + case object Idle extends RefreshingState[Nothing] + final case object Refreshing extends RefreshingState[Nothing] + final case class Poked[S](newState: Eval[S]) extends RefreshingState[S] + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/HealthListener.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/HealthListener.scala new file mode 100644 index 0000000000..8550a8809e --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/HealthListener.scala @@ -0,0 +1,34 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.health + +import com.digitalasset.canton.tracing.TraceContext + +/** A listener on the state of a [[HealthElement]] */ +trait HealthListener { + + /** Name of the listener. Used for logging. Must not throw. */ + def name: String + + /** Called after the state of a health element has changed. + * + * Implementations must be thread-safe in the sense that multiple notifications can run concurrently. + * Implementations must not block and should not execute significant computations as part of this call. + * In particular, it is wrong to assume that the listener has finished its updates by the time + * this method returns. + * + * We explicitly do NOT pass along the new state of the health element nor the health element itself. + * Instead, the listener must query the current state using [[HealthElement.getState]]. 
+ * This ensures that we do not need to synchronize concurrent updates and notifications; + * the state obtained from [[HealthElement.getState]] is guaranteed to be at least as up to date as the notification. + */ + def poke()(implicit traceContext: TraceContext): Unit +} + +object HealthListener { + def apply(n: String)(onPoke: => Unit): HealthListener = new HealthListener { + override def name: String = n + override def poke()(implicit traceContext: TraceContext): Unit = onPoke + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/HealthService.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/HealthService.scala new file mode 100644 index 0000000000..d0678d2f55 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/HealthService.scala @@ -0,0 +1,64 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.health + +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.logging.TracedLogger +import com.digitalasset.canton.logging.pretty.Pretty +import com.digitalasset.canton.logging.pretty.Pretty.* +import io.grpc.health.v1.HealthCheckResponse.ServingStatus + +/** A [[HealthService]] aggregates [[CloseableHealthComponent]]s under critical and soft dependencies. + * Services are queryable through their name in the gRPC Health Check service. + * Both critical and soft dependencies are reported under their names too. + * + * The state of the [[HealthService]] is [[io.grpc.health.v1.HealthCheckResponse.ServingStatus.SERVING]] + * if and only if none of the critical dependencies have failed. Soft dependencies are merely reported + * as dependencies, but do not influence the status of the [[HealthService]] itself. 
+ */ +final class HealthService( + override val name: String, + override protected val logger: TracedLogger, + override protected val timeouts: ProcessingTimeout, + private val criticalDependencies: Seq[HealthQuasiComponent], + private val softDependencies: Seq[HealthQuasiComponent], +) extends CloseableHealthElement + with CompositeHealthElement[String, HealthQuasiComponent] { + + alterDependencies( + remove = Set.empty, + add = criticalDependencies.map(dep => dep.name -> dep).toMap, + ) + + override protected def closingState: ServingStatus = ServingStatus.NOT_SERVING + + override type State = ServingStatus + override protected def prettyState: Pretty[ServingStatus] = Pretty[ServingStatus] + + override protected def combineDependentStates: ServingStatus = { + if (criticalDependencies.forall(!_.isFailed)) ServingStatus.SERVING + else ServingStatus.NOT_SERVING + } + + override protected def initialHealthState: ServingStatus = + if (criticalDependencies.isEmpty) ServingStatus.SERVING else ServingStatus.NOT_SERVING + + def dependencies: Seq[HealthQuasiComponent] = criticalDependencies ++ softDependencies +} + +object HealthService { + def apply( + name: String, + logger: TracedLogger, + timeouts: ProcessingTimeout, + criticalDependencies: Seq[HealthQuasiComponent] = Seq.empty, + softDependencies: Seq[HealthQuasiComponent] = Seq.empty, + ): HealthService = + new HealthService(name, logger, timeouts, criticalDependencies, softDependencies) + + implicit val prettyServiceHealth: Pretty[HealthService] = prettyOfClass( + param("name", _.name.unquoted), + param("state", _.getState), + ) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/MutableHealthComponent.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/MutableHealthComponent.scala new file mode 100644 index 0000000000..e8eff7a782 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/MutableHealthComponent.scala @@ 
-0,0 +1,109 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.health + +import cats.syntax.functor.* +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.logging.pretty.Pretty +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} + +/** A [[CloseableHealthQuasiComponent]] that derives its state from an optional mutable [[HealthQuasiComponent]]. + * Use when the health component is not instantiated at bootstrap time and/or changes during the lifetime. + * + * Must be closed separately. + * + * @param uninitializedName name used to identify this component while it has not yet been initialized + * @param initialHealthState state the component will return while it has not yet been initialized + */ +class MutableHealthQuasiComponent[H <: HealthQuasiComponent]( + override protected val loggerFactory: NamedLoggerFactory, + uninitializedName: String, + override protected val initialHealthState: H#State, + override protected val timeouts: ProcessingTimeout, + initialClosingState: H#State, +) extends CloseableHealthQuasiComponent + with CompositeHealthElement[Unit, H] + with NamedLogging { + override type State = H#State + + private def currentDelegate: Option[H] = getDependencies.get(()) + + override def name: String = + currentDelegate.map(_.name).getOrElse(uninitializedName) + + override def closingState: State = + currentDelegate.map(_.closingState).getOrElse(initialClosingState) + + def set(element: H): Unit = + alterDependencies(remove = Set.empty, add = Map(() -> element)) + + override protected def prettyState: Pretty[H#State] = implicitly[Pretty[H#State]] + + override protected def combineDependentStates: State = + currentDelegate.map(_.getState).getOrElse(initialHealthState) +} + +/** Refines a [[MutableHealthQuasiComponent]] state to [[ComponentHealthState]] + */ +final class 
MutableHealthComponent( + loggerFactory: NamedLoggerFactory, + uninitializedName: String, + timeouts: ProcessingTimeout, + shutdownState: ComponentHealthState, +) extends MutableHealthQuasiComponent[HealthComponent]( + loggerFactory, + uninitializedName, + ComponentHealthState.NotInitializedState, + timeouts, + shutdownState, + ) + with HealthComponent + +object MutableHealthComponent { + def apply( + loggerFactory: NamedLoggerFactory, + uninitializedName: String, + timeouts: ProcessingTimeout, + shutdownState: ComponentHealthState = ComponentHealthState.ShutdownState, + ): MutableHealthComponent = + new MutableHealthComponent(loggerFactory, uninitializedName, timeouts, shutdownState) +} + +/** A health component that aggregates the health state of multiple mutable dependencies using `reduceState` + * into a single health state. + * + * @param reduceState Computes the aggregate health state of the component given the health states of all dependencies. + * When given the empty map, produces the initial state. 
+ */ +abstract class DelegatingMutableHealthQuasiComponent[Id, H <: HealthQuasiComponent]( + override protected val loggerFactory: NamedLoggerFactory, + override val name: String, + override protected val timeouts: ProcessingTimeout, + private val reduceState: Map[Id, H#State] => H#State, +) extends CloseableHealthQuasiComponent + with CompositeHealthElement[Id, H] + with NamedLogging { + override type State = H#State + + override protected def initialHealthState: H#State = reduceState(Map.empty) + + override protected def combineDependentStates: State = + reduceState(getDependencies.fmap(_.getState)) + + def set(id: Id, element: H): Unit = setDependency(id, element) +} + +final class DelegatingMutableHealthComponent[Id]( + override val loggerFactory: NamedLoggerFactory, + name: String, + override val timeouts: ProcessingTimeout, + reduceStateFromMany: Map[Id, ComponentHealthState] => ComponentHealthState, + override val closingState: ComponentHealthState, +) extends DelegatingMutableHealthQuasiComponent[Id, HealthComponent]( + loggerFactory, + name, + timeouts, + reduceStateFromMany, + ) + with CloseableHealthComponent diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/ServiceHealthStatusManager.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/ServiceHealthStatusManager.scala new file mode 100644 index 0000000000..14241b1f36 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/ServiceHealthStatusManager.scala @@ -0,0 +1,15 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.health + +import io.grpc.protobuf.services.HealthStatusManager + +/** Combines a [[io.grpc.protobuf.services.HealthStatusManager]] (exposed as a gRPC health service) + * with the set of [[HealthService]]s it needs to report on. 
+ */ +final case class ServiceHealthStatusManager( + name: String, + manager: HealthStatusManager, + services: Set[HealthService], +) diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/ToComponentHealthState.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/ToComponentHealthState.scala new file mode 100644 index 0000000000..d9b7801eb1 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/health/ToComponentHealthState.scala @@ -0,0 +1,13 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.health + +/** Interface that provides conversion from a State type to [[ComponentHealthState]] + */ +trait ToComponentHealthState { + + /** Convert a state to [[ComponentHealthState]] + */ + def toComponentHealthState: ComponentHealthState +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/lifecycle/ClosingException.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/lifecycle/ClosingException.scala new file mode 100644 index 0000000000..c2d20688cc --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/lifecycle/ClosingException.scala @@ -0,0 +1,21 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.lifecycle + +import java.util.concurrent.RejectedExecutionException + +abstract class CancellationException(message: String) extends RuntimeException(message) + +/** An operation has been cancelled due to shutdown/closing of a component. */ +class ClosedCancellationException(message: String) extends CancellationException(message) + +/** Helper to pattern match for exceptions that may happen during shutdown/closing. 
*/ +object ClosingException { + def apply(t: Throwable): Boolean = t match { + case _: RejectedExecutionException | _: CancellationException | _: InterruptedException => true + case _ => false + } + + def unapply(t: Throwable): Option[Throwable] = Some(t).filter(apply) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/lifecycle/FlagCloseable.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/lifecycle/FlagCloseable.scala new file mode 100644 index 0000000000..34a16c7eb2 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/lifecycle/FlagCloseable.scala @@ -0,0 +1,146 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.lifecycle + +import com.digitalasset.canton.concurrent.FutureSupervisor +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.logging.{ErrorLoggingContext, TracedLogger} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.Thereafter +import com.digitalasset.canton.util.Thereafter.syntax.* +import com.digitalasset.canton.util.TryUtil.* +import org.slf4j.event.Level + +import scala.concurrent.ExecutionContext +import scala.concurrent.duration.{Duration, DurationInt, FiniteDuration} +import scala.util.Try + +/** Adds the [[java.lang.AutoCloseable.close]] method to the interface of [[PerformUnlessClosing]]. + * The component's custom shutdown behaviour should override the `onClosed` method. 
+ */ +trait FlagCloseable extends AutoCloseable with PerformUnlessClosing { + protected def timeouts: ProcessingTimeout + + override protected def closingTimeout: FiniteDuration = timeouts.closing.asFiniteApproximation + + override final def close(): Unit = super.close() +} + +object FlagCloseable { + def apply(tracedLogger: TracedLogger, timeoutsArgs: ProcessingTimeout): FlagCloseable = + new FlagCloseable { + override protected def logger: TracedLogger = tracedLogger + override protected def timeouts: ProcessingTimeout = timeoutsArgs + } +} + +/** Context to capture and pass through a caller's closing state. + * + * This allows us for example to stop operations down the call graph if either the caller or the current component + * executing an operation is closed. + */ +final case class CloseContext(private val flagCloseable: FlagCloseable) { + def context: PerformUnlessClosing = flagCloseable +} + +object CloseContext { + + /** Combines the 2 given close contexts such that if any of them gets closed, + * the returned close context is also closed. Works like an OR operator. + * However if this returned close context is closed directly, the 2 given + * closed contexts are _NOT_ closed, neither will it wait for any pending + * tasks on any of the 2 given close context to finish. 
+ * + * NOTE: YOU MUST CLOSE THE CONTEXT MANUALLY IN ORDER TO AVOID PILING UP + * NEW TASKS ON THE RUNONSHUTDOWN HOOK OF THE PARENT CONTEXTS + */ + def combineUnsafe( + closeContext1: CloseContext, + closeContext2: CloseContext, + processingTimeout: ProcessingTimeout, + tracedLogger: TracedLogger, + )(implicit traceContext: TraceContext): CloseContext = { + // TODO(#8594) Add a test that this correctly implements the performUnlessClosing semantics + // Currently, this is broken because if both closeContext1 and closeContext2 are closed concurrently, + // then the close of the created flagCloseable will terminate early for the second call to its close method + // and thus not delay that closeContext's closing. + val flagCloseable = FlagCloseable(tracedLogger, processingTimeout) + val cancelToken1 = closeContext1.context.runOnShutdown(new RunOnShutdown { + override def name: String = s"combined-close-ctx1" + override def done: Boolean = + closeContext1.context.isClosing && closeContext2.context.isClosing + override def run(): Unit = flagCloseable.close() + }) + val cancelToken2 = closeContext2.context.runOnShutdown(new RunOnShutdown { + override def name: String = s"combined-close-ctx2" + override def done: Boolean = + closeContext1.context.isClosing && closeContext2.context.isClosing + override def run(): Unit = flagCloseable.close() + }) + flagCloseable.runOnShutdown_(new RunOnShutdown { + override def name: String = "cancel-close-propagation-of-combined-context" + override def done: Boolean = + !closeContext1.context.containsShutdownTask(cancelToken1) && + !closeContext2.context.containsShutdownTask(cancelToken2) + override def run(): Unit = { + closeContext1.context.cancelShutdownTask(cancelToken1) + closeContext2.context.cancelShutdownTask(cancelToken2) + } + }) + CloseContext(flagCloseable) + } + + def withCombinedContext[F[_], T]( + closeContext1: CloseContext, + closeContext2: CloseContext, + processingTimeout: ProcessingTimeout, + tracedLogger: TracedLogger, 
+ )(func: CloseContext => F[T])(implicit + traceContext: TraceContext, + ex: ExecutionContext, + F: Thereafter[F], + ): F[T] = { + val tmp = combineUnsafe(closeContext1, closeContext2, processingTimeout, tracedLogger) + func(tmp).thereafter(_ => tmp.flagCloseable.close()) + } +} + +/** Mix-in to obtain a [[CloseContext]] implicit based on the class's [[FlagCloseable]] */ +trait HasCloseContext extends PromiseUnlessShutdownFactory { self: FlagCloseable => + implicit val closeContext: CloseContext = CloseContext(self) +} + +trait PromiseUnlessShutdownFactory { self: HasCloseContext => + protected def logger: TracedLogger + + /** Use this method to create a PromiseUnlessShutdown that will automatically be cancelled when the close context + * is closed. This allows proper clean up of stray promises when the node is transitioning to a passive state. + * + * Note: you should *not* invoke `success` on the returned promise but rather use `trySuccess`. The reason is that + * the call to `success` may fail in case of shutdown. 
+ */ + def mkPromise[A]( + description: String, + futureSupervisor: FutureSupervisor, + logAfter: Duration = 10.seconds, + logLevel: Level = Level.DEBUG, + )(implicit elc: ErrorLoggingContext, ec: ExecutionContext): PromiseUnlessShutdown[A] = { + val promise = new PromiseUnlessShutdown[A](description, futureSupervisor, logAfter, logLevel) + + val cancelToken = closeContext.context.runOnShutdown(new RunOnShutdown { + override def name: String = s"$description-abort-promise-on-shutdown" + override def done: Boolean = promise.isCompleted + override def run(): Unit = promise.shutdown() + })(elc.traceContext) + + promise.future + .onComplete { _ => + Try(closeContext.context.cancelShutdownTask(cancelToken)).forFailed(e => + logger.debug(s"Failed to cancel shutdown task for $description", e)(elc.traceContext) + ) + } + + promise + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/lifecycle/FlagCloseableAsync.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/lifecycle/FlagCloseableAsync.scala new file mode 100644 index 0000000000..7a2ac28713 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/lifecycle/FlagCloseableAsync.scala @@ -0,0 +1,60 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.lifecycle + +import com.digitalasset.canton.DiscardOps +import com.digitalasset.canton.config.RefinedNonNegativeDuration +import com.digitalasset.canton.logging.ErrorLoggingContext + +import scala.concurrent.{Future, TimeoutException} + +/** AutoCloseableAsync eases the proper closing of futures. 
+ */ +trait FlagCloseableAsync extends FlagCloseable { + + /** closeAsync asynchronously releases resources held by a future + * @return an ordered sequence of async and sync closeables with async closeables made up of future and timeout + */ + protected def closeAsync(): Seq[AsyncOrSyncCloseable] + + final override def onClosed(): Unit = Lifecycle.close(closeAsync() *)(logger) +} + +trait AsyncOrSyncCloseable extends AutoCloseable + +class AsyncCloseable[D <: RefinedNonNegativeDuration[D]] private ( + name: String, + closeFuture: () => Future[?], + timeout: D, + onTimeout: TimeoutException => Unit, +)(implicit + loggingContext: ErrorLoggingContext +) extends AsyncOrSyncCloseable { + override def close(): Unit = + timeout.await(s"closing $name", onTimeout = onTimeout)(closeFuture()).discard + + override def toString: String = s"AsyncCloseable(name=$name)" +} + +object AsyncCloseable { + def apply[D <: RefinedNonNegativeDuration[D]]( + name: String, + closeFuture: => Future[?], + timeout: D, + onTimeout: TimeoutException => Unit = _ => (), + )(implicit + loggingContext: ErrorLoggingContext + ): AsyncCloseable[D] = + new AsyncCloseable(name, () => closeFuture, timeout, onTimeout) +} + +class SyncCloseable private (name: String, sync: () => Unit) extends AsyncOrSyncCloseable { + override def close(): Unit = sync() + override def toString: String = s"SyncCloseable(name=$name)" +} + +object SyncCloseable { + def apply(name: String, sync: => Unit): SyncCloseable = + new SyncCloseable(name, () => sync) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/lifecycle/FutureUnlessShutdown.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/lifecycle/FutureUnlessShutdown.scala new file mode 100644 index 0000000000..919985c4be --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/lifecycle/FutureUnlessShutdown.scala @@ -0,0 +1,340 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its 
affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.lifecycle + +import cats.arrow.FunctionK +import cats.data.EitherT +import cats.{Applicative, FlatMap, Functor, Id, Monad, MonadThrow, Monoid, Parallel, ~>} +import com.digitalasset.canton.logging.ErrorLoggingContext +import com.digitalasset.canton.util.Thereafter.syntax.* +import com.digitalasset.canton.util.{LoggerUtil, Thereafter} +import com.digitalasset.canton.{DoNotDiscardLikeFuture, DoNotTraverseLikeFuture} + +import scala.concurrent.{Awaitable, ExecutionContext, Future} +import scala.util.{Failure, Success, Try} + +object FutureUnlessShutdown { + + /** Close the type abstraction of [[FutureUnlessShutdown]] */ + def apply[A](x: Future[UnlessShutdown[A]]): FutureUnlessShutdown[A] = { + type K[T[_]] = Id[T[A]] + FutureUnlessShutdownImpl.Instance.subst[K](x) + } + + /** Immediately report [[UnlessShutdown.AbortedDueToShutdown]] */ + val abortedDueToShutdown: FutureUnlessShutdown[Nothing] = + FutureUnlessShutdown(Future.successful(UnlessShutdown.AbortedDueToShutdown)) + + /** Analog to [[scala.concurrent.Future]]`.unit` */ + val unit: FutureUnlessShutdown[Unit] = FutureUnlessShutdown( + Future.successful(UnlessShutdown.unit) + ) + + /** Analog to [[scala.concurrent.Future]]`.successful` */ + def pure[A](x: A): FutureUnlessShutdown[A] = lift(UnlessShutdown.Outcome(x)) + + def lift[A](x: UnlessShutdown[A]): FutureUnlessShutdown[A] = FutureUnlessShutdown( + Future.successful(x) + ) + + /** Wraps the result of a [[scala.concurrent.Future]] into an [[UnlessShutdown.Outcome]] */ + def outcomeF[A](f: Future[A])(implicit ec: ExecutionContext): FutureUnlessShutdown[A] = + FutureUnlessShutdown(f.map(UnlessShutdown.Outcome(_))) + + /** [[outcomeF]] as a [[cats.arrow.FunctionK]] to be used with Cat's `mapK` operation. 
+ * + * Can be used to switch from [[scala.concurrent.Future]] to [[FutureUnlessShutdown]] inside another + * functor/applicative/monad such as [[cats.data.EitherT]] via `eitherT.mapK(outcomeK)`. + */ + def outcomeK(implicit ec: ExecutionContext): Future ~> FutureUnlessShutdown = + // We can't use `FunctionK.lift` here because of the implicit execution context. + new FunctionK[Future, FutureUnlessShutdown] { + override def apply[A](future: Future[A]): FutureUnlessShutdown[A] = outcomeF(future) + } + + def liftK: UnlessShutdown ~> FutureUnlessShutdown = FunctionK.lift(lift) + + /** Analog to [[scala.concurrent.Future]]`.failed` */ + def failed[A](ex: Throwable): FutureUnlessShutdown[A] = FutureUnlessShutdown(Future.failed(ex)) + + /** Analog to [[scala.concurrent.Future]]`.fromTry` */ + def fromTry[T](result: Try[T]): FutureUnlessShutdown[T] = result match { + case Success(value) => FutureUnlessShutdown.pure(value) + case Failure(exception) => FutureUnlessShutdown.failed(exception) + } +} + +/** Monad combination of `Future` and [[UnlessShutdown]] + * + * We avoid wrapping and unwrapping it by emulating Scala 3's opaque types. + * This makes the asynchronous detection magic work out of the box for [[FutureUnlessShutdown]] + * because `FutureUnlessShutdown(x).isInstanceOf[Future]` holds at runtime. + */ +sealed abstract class FutureUnlessShutdownImpl { + + /** The abstract type of a [[scala.concurrent.Future]] containing a [[UnlessShutdown]]. + * We can't make it a subtype of [[scala.concurrent.Future]]`[`[[UnlessShutdown]]`]` itself + * because we want to change the signature and implementation of some methods like [[scala.concurrent.Future.flatMap]]. + * So [[FutureUnlessShutdown]] up-casts only into an [[scala.concurrent.Awaitable]]. + * + * The canonical name for this type would be `T`, but `FutureUnlessShutdown` gives better error messages. 
+ */ + @DoNotDiscardLikeFuture + @DoNotTraverseLikeFuture + type FutureUnlessShutdown[+A] <: Awaitable[UnlessShutdown[A]] + + /** Methods to evidence that [[FutureUnlessShutdown]] and [[scala.concurrent.Future]]`[`[[UnlessShutdown]]`]` + * can be replaced in any type context `K`. + */ + private[lifecycle] def subst[K[_[_]]]( + ff: K[Lambda[a => Future[UnlessShutdown[a]]]] + ): K[FutureUnlessShutdown] + // Technically, we could implement `unsubst` using `subst`, but it may be clearer if we make both directions explicit. + private[lifecycle] def unsubst[K[_[_]]]( + ff: K[FutureUnlessShutdown] + ): K[Lambda[a => Future[UnlessShutdown[a]]]] +} + +object FutureUnlessShutdownImpl { + val Instance: FutureUnlessShutdownImpl = new FutureUnlessShutdownImpl { + override type FutureUnlessShutdown[+A] = Future[UnlessShutdown[A]] + + override private[lifecycle] def subst[F[_[_]]]( + ff: F[Lambda[a => Future[UnlessShutdown[a]]]] + ): F[FutureUnlessShutdown] = ff + override private[lifecycle] def unsubst[F[_[_]]]( + ff: F[FutureUnlessShutdown] + ): F[Lambda[a => Future[UnlessShutdown[a]]]] = ff + } + + /** Extension methods for [[FutureUnlessShutdown]] */ + implicit final class Ops[+A](private val self: FutureUnlessShutdown[A]) extends AnyVal { + + /** Open the type abstraction */ + def unwrap: Future[UnlessShutdown[A]] = { + type K[T[_]] = Id[T[A]] + Instance.unsubst[K](self) + } + + /** Analog to [[scala.concurrent.Future]].`transform` */ + def transform[B](f: Try[UnlessShutdown[A]] => Try[UnlessShutdown[B]])(implicit + ec: ExecutionContext + ): FutureUnlessShutdown[B] = + FutureUnlessShutdown(unwrap.transform(f)) + + def transformIntoSuccess[B](f: Try[UnlessShutdown[A]] => UnlessShutdown[B])(implicit + ec: ExecutionContext + ): FutureUnlessShutdown[B] = + transform(x => Success(f(x))) + + /** Analog to [[scala.concurrent.Future]].`transform` */ + def transform[B]( + success: UnlessShutdown[A] => UnlessShutdown[B], + failure: Throwable => Throwable, + )(implicit ec: 
ExecutionContext): FutureUnlessShutdown[B] = + FutureUnlessShutdown(unwrap.transform(success, failure)) + + /** Analog to [[scala.concurrent.Future.transformWith]] */ + def transformWith[B]( + f: Try[UnlessShutdown[A]] => FutureUnlessShutdown[B] + )(implicit ec: ExecutionContext): FutureUnlessShutdown[B] = { + type K[F[_]] = Try[UnlessShutdown[A]] => F[B] + FutureUnlessShutdown(unwrap.transformWith(Instance.unsubst[K](f))) + } + + /** Analog to [[scala.concurrent.Future]].onComplete */ + def onComplete[B](f: Try[UnlessShutdown[A]] => Unit)(implicit ec: ExecutionContext): Unit = + unwrap.onComplete(f) + + /** Analog to [[scala.concurrent.Future]].`failed` */ + def failed: Future[Throwable] = self.unwrap.failed + + /** Evaluates `f` and returns its result if this future completes with [[UnlessShutdown.AbortedDueToShutdown]]. */ + def onShutdown[B >: A](f: => B)(implicit ec: ExecutionContext): Future[B] = + unwrap.map(_.onShutdown(f)) + + def failOnShutdownTo(t: => Throwable)(implicit ec: ExecutionContext): Future[A] = { + unwrap.flatMap { + case UnlessShutdown.Outcome(result) => Future.successful(result) + case UnlessShutdown.AbortedDueToShutdown => Future.failed(t) + } + } + + def isCompleted: Boolean = { + unwrap.isCompleted + } + + /** Evaluates `f` on shutdown but retains the result of the future. 
*/ + def tapOnShutdown(f: => Unit)(implicit + ec: ExecutionContext, + errorLoggingContext: ErrorLoggingContext, + ): FutureUnlessShutdown[A] = FutureUnlessShutdown { + import Thereafter.syntax.* + this.unwrap.thereafter { + case Success(UnlessShutdown.AbortedDueToShutdown) => LoggerUtil.logOnThrow(f) + case _ => + } + } + + // This method is here so that we don't need to import ```cats.syntax.flatmap._``` everywhere + def flatMap[B](f: A => FutureUnlessShutdown[B])(implicit + ec: ExecutionContext + ): FutureUnlessShutdown[B] = + FlatMap[FutureUnlessShutdown].flatMap(self)(f) + + // This method is here so that we don't need to import ```cats.syntax.functor._``` everywhere + def map[B](f: A => B)(implicit ec: ExecutionContext): FutureUnlessShutdown[B] = + Functor[FutureUnlessShutdown].map(self)(f) + + def subflatMap[B](f: A => UnlessShutdown[B])(implicit + ec: ExecutionContext + ): FutureUnlessShutdown[B] = + FutureUnlessShutdown(self.unwrap.map(_.flatMap(f))) + + def flatten[S](implicit + ec: ExecutionContext, + ev: A <:< FutureUnlessShutdown[S], + ): FutureUnlessShutdown[S] = + self.flatMap(ev) + + /** Analog to [[scala.concurrent.Future]].recover */ + def recover[U >: A]( + pf: PartialFunction[Throwable, UnlessShutdown[U]] + )(implicit executor: ExecutionContext): FutureUnlessShutdown[U] = + transform[U] { (value: Try[UnlessShutdown[A]]) => + value recover pf + } + } + + /** Cats monad instance for the combination of [[scala.concurrent.Future]] with [[UnlessShutdown]]. + * [[UnlessShutdown.AbortedDueToShutdown]] short-circuits sequencing. 
+ */ + private def monadFutureUnlessShutdownOpened(implicit + ec: ExecutionContext + ): MonadThrow[λ[α => Future[UnlessShutdown[α]]]] = + new MonadThrow[λ[α => Future[UnlessShutdown[α]]]] { + override def pure[A](x: A): Future[UnlessShutdown[A]] = + Future.successful(UnlessShutdown.Outcome(x)) + + override def flatMap[A, B]( + a: Future[UnlessShutdown[A]] + )(f: A => Future[UnlessShutdown[B]]): Future[UnlessShutdown[B]] = + a.flatMap { + case UnlessShutdown.Outcome(x) => f(x) + case UnlessShutdown.AbortedDueToShutdown => + Future.successful(UnlessShutdown.AbortedDueToShutdown) + } + + override def tailRecM[A, B]( + a: A + )(f: A => Future[UnlessShutdown[Either[A, B]]]): Future[UnlessShutdown[B]] = + Monad[Future].tailRecM(a)(a0 => + f(a0).map { + case UnlessShutdown.AbortedDueToShutdown => Right(UnlessShutdown.AbortedDueToShutdown) + case UnlessShutdown.Outcome(Left(a1)) => Left(a1) + case UnlessShutdown.Outcome(Right(b)) => Right(UnlessShutdown.Outcome(b)) + } + ) + + override def raiseError[A](e: Throwable): Future[UnlessShutdown[A]] = Future.failed(e) + + override def handleErrorWith[A]( + fa: Future[UnlessShutdown[A]] + )(f: Throwable => Future[UnlessShutdown[A]]): Future[UnlessShutdown[A]] = { + fa.recoverWith { case throwable => f(throwable) } + } + } + + implicit def catsStdInstFutureUnlessShutdown(implicit + ec: ExecutionContext + ): MonadThrow[FutureUnlessShutdown] = + Instance.subst[MonadThrow](monadFutureUnlessShutdownOpened) + + implicit def monoidFutureUnlessShutdown[A](implicit + M: Monoid[A], + ec: ExecutionContext, + ): Monoid[FutureUnlessShutdown[A]] = { + type K[T[_]] = Monoid[T[A]] + Instance.subst[K](Monoid[Future[UnlessShutdown[A]]]) + } + + private def parallelApplicativeFutureUnlessShutdownOpened(implicit + ec: ExecutionContext + ): Applicative[Lambda[alpha => Future[UnlessShutdown[alpha]]]] = + new Applicative[Lambda[alpha => Future[UnlessShutdown[alpha]]]] { + private val applicativeUnlessShutdown = Applicative[UnlessShutdown] + + override 
def pure[A](x: A): Future[UnlessShutdown[A]] = + Future.successful(UnlessShutdown.Outcome(x)) + + override def ap[A, B](ff: Future[UnlessShutdown[A => B]])( + fa: Future[UnlessShutdown[A]] + ): Future[UnlessShutdown[B]] = ff.zipWith(fa)((f, a) => applicativeUnlessShutdown.ap(f)(a)) + } + + def parallelApplicativeFutureUnlessShutdown(implicit + ec: ExecutionContext + ): Applicative[FutureUnlessShutdown] = + Instance.subst[Applicative](parallelApplicativeFutureUnlessShutdownOpened) + + private def parallelInstanceFutureUnlessShutdownOpened(implicit + ec: ExecutionContext + ): Parallel[Lambda[alpha => Future[UnlessShutdown[alpha]]]] = + new Parallel[Lambda[alpha => Future[UnlessShutdown[alpha]]]] { + override type F[X] = Future[UnlessShutdown[X]] + + override def applicative: Applicative[F] = parallelApplicativeFutureUnlessShutdownOpened + + override def monad: Monad[Lambda[alpha => Future[UnlessShutdown[alpha]]]] = + monadFutureUnlessShutdownOpened + + override def sequential: F ~> Lambda[alpha => Future[UnlessShutdown[alpha]]] = FunctionK.id + + override def parallel: Lambda[alpha => Future[UnlessShutdown[alpha]]] ~> F = FunctionK.id + } + + implicit def parallelInstanceFutureUnlessShutdown(implicit + ec: ExecutionContext + ): Parallel[FutureUnlessShutdown] = + Instance.subst[Parallel](parallelInstanceFutureUnlessShutdownOpened) + + class FutureUnlessShutdownThereafter extends Thereafter[FutureUnlessShutdown] { + override type Content[A] = FutureUnlessShutdownThereafterContent[A] + override def thereafter[A](f: FutureUnlessShutdown[A])(body: Try[UnlessShutdown[A]] => Unit)( + implicit ec: ExecutionContext + ): FutureUnlessShutdown[A] = + FutureUnlessShutdown(f.unwrap.thereafter(body)) + + override def thereafterF[A](f: FutureUnlessShutdown[A])( + body: Try[UnlessShutdown[A]] => Future[Unit] + )(implicit ec: ExecutionContext): FutureUnlessShutdown[A] = { + FutureUnlessShutdown(Thereafter[Future].thereafterF(f.unwrap)(body)) + } + } + + /** Use a type synonym instead 
of a type lambda so that the Scala compiler does not get confused during implicit resolution, + * at least for simple cases. + */ + type FutureUnlessShutdownThereafterContent[A] = Try[UnlessShutdown[A]] + implicit val thereafterFutureUnlessShutdown + : Thereafter.Aux[FutureUnlessShutdown, FutureUnlessShutdownThereafterContent] = + new FutureUnlessShutdownThereafter + + /** Enable `onShutdown` syntax on [[cats.data.EitherT]]`[`[[FutureUnlessShutdown]]`...]`. */ + implicit class EitherTOnShutdownSyntax[A, B]( + private val eitherT: EitherT[FutureUnlessShutdown, A, B] + ) extends AnyVal { + def onShutdown[C >: A, D >: B](f: => Either[C, D])(implicit + ec: ExecutionContext + ): EitherT[Future, C, D] = + EitherT(eitherT.value.onShutdown(f)) + + /** Evaluates `f` on shutdown but retains the result of the future. */ + def tapOnShutdown(f: => Unit)(implicit + ec: ExecutionContext, + errorLoggingContext: ErrorLoggingContext, + ): EitherT[FutureUnlessShutdown, A, B] = + EitherT(eitherT.value.tapOnShutdown(f)) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/lifecycle/Lifecycle.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/lifecycle/Lifecycle.scala new file mode 100644 index 0000000000..3c7392ebf1 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/lifecycle/Lifecycle.scala @@ -0,0 +1,190 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.lifecycle + +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.logging.{ErrorLoggingContext, TracedLogger} +import com.digitalasset.canton.tracing.{NoTracing, TraceContext} +import com.digitalasset.canton.util.ShowUtil.* +import io.grpc.{ManagedChannel, Server} +import org.apache.pekko.actor.ActorSystem +import org.apache.pekko.stream.Materializer +import org.slf4j.event.Level + +import java.util.concurrent.TimeUnit +import scala.concurrent.duration.* +import scala.util.Try + +/** Utilities for working with instances that support our lifecycle pattern. + */ +object Lifecycle extends NoTracing { + + /** How long will we wait for a graceful shutdown to complete + */ + private val defaultGracefulShutdownTimeout = 3.seconds + + /** How long will we wait for a forced shutdown to complete + */ + private val defaultForcedShutdownTimeout = 3.seconds + + /** Successful shutdowns taking longer than this will be reported */ + private val slowShutdownThreshold = 1.seconds + + /** Attempts to close all provided closable instances. + * Instances are closed in the order that they are provided. + * These instances are expected to synchronously close or throw. + * If an exception is encountered when closing an instance, we will still attempt to close other closeables + * and then throw a [[ShutdownFailedException]]. + * Exceptions thrown by `close` will be logged and + * the names of failed instances are wrapped into the [[ShutdownFailedException]]. 
+ */ + def close(instances: AutoCloseable*)(logger: TracedLogger): Unit = { + def stopSingle(instance: AutoCloseable): Option[String] = { + val prettiedInstance = show"${instance.toString.singleQuoted}" + logger.debug(s"Attempting to close $prettiedInstance...") + val mbException = Try(instance.close()) + .fold( + err => Some(err), + _ => None, + ) // we're only interested in the error, so basically invert `Try` + mbException.fold { + logger.debug(s"Successfully closed $prettiedInstance.") + } { ex => + logger.warn(s"Closing $prettiedInstance failed! Reason:", ex) + } + mbException.map(_ => instance.toString) + } + + // Do not use mapFilter here because mapFilter does not guarantee to work from left to right + val failedInstances = instances.foldLeft(Seq.empty[String]) { (acc, instance) => + acc ++ stopSingle(instance).toList + } + + NonEmpty.from(failedInstances).foreach { i => throw new ShutdownFailedException(i) } + } + + def toCloseableOption[A <: AutoCloseable](maybeClosable: Option[A]): AutoCloseable = + () => maybeClosable.foreach(_.close()) + + def toCloseableActorSystem( + system: ActorSystem, + logger: TracedLogger, + timeouts: ProcessingTimeout, + ): AutoCloseable = + new AutoCloseable() { + private val name = system.name + override def close(): Unit = { + implicit val loggingContext: ErrorLoggingContext = + ErrorLoggingContext.fromTracedLogger(logger)(TraceContext.empty) + timeouts.shutdownProcessing.await_(s"Actor system ($name)", logFailing = Some(Level.WARN))( + system.terminate() + ) + } + override def toString: String = s"Actor system ($name)" + } + + def toCloseableMaterializer(mat: Materializer, name: String): AutoCloseable = + new AutoCloseable { + override def close(): Unit = mat.shutdown() + override def toString: String = s"Materializer ($name)" + } + + def toCloseableChannel( + channel: ManagedChannel, + logger: TracedLogger, + name: String, + ): CloseableChannel = + new CloseableChannel(channel, logger, name) + def toCloseableServer(server: 
Server, logger: TracedLogger, name: String): CloseableServer = + new CloseableServer(server, logger, name) + + class CloseableChannel(val channel: ManagedChannel, logger: TracedLogger, name: String) + extends AutoCloseable { + override def close(): Unit = { + shutdownResource( + s"channel: $channel ($name)", + () => { val _ = channel.shutdown() }, + () => { val _ = channel.shutdownNow() }, + _ => true, + duration => channel.awaitTermination(duration.toMillis, TimeUnit.MILLISECONDS), + defaultGracefulShutdownTimeout, + defaultForcedShutdownTimeout, + logger, + verbose = false, + ) + } + + override def toString: String = s"ManagedChannel ($name)" + } + + class CloseableServer(val server: Server, logger: TracedLogger, name: String) + extends AutoCloseable { + override def close(): Unit = { + shutdownResource( + s"server: $server (${name})", + () => { val _ = server.shutdown() }, + () => { val _ = server.shutdownNow }, + _ => true, + duration => server.awaitTermination(duration.toMillis, TimeUnit.MILLISECONDS), + defaultGracefulShutdownTimeout, + defaultForcedShutdownTimeout, + logger, + ) + } + override def toString: String = s"ManagedServer (${name})" + } + + def shutdownResource[A]( + name: String, + shutdown: () => Unit, + shutdownNow: () => Unit, + awaitIdleness: FiniteDuration => Boolean, + awaitTermination: FiniteDuration => Boolean, + gracefulTimeout: FiniteDuration, + forcedTimeout: FiniteDuration, + logger: TracedLogger, + verbose: Boolean = true, + ): Unit = { + val started = Deadline.now + + def forceShutdown(message: String): Unit = { + logger.warn(s"$name: $message") + shutdownNow() + if (!awaitTermination(forcedTimeout)) { + logger.error(s"$name: failed to terminate within $forcedTimeout when forced.") + } + } + + try { + // give resource a little time to reach an idle state + if (verbose) + logger.debug(s"About to close $name within allotted $gracefulTimeout.") + val idle = awaitIdleness(gracefulTimeout) + if (!idle) { + logger.info(s"$name: idleness 
not reached within allotted $gracefulTimeout.") + } + + // give resource a little time to politely shutdown + shutdown() + val terminated = awaitTermination(gracefulTimeout) + + if (!terminated) { + forceShutdown(s"shutdown did not complete gracefully in allotted $gracefulTimeout.") + } else { + val end = Deadline.now + if (started + slowShutdownThreshold < end) { + logger.info(s"$name: Slow shutdown of ${end - started}.") + } else if (verbose) { + logger.debug(s"Closed $name after ${end - started}.") + } + } + } catch { + case _: InterruptedException => + forceShutdown("Interrupt during shutdown. Forcing shutdown now.") + // preserve interrupt status + Thread.currentThread().interrupt() + } + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/lifecycle/OnShutdownRunner.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/lifecycle/OnShutdownRunner.scala new file mode 100644 index 0000000000..9ccc803f8a --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/lifecycle/OnShutdownRunner.scala @@ -0,0 +1,124 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.lifecycle + +import com.digitalasset.canton.DiscardOps +import com.digitalasset.canton.logging.TracedLogger +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.TryUtil.* +import com.google.common.annotations.VisibleForTesting + +import java.util.concurrent.atomic.{AtomicBoolean, AtomicLong} +import scala.collection.concurrent.TrieMap +import scala.util.Try + +trait OnShutdownRunner { this: AutoCloseable => + + private val closingFlag: AtomicBoolean = new AtomicBoolean(false) + + private val incrementor: AtomicLong = new AtomicLong(0L) + private val onShutdownTasks: TrieMap[Long, RunOnShutdown] = TrieMap.empty[Long, RunOnShutdown] + + protected def logger: TracedLogger + + /** Check whether we're closing. + * Susceptible to race conditions; unless you're using this as a flag to the retry lib or you really know + * what you're doing, prefer `performUnlessClosing` and friends. + */ + def isClosing: Boolean = closingFlag.get() + + /** Register a task to run when shutdown is initiated. + * + * You can use this for example to register tasks that cancel long-running computations, + * whose termination you can then wait for in "closeAsync". 
+ */ + def runOnShutdown_[T]( + task: RunOnShutdown + )(implicit traceContext: TraceContext): Unit = { + runOnShutdown(task).discard + } + + /** Same as [[runOnShutdown_]] but returns a token that allows you to remove the task explicitly from being run + * using [[cancelShutdownTask]] + */ + def runOnShutdown[T]( + task: RunOnShutdown + )(implicit traceContext: TraceContext): Long = { + val token = incrementor.getAndIncrement() + onShutdownTasks + // First remove the tasks that are done + .filterInPlace { case (_, run) => + !run.done + } + // Then add the new one + .put(token, task) + .discard + if (isClosing) runOnShutdownTasks() + token + } + + /** Removes a shutdown task from the list using a token returned by [[runOnShutdown]] + */ + def cancelShutdownTask(token: Long): Unit = onShutdownTasks.remove(token).discard + def containsShutdownTask(token: Long): Boolean = onShutdownTasks.contains(token) + + private def runOnShutdownTasks()(implicit traceContext: TraceContext): Unit = { + onShutdownTasks.toList.foreach { case (token, task) => + Try { + onShutdownTasks + .remove(token) + .filterNot(_.done) + // TODO(#8594) Time limit the shutdown tasks similar to how we time limit the readers in FlagCloseable + .foreach(_.run()) + }.forFailed(t => logger.warn(s"Task ${task.name} failed on shutdown!", t)) + } + } + + @VisibleForTesting + protected def runStateChanged(waitingState: Boolean = false): Unit = {} // used for unit testing + + protected def onFirstClose(): Unit + + /** Blocks until all earlier tasks have completed and then prevents further tasks from being run. + */ + protected[this] override def close(): Unit = { + import TraceContext.Implicits.Empty.* + + val firstCallToClose = closingFlag.compareAndSet(false, true) + runStateChanged() + if (firstCallToClose) { + // First run onShutdown tasks. + // Important to run them in the beginning as they may be used to cancel long-running tasks. 
+ runOnShutdownTasks() + + onFirstClose() + } else { + // TODO(i8594): Ensure we call close only once + } + } +} + +object OnShutdownRunner { + + /** A closeable container for managing [[RunOnShutdown]] tasks and nothing else. */ + class PureOnShutdownRunner(override protected val logger: TracedLogger) + extends AutoCloseable + with OnShutdownRunner { + override protected def onFirstClose(): Unit = () + override def close(): Unit = super.close() + } +} + +/** Trait that can be registered with a [FlagCloseable] to run on shutdown */ +trait RunOnShutdown { + + /** the name, used for logging during shutdown */ + def name: String + + /** true if the task has already run (maybe elsewhere) */ + def done: Boolean + + /** invoked by [FlagCloseable] during shutdown */ + def run(): Unit +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/lifecycle/PerformUnlessClosing.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/lifecycle/PerformUnlessClosing.scala new file mode 100644 index 0000000000..c906e83c92 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/lifecycle/PerformUnlessClosing.scala @@ -0,0 +1,271 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.lifecycle + +import cats.data.EitherT +import cats.syntax.traverse.* +import com.digitalasset.canton.DiscardOps +import com.digitalasset.canton.concurrent.Threading +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.Thereafter.syntax.* +import com.digitalasset.canton.util.{Checked, CheckedT} + +import java.util.concurrent.atomic.AtomicReference +import scala.collection.immutable.MultiSet +import scala.concurrent.duration.FiniteDuration +import scala.concurrent.{ExecutionContext, Future} +import scala.util.Try +import scala.util.control.NonFatal + +/** Provides a way to synchronize closing with other running tasks in the class, such that new tasks aren't scheduled + * while closing, and such that closing waits for the scheduled tasks. + * + * Use this type to pass such synchronization objects to other objects that merely need to synchronize, + * but should not be able to initiate closing themselves. To that end, this trait does not expose + * the [[java.lang.AutoCloseable.close]] method. + * + * @see FlagCloseable does expose the [[java.lang.AutoCloseable.close]] method. + */ +trait PerformUnlessClosing extends OnShutdownRunner { this: AutoCloseable => + import PerformUnlessClosing.* + + protected def closingTimeout: FiniteDuration + + /** Poor man's read-write lock; stores the number of tasks holding the read lock. If a write lock is held, this + * goes to -1. Not using Java's ReadWriteLocks since they are about thread synchronization, and since we can't + * count on acquires and releases happening on the same thread, since we support the synchronization of futures. + */ + private val readerState = new AtomicReference(ReaderState.empty) + + /** How often to poll to check that all tasks have completed. */ + protected def maxSleepMillis: Long = 500 + + /** Performs the task given by `f` unless a shutdown has been initiated. 
+ * The shutdown will only begin after `f` completes, but other tasks may execute concurrently with `f`, if started using this + * function, or one of the other variants ([[performUnlessClosingF]] and [[performUnlessClosingEitherT]]). + * The tasks are assumed to take less than [[closingTimeout]] to complete. + * + * DO NOT CALL `this.close` as part of `f`, because it will result in a deadlock. + * + * @param f The task to perform + * @return [[scala.None$]] if a shutdown has been initiated. Otherwise the result of the task. + */ + def performUnlessClosing[A]( + name: String + )(f: => A)(implicit traceContext: TraceContext): UnlessShutdown[A] = { + if (isClosing || !addReader(name)) { + logger.debug(s"Won't schedule the task '$name' as this object is closing") + UnlessShutdown.AbortedDueToShutdown + } else + try { + UnlessShutdown.Outcome(f) + } finally { + removeReader(name) + } + } + + /** Performs the Future given by `f` unless a shutdown has been initiated. The future is lazy and not evaluated during shutdown. + * The shutdown will only begin after `f` completes, but other tasks may execute concurrently with `f`, if started using this + * function, or one of the other variants ([[performUnlessClosing]] and [[performUnlessClosingEitherT]]). + * The tasks are assumed to take less than [[closingTimeout]] to complete. + * + * DO NOT CALL `this.close` as part of `f`, because it will result in a deadlock. + * + * @param f The task to perform + * @return The future completes with [[com.digitalasset.canton.lifecycle.UnlessShutdown.AbortedDueToShutdown]] if + * a shutdown has been initiated. + * Otherwise the result of the task wrapped in [[com.digitalasset.canton.lifecycle.UnlessShutdown.Outcome]]. 
+ */ + def performUnlessClosingF[A](name: String)( + f: => Future[A] + )(implicit ec: ExecutionContext, traceContext: TraceContext): FutureUnlessShutdown[A] = + FutureUnlessShutdown(internalPerformUnlessClosingF(name)(f).sequence) + + def performUnlessClosingUSF[A](name: String)( + f: => FutureUnlessShutdown[A] + )(implicit ec: ExecutionContext, traceContext: TraceContext): FutureUnlessShutdown[A] = + performUnlessClosingF(name)(f.unwrap).subflatMap(Predef.identity) + + protected def internalPerformUnlessClosingF[A](name: String)( + f: => Future[A] + )(implicit ec: ExecutionContext, traceContext: TraceContext): UnlessShutdown[Future[A]] = { + if (isClosing || !addReader(name)) { + logger.debug(s"Won't schedule the future '$name' as this object is closing") + UnlessShutdown.AbortedDueToShutdown + } else { + val fut = Try(f).fold(Future.failed, x => x).thereafter { _ => + removeReader(name) + } + trackFuture(fut) + UnlessShutdown.Outcome(fut) + } + } + + /** Performs the EitherT[Future] given by `etf` unless a shutdown has been initiated, in which case the provided error is returned instead. + * Both `etf` and the error are lazy; `etf` is only evaluated if there is no shutdown, the error only if we're shutting down. + * The shutdown will only begin after `etf` completes, but other tasks may execute concurrently with `etf`, if started using this + * function, or one of the other variants ([[performUnlessClosing]] and [[performUnlessClosingF]]). + * The tasks are assumed to take less than [[closingTimeout]] to complete. + * + * DO NOT CALL `this.close` as part of `etf`, because it will result in a deadlock. 
+ * + * @param etf The task to perform + */ + def performUnlessClosingEitherT[E, R](name: String, onClosing: => E)( + etf: => EitherT[Future, E, R] + )(implicit ec: ExecutionContext, traceContext: TraceContext): EitherT[Future, E, R] = { + EitherT(performUnlessClosingF(name)(etf.value).unwrap.map(_.onShutdown(Left(onClosing)))) + } + + def performUnlessClosingEitherU[E, R](name: String)( + etf: => EitherT[Future, E, R] + )(implicit + ec: ExecutionContext, + traceContext: TraceContext, + ): EitherT[FutureUnlessShutdown, E, R] = { + EitherT(performUnlessClosingF(name)(etf.value)) + } + + def performUnlessClosingEitherUSF[E, R](name: String)( + etf: => EitherT[FutureUnlessShutdown, E, R] + )(implicit + ec: ExecutionContext, + traceContext: TraceContext, + ): EitherT[FutureUnlessShutdown, E, R] = { + EitherT(performUnlessClosingUSF(name)(etf.value)) + } + + def performUnlessClosingCheckedT[A, N, R](name: String, onClosing: => Checked[A, N, R])( + etf: => CheckedT[Future, A, N, R] + )(implicit + ec: ExecutionContext, + traceContext: TraceContext, + ): CheckedT[Future, A, N, R] = { + CheckedT(performUnlessClosingF(name)(etf.value).unwrap.map(_.onShutdown(onClosing))) + } + + def performUnlessClosingEitherTF[E, R](name: String, onClosing: => E)( + etf: => EitherT[Future, E, Future[R]] + )(implicit ec: ExecutionContext, traceContext: TraceContext): EitherT[Future, E, Future[R]] = { + if (isClosing || !addReader(name)) { + logger.debug(s"Won't schedule the future '$name' as this object is closing") + EitherT.leftT(onClosing) + } else { + val res = Try(etf.value).fold(Future.failed, x => x) + trackFuture(res) + val _ = res + .flatMap { + case Left(_) => Future.unit + case Right(value) => value.map(_ => ()) + } + .thereafter { _ => + removeReader(name) + } + EitherT(res) + } + } + + /** track running futures on shutdown + * + * set to true to get detailed information about all futures that did not complete during + * shutdown. if set to false, we don't do anything. 
+ */ + protected def keepTrackOfOpenFutures: Boolean = false + + private val scheduled = new AtomicReference[Seq[RunningFuture]](Seq()) + + private def trackFuture(fut: Future[Any])(implicit executionContext: ExecutionContext): Unit = + if (keepTrackOfOpenFutures) { + val ex = new Exception("location") + Future { + scheduled + .updateAndGet(x => x.filterNot(_.fut.isCompleted) :+ RunningFuture(fut, ex)) + }.discard + } + + private def dumpRunning()(implicit traceContext: TraceContext): Unit = { + scheduled.updateAndGet(x => x.filterNot(_.fut.isCompleted)).foreach { cur => + logger.debug("Future created from here is still running", cur.location) + } + } + + protected def onClosed(): Unit = () + + protected def onCloseFailure(e: Throwable): Unit = throw e + + /** Blocks until all earlier tasks have completed and then prevents further tasks from being run. + */ + @SuppressWarnings(Array("org.wartremover.warts.While", "org.wartremover.warts.Var")) + final override def onFirstClose(): Unit = { + import TraceContext.Implicits.Empty.* + + /* closingFlag has already been set to true. This ensures that we can shut down cleanly, unless one of the + readers takes longer to complete than the closing timeout. After the flag is set to true, the readerCount + can only decrease (since it only increases in performUnlessClosingF, and since the || there short-circuits). + */ + // Poll for tasks to finish. Inefficient, but we're only doing this during shutdown. 
+ val deadline = closingTimeout.fromNow + var sleepMillis = 1L + while ( + (readerState.getAndUpdate { current => + if (current == ReaderState.empty) { + current.copy(count = -1) + } else current + }.count != 0) && deadline.hasTimeLeft() + ) { + val readers = readerState.get() + logger.debug( + s"${readers.count} active tasks (${readers.readers.mkString(",")}) preventing closing; sleeping for ${sleepMillis}ms" + ) + runStateChanged(true) + Threading.sleep(sleepMillis) + sleepMillis = (sleepMillis * 2) min maxSleepMillis min deadline.timeLeft.toMillis + } + if (readerState.get.count >= 0) { + logger.warn( + s"Timeout ${closingTimeout} expired, but tasks still running. ${forceShutdownStr}" + ) + dumpRunning() + } + if (keepTrackOfOpenFutures) { + logger.warn("Tracking of open futures is enabled, but this is only meant for debugging!") + } + try { + onClosed() + } catch { + case NonFatal(e) => onCloseFailure(e) + } + } + + private def addReader(reader: String): Boolean = + (readerState.updateAndGet { case state @ ReaderState(cnt, readers) => + if (cnt == Int.MaxValue) + throw new IllegalStateException("Overflow on active reader locks") + if (cnt >= 0) { + ReaderState(cnt + 1, readers + reader) + } else state + }).count > 0 + + private def removeReader(reader: String): Unit = { + val _ = readerState.updateAndGet { case ReaderState(cnt, readers) => + if (cnt <= 0) + throw new IllegalStateException("No active readers, but still trying to deactivate one") + ReaderState(cnt - 1, readers - reader) + } + } +} + +object PerformUnlessClosing { + + /** Logged upon forced shutdown. Pulled out a string here so that test log checking can refer to it. 
*/ + val forceShutdownStr = "Shutting down forcibly" + + private final case class ReaderState(count: Int, readers: MultiSet[String]) + + private object ReaderState { + val empty: ReaderState = ReaderState(0, MultiSet.empty) + } + + private final case class RunningFuture(fut: Future[Any], location: Exception) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/lifecycle/PromiseUnlessShutdown.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/lifecycle/PromiseUnlessShutdown.scala new file mode 100644 index 0000000000..e67773c998 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/lifecycle/PromiseUnlessShutdown.scala @@ -0,0 +1,53 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.lifecycle + +import com.digitalasset.canton.DiscardOps +import com.digitalasset.canton.concurrent.{FutureSupervisor, SupervisedPromise} +import com.digitalasset.canton.logging.ErrorLoggingContext +import org.slf4j.event.Level + +import scala.concurrent.duration.* +import scala.concurrent.{ExecutionContext, Future, Promise} +import scala.util.Try + +/** A wrapper for Promise that provides supervision of uncompleted promise's futures and aborting a promise due to shutdown */ +class PromiseUnlessShutdown[A]( + description: String, + futureSupervisor: FutureSupervisor, + logAfter: Duration = 10.seconds, + logLevel: Level = Level.DEBUG, +)(implicit + ecl: ErrorLoggingContext, + ec: ExecutionContext, +) extends Promise[UnlessShutdown[A]] + with RunOnShutdown { + + private val promise: SupervisedPromise[UnlessShutdown[A]] = + new SupervisedPromise[UnlessShutdown[A]](description, futureSupervisor, logAfter, logLevel) + + override def future: Future[UnlessShutdown[A]] = promise.future + + override def isCompleted: Boolean = promise.isCompleted + + override def tryComplete(result: 
Try[UnlessShutdown[A]]): Boolean = promise.tryComplete(result) + + def completeWith(other: FutureUnlessShutdown[A]): PromiseUnlessShutdown.this.type = + super.completeWith(other.unwrap) + + def futureUS: FutureUnlessShutdown[A] = FutureUnlessShutdown(future) + + /** Complete the promise with an outcome value. + * If the promise has already been completed with an outcome, the new outcome will be ignored. + */ + def outcome(value: A): Unit = + super.trySuccess(UnlessShutdown.Outcome(value)).discard + + /** Complete the promise with a shutdown */ + def shutdown(): Unit = promise.trySuccess(UnlessShutdown.AbortedDueToShutdown).discard + + override def name: String = description + override def done: Boolean = isCompleted + override def run(): Unit = shutdown() +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/lifecycle/ShutdownFailedException.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/lifecycle/ShutdownFailedException.scala new file mode 100644 index 0000000000..f4f4413806 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/lifecycle/ShutdownFailedException.scala @@ -0,0 +1,10 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.lifecycle

import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.util.ShowUtil.*

/** Signals that one or more named instances could not be closed cleanly during shutdown. */
class ShutdownFailedException(instances: NonEmpty[Seq[String]])
    extends RuntimeException(show"Unable to close ${instances.map(_.singleQuoted)}.")

// --- UnlessShutdown.scala (same package) ---

import cats.{Applicative, Eval, Monad, Monoid, Traverse}

import scala.annotation.tailrec

/** Either the result of a computation ([[UnlessShutdown.Outcome]]) or a marker that the
  * computation was aborted because a shutdown is in progress
  * ([[UnlessShutdown.AbortedDueToShutdown]]).
  *
  * Structurally a copy of [[scala.Option]]; the dedicated type exists to document intent.
  *
  * @tparam A The type of the successful outcome.
  */
sealed trait UnlessShutdown[+A] extends Product with Serializable {

  /** Runs the side effect on the outcome, if one is available. */
  def foreach(f: A => Unit): Unit

  /** Transforms the outcome; an abort stays an abort. */
  def map[B](f: A => B): UnlessShutdown[B]

  /** Sequences a dependent computation; abortion due to shutdown propagates. */
  def flatMap[B](f: A => UnlessShutdown[B]): UnlessShutdown[B]

  /** Effectful (applicative) traversal; `f` is not invoked when aborted. */
  def traverse[F[_], B](f: A => F[B])(implicit F: Applicative[F]): F[UnlessShutdown[B]]

  /** Converts the outcome into a [[scala.Right$]], or a [[scala.Left$]]`(aborted)` upon abortion. */
  def toRight[L](aborted: => L): Either[L, A]

  /** Returns the outcome, evaluating `ifShutdown` only upon abortion.
    *
    * Analogue of [[scala.Option.getOrElse]].
    */
  def onShutdown[B >: A](ifShutdown: => B): B

  /** True iff this holds an actual outcome (i.e. is not an abort). */
  def isOutcome: Boolean
}

object UnlessShutdown {

  /** A successfully computed result. */
  final case class Outcome[+A](result: A) extends UnlessShutdown[A] {
    override def foreach(f: A => Unit): Unit = f(result)
    override def map[B](f: A => B): Outcome[B] = Outcome(f(result))
    override def flatMap[B](f: A => UnlessShutdown[B]): UnlessShutdown[B] = f(result)
    override def traverse[F[_], B](f: A => F[B])(implicit F: Applicative[F]): F[UnlessShutdown[B]] =
      F.map(f(result))(Outcome(_))
    override def toRight[L](aborted: => L): Either[L, A] = Right(result)
    override def onShutdown[B >: A](ifShutdown: => B): A = result
    override def isOutcome: Boolean = true
  }

  /** The computation was abandoned because a shutdown was initiated. */
  case object AbortedDueToShutdown extends UnlessShutdown[Nothing] {
    override def foreach(f: Nothing => Unit): Unit = ()
    override def map[B](f: Nothing => B): AbortedDueToShutdown = this
    override def flatMap[B](f: Nothing => UnlessShutdown[B]): AbortedDueToShutdown = this
    override def traverse[F[_], B](f: Nothing => F[B])(implicit
        F: Applicative[F]
    ): F[UnlessShutdown[B]] = F.pure(this)
    override def toRight[L](aborted: => L): Either[L, Nothing] = Left(aborted)
    override def onShutdown[B >: Nothing](ifShutdown: => B): B = ifShutdown
    override def isOutcome: Boolean = false
  }
  type AbortedDueToShutdown = AbortedDueToShutdown.type

  val unit: UnlessShutdown[Unit] = Outcome(())

  /** [[scala.None$]] maps to [[AbortedDueToShutdown]], [[scala.Some$]] to [[Outcome]]. */
  def fromOption[A](x: Option[A]): UnlessShutdown[A] = x match {
    case Some(value) => Outcome(value)
    case None => AbortedDueToShutdown
  }

  /** Cats [[cats.Traverse]] and [[cats.Monad]] instance for [[UnlessShutdown]].
    *
    * [[AbortedDueToShutdown]] propagates through all combinators.
    */
  implicit val catsStdInstsUnlessShutdown: Traverse[UnlessShutdown] with Monad[UnlessShutdown] =
    new Traverse[UnlessShutdown] with Monad[UnlessShutdown] {
      override def pure[A](x: A): UnlessShutdown[A] = Outcome(x)

      override def flatMap[A, B](x: UnlessShutdown[A])(
          f: A => UnlessShutdown[B]
      ): UnlessShutdown[B] = x.flatMap(f)

      // Stack-safe by virtue of @tailrec.
      override def tailRecM[A, B](a: A)(f: A => UnlessShutdown[Either[A, B]]): UnlessShutdown[B] = {
        @tailrec def loop(current: A): UnlessShutdown[B] = f(current) match {
          case Outcome(Left(next)) => loop(next)
          case Outcome(Right(done)) => Outcome(done)
          case AbortedDueToShutdown => AbortedDueToShutdown
        }
        loop(a)
      }

      override def traverse[G[_], A, B](x: UnlessShutdown[A])(f: A => G[B])(implicit
          G: Applicative[G]
      ): G[UnlessShutdown[B]] =
        x.traverse(f)

      override def foldLeft[A, B](x: UnlessShutdown[A], b: B)(f: (B, A) => B): B = x match {
        case Outcome(value) => f(b, value)
        case AbortedDueToShutdown => b
      }

      override def foldRight[A, B](x: UnlessShutdown[A], lb: Eval[B])(
          f: (A, Eval[B]) => Eval[B]
      ): Eval[B] = x match {
        case Outcome(value) => f(value, lb)
        case AbortedDueToShutdown => lb
      }
    }

  /** Lifts a [[cats.Monoid]] on outcomes to [[UnlessShutdown]].
    * [[AbortedDueToShutdown]] is absorbing: combining with it yields an abort.
    */
  implicit def monoidUnlessShutdown[A](implicit monoid: Monoid[A]): Monoid[UnlessShutdown[A]] =
    new Monoid[UnlessShutdown[A]] {
      override def empty: UnlessShutdown[A] = Outcome(monoid.empty)

      override def combine(x1: UnlessShutdown[A], x2: UnlessShutdown[A]): UnlessShutdown[A] =
        (x1, x2) match {
          case (Outcome(a), Outcome(b)) => Outcome(monoid.combine(a, b))
          case _ => AbortedDueToShutdown
        }
    }
}

// --- package.scala (package com.digitalasset.canton) ---

package object lifecycle {

  /** The monad combination of [[scala.concurrent.Future]] with [[UnlessShutdown]] as an abstract type
    *
    * @see FutureUnlessShutdownSig.Ops for extension methods on the abstract type
    */
  type FutureUnlessShutdown[+A] = FutureUnlessShutdownImpl.Instance.FutureUnlessShutdown[A]
}
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.logging

import cats.syntax.functor.*
import cats.syntax.option.*
import ch.qos.logback.classic.Level
import ch.qos.logback.classic.spi.ILoggingEvent
import ch.qos.logback.core.spi.AppenderAttachable
import ch.qos.logback.core.{Appender, AppenderBase}
import com.digitalasset.canton.DiscardOps
import com.github.blemale.scaffeine.Scaffeine

import java.util
import scala.collection.mutable

/** Logback appender that keeps a bounded queue of errors/warnings that have been logged and
  * associated log entries with the same trace-id.
  *
  * Events are forwarded unchanged to all attached appenders; in addition, events carrying a
  * trace-id (via MDC key [[CanLogTraceContext.traceIdMdcKey]]) are cached so that when a
  * WARN/ERROR arrives, the preceding events of the same trace can be reported alongside it
  * (see [[lastErrors]] and [[lastErrorTrace]]) and flushed to a dedicated "last errors" file
  * appender, if one is attached under [[setLastErrorsFileAppenderName]].
  */
@SuppressWarnings(Array("org.wartremover.warts.Var"))
class LastErrorsAppender()
    extends AppenderBase[ILoggingEvent]
    with AppenderAttachable[ILoggingEvent] {

  private val appenders = mutable.ListBuffer[Appender[ILoggingEvent]]()

  /** Treat the last errors file appender separately because we only write there when we encounter an error */
  private var lastErrorsFileAppender: Option[Appender[ILoggingEvent]] = None

  // Defaults; logback invokes the set* methods below during configuration, i.e. after
  // construction but before the first event is appended.
  private var maxTraces = 128
  private var maxEvents = 1024
  private var maxErrors = 128
  private var lastErrorsFileAppenderName = ""

  /** An error/warn event with previous events of the same trace-id */
  private case class ErrorWithEvents(error: ILoggingEvent, events: Seq[ILoggingEvent])

  // BUGFIX: these caches must be `lazy`. As strict `val`s they were built at construction
  // time with the default `maxTraces`/`maxErrors`, so the logback setters
  // `setMaxTraces`/`setMaxErrors` (which run after construction, during configuration)
  // silently had no effect on the cache capacities. Being lazy, the caches are created on
  // first use — i.e. on the first appended event, after configuration has completed.
  private lazy val eventsCache =
    Scaffeine().maximumSize(maxTraces.toLong).build[String, BoundedQueue[ILoggingEvent]]()
  private lazy val errorsCache =
    Scaffeine().maximumSize(maxErrors.toLong).build[String, ErrorWithEvents]()

  private def isLastErrorsFileAppender(appender: Appender[ILoggingEvent]): Boolean =
    appender.getName == lastErrorsFileAppenderName

  /** Return a map of traceId to error/warning event */
  def lastErrors: Map[String, ILoggingEvent] = errorsCache.asMap().toMap.fmap(_.error)

  /** Returns a trace (sequence of events with the same trace-id) for an error */
  def lastErrorTrace(traceId: String): Option[Seq[ILoggingEvent]] =
    errorsCache.getIfPresent(traceId).map(_.events)

  /** Allows to override the maximum numbers of errors to keep from a logback.xml */
  def setMaxErrors(errors: Int): Unit = {
    maxErrors = errors
  }

  /** Allows to override the maximum events to keep per trace-id from a logback.xml */
  def setMaxEvents(events: Int): Unit = {
    maxEvents = events
  }

  /** Allows to override the maximum traces to keep from a logback.xml */
  def setMaxTraces(traces: Int): Unit = {
    maxTraces = traces
  }

  /** Names the attached appender that should receive only error traces (set from logback.xml). */
  def setLastErrorsFileAppenderName(name: String): Unit = {
    lastErrorsFileAppenderName = name
  }

  // Extracts the trace-id from the event's MDC, if present.
  private def getTraceId(event: ILoggingEvent): Option[String] =
    Option(event.getMDCPropertyMap.get(CanLogTraceContext.traceIdMdcKey))

  // Handles a WARN/ERROR event: records it (with its related same-trace events) in the
  // errors cache and flushes the related events to the last-errors file appender.
  private def processError(event: ILoggingEvent): Unit = {
    getTraceId(event).foreach { tid =>
      eventsCache.getIfPresent(tid).foreach { relatedEvents =>
        lazy val relatedEventsSeq = relatedEvents.toSeq

        // Update the errors cache with the new related events since the last flush or create a first error entry
        errorsCache
          .asMap()
          .updateWith(tid) {
            case Some(errorWithEvents) =>
              Some(errorWithEvents.copy(events = errorWithEvents.events ++ relatedEventsSeq))
            case None => Some(ErrorWithEvents(event, relatedEventsSeq))
          }
          .discard

        // Flush error with related events to last errors file appender
        lastErrorsFileAppender.foreach { appender =>
          relatedEvents.removeAll().foreach(appender.doAppend)
        }
      }
    }

  }

  // Caches every traced event and triggers error processing for WARN and above.
  private def processEvent(event: ILoggingEvent): Unit = {
    // Always cache the event if it has a trace-id
    getTraceId(event).foreach { tid =>
      eventsCache.asMap().updateWith(tid) { eventsQ =>
        eventsQ.getOrElse(new BoundedQueue[ILoggingEvent](maxEvents)).enqueue(event).some
      }
    }

    // If the event is a warning or error, find the previous related events
    if (event.getLevel.isGreaterOrEqual(Level.WARN))
      processError(event)
  }

  override def append(event: ILoggingEvent): Unit = {
    appenders.foreach(_.doAppend(event))
    processEvent(event)
  }

  override def addAppender(newAppender: Appender[ILoggingEvent]): Unit = {
    if (isLastErrorsFileAppender(newAppender))
      lastErrorsFileAppender = Some(newAppender)
    else
      appenders += newAppender
  }

  override def iteratorForAppenders(): util.Iterator[Appender[ILoggingEvent]] = {
    val it = lastErrorsFileAppender.iterator ++ appenders.iterator
    new util.Iterator[Appender[ILoggingEvent]] {
      override def hasNext: Boolean = it.hasNext
      override def next(): Appender[ILoggingEvent] = it.next()
    }
  }

  override def getAppender(name: String): Appender[ILoggingEvent] = throw new NotImplementedError()

  override def isAttached(appender: Appender[ILoggingEvent]): Boolean =
    appenders.contains(appender) || lastErrorsFileAppender.contains(appender)

  override def detachAndStopAllAppenders(): Unit = {
    appenders.foreach(_.stop())
    appenders.clear()
    lastErrorsFileAppender.foreach(_.stop())
    lastErrorsFileAppender = None
  }

  override def stop(): Unit = {
    super.stop()
    appenders.foreach(_.stop())
    lastErrorsFileAppender.foreach(_.stop())
  }

  override def detachAppender(appender: Appender[ILoggingEvent]): Boolean = {

    if (isLastErrorsFileAppender(appender)) {
      lastErrorsFileAppender = None
      true
    } else {
      val index = appenders.indexOf(appender)
      if (index > -1) {
        appenders.remove(index).discard
      }
      index != -1
    }
  }

  override def detachAppender(name: String): Boolean = throw new NotImplementedError()
}

/** A mutable queue that evicts from the front so it never holds more than `maxQueueSize` elements. */
@SuppressWarnings(Array("org.wartremover.warts.While"))
class BoundedQueue[A](maxQueueSize: Int) extends mutable.Queue[A] {

  // Drops the oldest entries until the size bound holds again.
  private def trim(): Unit = {
    while (size > maxQueueSize) dequeue().discard
  }

  override def addOne(elem: A): BoundedQueue.this.type = {
    val ret = super.addOne(elem)
    trim()
    ret
  }

  override def addAll(elems: IterableOnce[A]): BoundedQueue.this.type = {
    val ret = super.addAll(elems)
    trim()
    ret
  }

}

// --- RewritingAppender.scala (same package) ---

import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference}

/** Rewrite rule holder, populated by logback from a logback.xml `<rewrite>` element. */
final class Rewrite {

  val logger = new AtomicReference[String]("")
  val level = new AtomicReference[classic.Level](classic.Level.ERROR)
  // All contains strings must match the logger message for the rewrite
  val contains = new AtomicReference[Seq[String]](Seq())
  val exceptionMessage = new AtomicReference[String]("")
  val isTesting = new AtomicBoolean(false)

  def setMaxLevel(levelStr: String): Unit = level.set(classic.Level.toLevel(levelStr))
  def setLogger(loggerStr: String): Unit = logger.set(loggerStr)
  def setContains(containsStr: String): Unit = {
    val _ = contains.updateAndGet(_ :+ containsStr)
  }
  def setExceptionMessage(exceptionMessageStr: String): Unit =
    exceptionMessage.set(exceptionMessageStr)
  def setTesting(isTesting: Boolean): Unit = this.isTesting.set(isTesting)
}
/** Rewriting log appender
  *
  * Allows to rewrite log levels of external appenders. A logback implementation of
  * http://logging.apache.org/log4j/1.2/apidocs/org/apache/log4j/rewrite/RewriteAppender.html
  *
  * If testing is set to true, we will include the rewrite rules that are marked as testing only.
  *
  * Configured from logback.xml with nested `<rewrite>` elements (see [[Rewrite]]): each rule
  * names a logger, an optional set of `contains` message substrings, an optional exception
  * message substring, and the maximum level the matching events should be demoted to.
  * Events are then forwarded (possibly relevelled) to all attached appenders.
  */
class RewritingAppender()
    extends AppenderBase[ILoggingEvent]
    with AppenderAttachable[ILoggingEvent] {

  // Internal, immutable form of a configured Rewrite rule.
  private case class MyRule(
      maxLevel: classic.Level,
      contains: Seq[String],
      exceptionMessage: String,
  )

  private val appenders = ListBuffer[Appender[ILoggingEvent]]()
  // Rules keyed by logger name; appended to atomically in setRewrite.
  private val rules = new AtomicReference[Map[String, Seq[MyRule]]](Map())
  private val testing = new AtomicBoolean(false)

  // Wrapper that delegates every accessor to the original event but reports the rewritten level.
  private class ReLog(event: ILoggingEvent, level: classic.Level) extends ILoggingEvent {
    override def getThreadName: String = event.getThreadName
    override def getLevel: classic.Level = level
    override def getMessage: String = event.getMessage
    override def getArgumentArray: Array[AnyRef] = event.getArgumentArray
    override def getFormattedMessage: String = event.getFormattedMessage
    override def getLoggerName: String = event.getLoggerName
    override def getLoggerContextVO: LoggerContextVO = event.getLoggerContextVO
    override def getThrowableProxy: IThrowableProxy = event.getThrowableProxy
    override def getCallerData: Array[StackTraceElement] = event.getCallerData
    override def hasCallerData: Boolean = event.hasCallerData
    override def getMDCPropertyMap: util.Map[String, String] = event.getMDCPropertyMap
    override def getMdc: util.Map[String, String] = throw new NotImplementedError(
      "deprecated method"
    )
    override def getTimeStamp: Long = event.getTimeStamp
    override def prepareForDeferredProcessing(): Unit = event.prepareForDeferredProcessing()

    override def getKeyValuePairs: util.List[KeyValuePair] = event.getKeyValuePairs

    override def getMarkerList: util.List[Marker] = event.getMarkerList

    override def getNanoseconds: Int = event.getNanoseconds

    override def getSequenceNumber: Long = event.getSequenceNumber
  }

  /** Enables rules flagged as testing-only (set from logback.xml before the rules). */
  def setTesting(isTesting: Boolean): Unit = {
    this.testing.set(isTesting)
  }

  /** Registers one rewrite rule; testing-only rules are dropped unless testing is enabled. */
  def setRewrite(rule: Rewrite): Unit = {
    if (!rule.isTesting.get() || testing.get) {
      val _ = rules.updateAndGet { map =>
        val loggerName = rule.logger.get()
        val current = map.getOrElse(loggerName, Seq())
        val myRule = MyRule(rule.level.get(), rule.contains.get(), rule.exceptionMessage.get())
        map + (loggerName -> (current :+ myRule))
      }
    }
  }

  override def append(event: ILoggingEvent): Unit = {

    // Yields the rule's maxLevel if the rule matches this event: the event's level must be
    // strictly above maxLevel, every `contains` substring must occur in the formatted
    // message, and (if set) the exception message substring must occur in the attached
    // throwable's message.
    def evalRule(rule: MyRule): Option[classic.Level] =
      for {
        _ <- if (rule.maxLevel.isGreaterOrEqual(event.getLevel)) None else Some(())
        _ <-
          if (rule.contains.forall(r => event.getFormattedMessage.contains(r))) Some(())
          else None
        _ <-
          if (
            rule.exceptionMessage.isEmpty ||
            Option(event.getThrowableProxy).exists(_.getMessage.contains(rule.exceptionMessage))
          ) {
            Some(())
          } else None
      } yield rule.maxLevel
    // First matching rule wins; otherwise the event is forwarded unchanged.
    val forward = (for {
      loggerRules <- getRules(event.getLoggerName)
      maxLevel <- loggerRules.mapFilter(evalRule).headOption
    } yield new ReLog(event, maxLevel)).getOrElse(event)

    appenders.foreach(_.doAppend(forward))
  }

  // Looks up rules for the logger name, also trying the name with any ':' or '/'
  // suffix (context information) stripped.
  private def getRules(loggerName: String): Option[Seq[MyRule]] = {
    def stripAfter(chr: Char): List[String] = {
      val idx = loggerName.indexOf(chr.toInt)
      if (idx == -1) List() else List(loggerName.take(idx))
    }
    // optionally strip the context information when trying to match the logger
    val tmp = rules.get()
    (List(loggerName) ++ stripAfter(':') ++ stripAfter('/')).map(tmp.get).collectFirst {
      case Some(rules) => rules
    }
  }

  override def addAppender(newAppender: Appender[ILoggingEvent]): Unit =
    appenders += newAppender

  override def iteratorForAppenders(): util.Iterator[Appender[ILoggingEvent]] = {
    val it = appenders.iterator
    new util.Iterator[Appender[ILoggingEvent]] {
      override def hasNext: Boolean = it.hasNext
      override def next(): Appender[ILoggingEvent] = it.next()
    }
  }

  // Lookup by name is not supported by this appender.
  override def getAppender(name: String): Appender[ILoggingEvent] = throw new NotImplementedError()

  override def isAttached(appender: Appender[ILoggingEvent]): Boolean = appenders.contains(appender)

  override def detachAndStopAllAppenders(): Unit = {
    appenders.foreach(_.stop())
    appenders.clear()
  }

  override def stop(): Unit = {
    super.stop()
    appenders.foreach(_.stop())
  }

  override def detachAppender(appender: Appender[ILoggingEvent]): Boolean = {
    val index = appenders.indexOf(appender)
    if (index > -1) {
      appenders.remove(index).discard
    }
    index != -1
  }

  // Detach by name is not supported by this appender.
  override def detachAppender(name: String): Boolean = throw new NotImplementedError()
}
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.logging.pretty + +import com.digitalasset.canton.logging.pretty.Pretty.{ + DefaultEscapeUnicode, + DefaultIndent, + DefaultShowFieldNames, + DefaultWidth, +} +import com.digitalasset.canton.util.ErrorUtil +import com.google.protobuf.ByteString +import pprint.{PPrinter, Tree} + +/** Adhoc pretty printer to nicely print the full structure of a class that does not have an explicit pretty definition */ +class CantonPrettyPrinter(maxStringLength: Int, maxMessageLines: Int) { + + @SuppressWarnings(Array("org.wartremover.warts.Null")) + def printAdHoc(message: Any): String = + message match { + case null => "" + case product: Product => + try { + pprinter(product).toString + } catch { + case err: IllegalArgumentException => ErrorUtil.messageWithStacktrace(err) + } + case _: Any => + import com.digitalasset.canton.logging.pretty.Pretty.* + message.toString.limit(maxStringLength).toString + } + + private lazy val pprinter: PPrinter = PPrinter.BlackWhite.copy( + defaultWidth = DefaultWidth, + defaultHeight = maxMessageLines, + defaultIndent = DefaultIndent, + defaultEscapeUnicode = DefaultEscapeUnicode, + defaultShowFieldNames = DefaultShowFieldNames, + additionalHandlers = { + case _: ByteString => Tree.Literal("ByteString") + case s: String => + import com.digitalasset.canton.logging.pretty.Pretty.* + s.limit(maxStringLength).toTree + case Some(p) => + pprinter.treeify( + p, + escapeUnicode = DefaultEscapeUnicode, + showFieldNames = DefaultShowFieldNames, + ) + case Seq(single) => + pprinter.treeify( + single, + escapeUnicode = DefaultEscapeUnicode, + showFieldNames = DefaultShowFieldNames, + ) + }, + ) + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/logging/pretty/Pretty.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/logging/pretty/Pretty.scala new file mode 100644 index 0000000000..29e80d96ab --- /dev/null +++ 
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.logging.pretty

import com.digitalasset.canton.util.{ErrorUtil, ShowUtil}
import pprint.{PPrinter, Tree}

/** Type class witnessing that values of type `T` can be pretty-printed.
  *
  * See `PrettyPrintingTest` for examples on how to create instances.
  */
@FunctionalInterface // See https://github.com/scala/bug/issues/11644
trait Pretty[-T] {

  /** Produces the syntax tree from which the String representation is rendered. */
  def treeOf(t: T): Tree
}

/** Companion gathering everything needed to implement [[Pretty]] instances.
  *
  * Import it locally when writing an instance:
  * {{{
  * implicit val prettyMyClass: Pretty[MyClass] = {
  *   import Pretty._
  *   ...
  * }
  * }}}
  */
object Pretty extends ShowUtil with PrettyUtil with PrettyInstances {

  // Rendering defaults shared by all pretty-printers in Canton.
  val DefaultWidth = 200
  val DefaultHeight = 100
  val DefaultIndent = 2
  val DefaultEscapeUnicode: Boolean = false
  val DefaultShowFieldNames: Boolean = false

  /** Summons the [[Pretty]] instance for `A`. */
  def apply[A](implicit pretty: Pretty[A]): Pretty[A] = pretty

  /** Default PPrinter used to implement `toString` and `show` methods. */
  val DefaultPprinter: PPrinter =
    PPrinter.BlackWhite.copy(
      defaultWidth = DefaultWidth,
      defaultHeight = DefaultHeight,
      defaultIndent = DefaultIndent,
      additionalHandlers = { case p: PrettyPrinting => p.toTree },
    )

  /** Convenience syntax for any type with a [[Pretty]] instance. */
  implicit class PrettyOps[T: Pretty](value: T) {

    /** The tree representation of `value`. */
    final def toTree: Tree = implicitly[Pretty[T]].treeOf(value)

    /** Yields a readable string representation based on a configurable [[pprint.PPrinter]].
      * Falls back to the failure's stack trace if the printer rejects the tree.
      */
    final def toPrettyString(pprinter: PPrinter = DefaultPprinter): String =
      try pprinter.copy(additionalHandlers = { case tree: Tree => tree })(toTree).toString
      catch {
        case e: IllegalArgumentException => ErrorUtil.messageWithStacktrace(e)
      }
  }
}
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.logging.pretty + +import cats.Show.Shown +import com.daml.ledger.api.v1.completion.Completion +import com.daml.ledger.api.v1.ledger_offset.LedgerOffset +import com.daml.ledger.api.v1.ledger_offset.LedgerOffset.LedgerBoundary +import com.daml.ledger.javaapi.data.Party +import com.daml.ledger.javaapi.data.codegen.ContractId +import com.daml.lf.data.Ref +import com.daml.lf.data.Ref.{DottedName, PackageId, QualifiedName} +import com.daml.lf.transaction.ContractStateMachine.ActiveLedgerState +import com.daml.lf.transaction.TransactionErrors.{ + DuplicateContractId, + DuplicateContractIdKIError, + DuplicateContractKey, + DuplicateContractKeyKIError, + InconsistentContractKey, + InconsistentContractKeyKIError, + KeyInputError, +} +import com.daml.lf.value.Value +import com.daml.nonempty.{NonEmpty, NonEmptyUtil} +import com.digitalasset.canton.config.RequireTypes.{Port, RefinedNumeric} +import com.digitalasset.canton.ledger.api.DeduplicationPeriod +import com.digitalasset.canton.ledger.participant.state.v2.ChangeId +import com.digitalasset.canton.ledger.{configuration, offset} +import com.digitalasset.canton.protocol.* +import com.digitalasset.canton.topology.UniqueIdentifier +import com.digitalasset.canton.tracing.{TraceContext, W3CTraceContext} +import com.digitalasset.canton.util.ShowUtil.HashLength +import com.digitalasset.canton.util.{ErrorUtil, HexString} +import com.digitalasset.canton.{LedgerApplicationId, LfPartyId, LfTimestamp, Uninhabited} +import com.google.protobuf.ByteString +import io.grpc.Status +import io.grpc.health.v1.HealthCheckResponse.ServingStatus +import pprint.Tree +import slick.util.{DumpInfo, Dumpable} + +import java.lang.Long as JLong +import java.net.URI +import java.time.{Duration as JDuration, Instant} +import java.util.UUID +import scala.annotation.nowarn +import scala.concurrent.duration.{Duration, FiniteDuration} + +/** Collects instances of [[Pretty]] for common 
types. + */ +trait PrettyInstances { + + import Pretty.* + + @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) + implicit def prettyPrettyPrinting[T <: PrettyPrinting]: Pretty[T] = inst => + if (inst == null) PrettyUtil.nullTree + else { + // Cast is required to make IDEA happy. + inst.pretty.treeOf(inst.asInstanceOf[inst.type]) + } + + implicit def prettyTree[T <: Tree]: Pretty[T] = identity + + /** Makes the syntax from [[com.digitalasset.canton.util.ShowUtil]] accessible in places where a Pretty is expected. + */ + implicit def prettyShown: Pretty[Shown] = prettyOfString(_.toString) + + implicit def prettyInt: Pretty[Int] = prettyOfString(_.toString) + + implicit def prettyLong: Pretty[Long] = prettyOfString(_.toString) + + implicit def prettyJLong: Pretty[JLong] = prettyOfString(_.toString) + + implicit def prettyBoolean: Pretty[Boolean] = prettyOfString(_.toString) + + implicit val prettyUnit: Pretty[Unit] = prettyOfString(_ => "()") + + implicit def prettySeq[T: Pretty]: Pretty[Seq[T]] = treeOfIterable("Seq", _) + + @nowarn("msg=dead code following this construct") + implicit val prettyUninhabited: Pretty[Uninhabited] = (_: Uninhabited) => ??? 
+ + implicit def prettyNonempty[T: Pretty]: Pretty[NonEmpty[T]] = + NonEmptyUtil.instances.prettyNonEmpty + + implicit def prettyArray[T: Pretty]: Pretty[Array[T]] = treeOfIterable("Array", _) + + implicit def prettySet[T: Pretty]: Pretty[Set[T]] = treeOfIterable("Set", _) + + implicit def prettyPair[T1: Pretty, T2: Pretty]: Pretty[(T1, T2)] = + prettyNode("Pair", param("fst", _._1), param("snd", _._2)) + + implicit def prettyTriple[T1: Pretty, T2: Pretty, T3: Pretty]: Pretty[(T1, T2, T3)] = + prettyNode("Triple", param("#1", _._1), param("#2", _._2), param("#3", _._3)) + + implicit def prettyOption[T: Pretty]: Pretty[Option[T]] = { + case None => Tree.Apply("None", Iterator()) + case Some(x) => Tree.Apply("Some", Iterator(x.toTree)) + } + + implicit def prettyEither[L: Pretty, R: Pretty]: Pretty[Either[L, R]] = { + case Left(x) => Tree.Apply("Left", Iterator(x.toTree)) + case Right(x) => Tree.Apply("Right", Iterator(x.toTree)) + } + + implicit def prettyThrowable: Pretty[Throwable] = prettyOfString(ErrorUtil.messageWithStacktrace) + + implicit def prettyMap[K: Pretty, V: Pretty]: Pretty[collection.Map[K, V]] = + elements => + treeOfIterable("Map", elements.map { case (k, v) => Tree.Infix(k.toTree, "->", v.toTree) }) + + private def treeOfIterable[T: Pretty](prefix: String, elements: Iterable[T]): Tree = + if (elements.sizeCompare(1) == 0) { + elements.iterator.next().toTree + } else { + Tree.Apply(prefix, elements.map(_.toTree).iterator) + } + + implicit val prettyJDuration: Pretty[JDuration] = prettyOfString( + // https://stackoverflow.com/a/40487511/6346418 + _.toString.substring(2).replaceAll("(\\d[HMS])(?!$)", "$1 ").toLowerCase + ) + + implicit def prettyDuration: Pretty[Duration] = { (duration: Duration) => + duration match { + case fduration: FiniteDuration => + import scala.jdk.DurationConverters.ScalaDurationOps + prettyJDuration.treeOf( + fduration.toJava + ) + case infDuration: Duration.Infinite => Tree.Literal(infDuration.toString) + } + } + + implicit 
def prettyURI: Pretty[URI] = prettyOfString(_.toString) + + implicit def prettyInstant: Pretty[Instant] = prettyOfString(_.toString) + + implicit val prettyUuid: Pretty[UUID] = prettyOfString(_.toString.readableHash.toString) + // There is deliberately no instance for `String` to force clients + // use ShowUtil.ShowStringSyntax instead (e.g. "string".singleQuoted). + def prettyString: Pretty[String] = prettyOfString(identity) + + implicit val prettyByteString: Pretty[ByteString] = + prettyOfString(b => HexString.toHexString(b, HashLength).readableHash.toString) + + implicit def prettyDumpInfo: Pretty[DumpInfo] = { + implicit def prettyDumpInfoChild: Pretty[(String, Dumpable)] = { case (label, child) => + Tree.Infix(label.unquoted.toTree, "=", child.toTree) + } + + prettyOfClass( + param("name", _.name.singleQuoted, _.name.nonEmpty), + unnamedParam(_.mainInfo.doubleQuoted, _.mainInfo.nonEmpty), + unnamedParamIfNonEmpty(_.children.toSeq), + // Omitting attrInfo, as it may contain confidential data. 
+ ) + } + + implicit def prettyDumpable: Pretty[Dumpable] = prettyOfParam(_.getDumpInfo) + + implicit def prettyLedgerString: Pretty[Ref.LedgerString] = prettyOfString(id => id: String) + + implicit val prettyLedgerBoundary: Pretty[LedgerBoundary] = { + case LedgerBoundary.LEDGER_BEGIN => Tree.Literal("LEDGER_BEGIN") + case LedgerBoundary.LEDGER_END => Tree.Literal("LEDGER_END") + case LedgerBoundary.Unrecognized(value) => Tree.Literal(s"Unrecognized($value)") + } + + implicit val prettyLedgerOffset: Pretty[LedgerOffset] = { + case LedgerOffset(LedgerOffset.Value.Absolute(absolute)) => + Tree.Apply("AbsoluteOffset", Iterator(Tree.Literal(absolute))) + case LedgerOffset(LedgerOffset.Value.Boundary(boundary)) => + Tree.Apply("Boundary", Iterator(boundary.toTree)) + case LedgerOffset(LedgerOffset.Value.Empty) => Tree.Literal("Empty") + } + + implicit val prettyReadServiceOffset: Pretty[offset.Offset] = prettyOfString( + // Do not use `toReadableHash` because this is not a hash but a hex-encoded string + // whose end contains the most important information + _.toHexString + ) + + implicit def prettyLfParticipantId: Pretty[Ref.ParticipantId] = prettyOfString(prettyUidString(_)) + + implicit def prettyLedgerApplicationId: Pretty[LedgerApplicationId] = prettyOfString( + prettyUidString(_) + ) + + implicit def prettyLfTimestamp: Pretty[LfTimestamp] = prettyOfString(_.toString) + + implicit def prettyLfPartyId: Pretty[LfPartyId] = prettyOfString(prettyUidString(_)) + + implicit def prettyLfHash: Pretty[LfHash] = prettyOfString(_.toHexString.readableHash.toString) + + implicit val prettyNodeId: Pretty[LfNodeId] = prettyOfParam(_.index) + + implicit def prettyPrimitiveParty: Pretty[Party] = + prettyOfString(party => prettyUidString(party.getValue)) + + private def prettyUidString(partyStr: String): String = + UniqueIdentifier.fromProtoPrimitive_(partyStr) match { + case Right(uid) => uid.show + case Left(_) => partyStr + } + + implicit def prettyPackageId: Pretty[PackageId] = 
prettyOfString(id => show"${id.readableHash}") + + implicit def prettyChangeId: Pretty[ChangeId] = prettyOfClass( + param("application Id", _.applicationId), + param("command Id", _.commandId), + param("act as", _.actAs), + ) + + implicit def prettyLfDottedName: Pretty[DottedName] = prettyOfString { dottedName => + val segments = dottedName.segments + val prefixes = segments.length - 1 + val shortenedPrefixes = if (prefixes > 0) { + segments.init.map(_.substring(0, 1)).toSeq.mkString(".") + "." + } else "" + shortenedPrefixes + segments.last + } + + implicit def prettyLfQualifiedName: Pretty[QualifiedName] = + prettyOfString(qname => show"${qname.module}:${qname.name}") + + implicit def prettyLfIdentifier: Pretty[com.daml.lf.data.Ref.Identifier] = + prettyOfString(id => show"${id.packageId}:${id.qualifiedName}") + + implicit def prettyLfContractId: Pretty[LfContractId] = prettyOfString { + case LfContractId.V1(discriminator, suffix) + // Shorten only Canton contract ids + if suffix.startsWith(AuthenticatedContractIdVersionV2.versionPrefixBytes) => + val prefixBytesSize = CantonContractIdVersion.versionPrefixBytesSize + + val cantonVersionPrefix = suffix.slice(0, prefixBytesSize) + val rawSuffix = suffix.slice(prefixBytesSize, suffix.length) + + discriminator.toHexString.readableHash.toString + + cantonVersionPrefix.toHexString + + rawSuffix.toHexString.readableHash.toString + case lfContractId: LfContractId => + // Don't abbreviate anything for unusual contract ids + lfContractId.toString + } + + implicit def prettyLfTransactionVersion: Pretty[LfTransactionVersion] = prettyOfString( + _.protoValue + ) + + implicit def prettyContractId: Pretty[ContractId[_]] = prettyOfString { coid => + val coidStr = coid.contractId + val tokens = coidStr.split(':') + if (tokens.lengthCompare(2) == 0) { + tokens(0).readableHash.toString + ":" + tokens(1).readableHash.toString + } else { + // Don't abbreviate anything for unusual contract ids + coidStr + } + } + + implicit def 
prettyLfGlobalKey: Pretty[LfGlobalKey] = prettyOfClass( + param("templateId", _.templateId), + param("hash", _.hash.toHexString.readableHash), + ) + + implicit def prettyLedgerTimeModel: Pretty[configuration.LedgerTimeModel] = prettyOfClass( + param("avgTransactionLatency", _.avgTransactionLatency), + param("minSkew", _.minSkew), + param("maxSkew", _.maxSkew), + ) + + implicit def prettyLedgerConfiguration: Pretty[configuration.Configuration] = prettyOfClass( + param("generation", _.generation), + param("maxDeduplicationDuration", _.maxDeduplicationDuration), + param("timeModel", _.timeModel), + ) + + implicit def prettyV2DeduplicationPeriod: Pretty[DeduplicationPeriod] = + prettyOfString { + case deduplicationDuration: DeduplicationPeriod.DeduplicationDuration => + s"(duration=${deduplicationDuration.duration})" + case dedupOffset: DeduplicationPeriod.DeduplicationOffset => + s"(offset=${dedupOffset.offset})" + } + + implicit def prettyCompletion: Pretty[Completion] = + prettyOfClass( + unnamedParamIfDefined(_.status), + param("commandId", _.commandId.singleQuoted), + param("transactionId", _.transactionId.singleQuoted, _.transactionId.nonEmpty), + ) + + implicit def prettyRpcStatus: Pretty[com.google.rpc.status.Status] = + prettyOfClass( + customParam(rpcStatus => Status.fromCodeValue(rpcStatus.code).getCode.toString), + customParam(_.message), + paramIfNonEmpty("details", _.details.map(_.toString.unquoted)), + ) + + implicit def prettyGrpcStatus: Pretty[io.grpc.Status] = + prettyOfClass( + param("code", _.getCode.name().unquoted), + paramIfDefined("description", x => Option(x.getDescription()).map(_.doubleQuoted)), + paramIfDefined("cause", x => Option(x.getCause()).map(_.getMessage.doubleQuoted)), + ) + + implicit lazy val prettyValue: Pretty[Value] = + adHocPrettyInstance // TODO(#3269) Using this pretty-printer may leak confidential data. 
+ + implicit lazy val prettyVersionedValue: Pretty[Value.VersionedValue] = prettyOfClass( + unnamedParam(_.unversioned), + param("version", _.version), + ) + + implicit val prettyW3CTraceContext: Pretty[W3CTraceContext] = prettyOfClass( + param("parent", _.parent.unquoted), + paramIfDefined("state", _.state.map(_.unquoted)), + ) + + implicit val prettyTraceContext: Pretty[TraceContext] = prettyOfClass( + paramIfDefined("trace id", _.traceId.map(_.unquoted)), + paramIfDefined("W3C context", _.asW3CTraceContext), + ) + + implicit val prettyKeyInputError: Pretty[KeyInputError] = { + case InconsistentContractKeyKIError(e: InconsistentContractKey) => + prettyOfClass[InconsistentContractKey](unnamedParam(_.key)).treeOf(e) + case DuplicateContractKeyKIError(e: DuplicateContractKey) => + prettyOfClass[DuplicateContractKey](unnamedParam(_.key)).treeOf(e) + case DuplicateContractIdKIError(e: DuplicateContractId) => + prettyOfClass[DuplicateContractId](unnamedParam(_.contractId)).treeOf(e) + } + + implicit def prettyActiveLedgerState[T: Pretty]: Pretty[ActiveLedgerState[T]] = + prettyOfClass[ActiveLedgerState[T]]( + param("locallyCreatedThisTimeline", _.locallyCreatedThisTimeline), + param("consumedBy", _.consumedBy), + param("localActiveKeys", _.localActiveKeys), + ) + + implicit val prettyPort: Pretty[Port] = prettyOfString(_.unwrap.toString) + + implicit val prettyRefinedNumeric: Pretty[RefinedNumeric[_]] = prettyOfString(_.unwrap.toString) + + implicit val prettyServingStatus: Pretty[ServingStatus] = prettyOfClass( + param("status", _.name().singleQuoted) + ) +} + +object PrettyInstances extends PrettyInstances diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/logging/pretty/PrettyPrinting.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/logging/pretty/PrettyPrinting.scala new file mode 100644 index 0000000000..f975c405dc --- /dev/null +++ 
b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/logging/pretty/PrettyPrinting.scala @@ -0,0 +1,25 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.logging.pretty + +import com.digitalasset.canton.util.ShowUtil + +/** Extend this trait to directly enable pretty printing via supertype. + */ +trait PrettyPrinting extends ShowUtil with PrettyUtil { + + /** Indicates how to pretty print this instance. + * See `PrettyPrintingTest` for examples on how to implement this method. + */ + protected[pretty] def pretty: Pretty[this.type] + + /** Yields a readable string representation based on [[com.digitalasset.canton.logging.pretty.Pretty.DefaultPprinter]]. + * `Final` to avoid accidental overwriting. + */ + // Do not cache the toString representation because it could be outdated in classes with mutable state + override final def toString: String = { + // Special construction here to fail gracefully if this is a mocked instance. + Pretty.PrettyOps[this.type](this)(pretty).toPrettyString() + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/logging/pretty/PrettyUtil.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/logging/pretty/PrettyUtil.scala new file mode 100644 index 0000000000..2e21672c5c --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/logging/pretty/PrettyUtil.scala @@ -0,0 +1,186 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.logging.pretty + +import cats.syntax.functorFilter.* +import com.digitalasset.canton.logging.pretty.Pretty +import com.digitalasset.canton.logging.pretty.PrettyUtil.nullTree +import pprint.{Tree, Walker} + +import scala.annotation.tailrec +import scala.reflect.ClassTag + +/** Utility methods for constructing [[Pretty]] instances. + */ +trait PrettyUtil { + + import Pretty.PrettyOps + + /** A tree representing the type name and parameter trees. + */ + def prettyOfClass[T](getParamTrees: (T => Option[Tree])*): Pretty[T] = + inst => { + // getSimpleName on an anonymous class returns an empty string + // so we search for the first non-anonymous superclass. + // + // To guard against the possibility that the direct superclass of an anonymous + // class is again anonymous (not sure whether this can happen in Scala) + // we go up in the hierarchy until we find a non-anonymous class. + @tailrec + def firstNonAnonmymousSuperclass(clazz: Class[_]): Class[_] = + if (clazz.isAnonymousClass) { + // The superclass cannot be `null` as neither java.lang.Object + // nor Java interfaces nor primitive types nor void are anonymous classes. + firstNonAnonmymousSuperclass(clazz.getSuperclass) + } else clazz + + if (inst == null) nullTree + else { + val simpleName = firstNonAnonmymousSuperclass(inst.getClass).getSimpleName + Tree.Apply(simpleName, getParamTrees.mapFilter(_(inst)).iterator) + } + } + + /** A tree presenting the type name only. (E.g., for case objects.) + */ + def prettyOfObject[T <: Product]: Pretty[T] = inst => + if (inst == null) nullTree else treeOfString(inst.productPrefix) + + /** A tree consisting of a labelled node with the given children. */ + def prettyNode[T](label: String, children: (T => Option[Tree])*): Pretty[T] = + inst => Tree.Apply(label, children.mapFilter(_(inst)).iterator) + + /** A tree representing both parameter name and value. 
+ */ + def param[T, V: Pretty]( + name: String, + getValue: T => V, + cond: T => Boolean = (_: T) => true, + ): T => Option[Tree] = + conditionalParam[T, V](getValue, cond, value => mkNameValue(name, value.toTree)) + + /** A tree only written if not matching the default value */ + def paramIfNotDefault[T, V: Pretty]( + name: String, + getValue: T => V, + default: V, + ): T => Option[Tree] = + param(name, getValue, getValue(_) != default) + + private def conditionalParam[T, V]( + getValue: T => V, + cond: T => Boolean, + resultOfValueTree: V => Tree, + ): T => Option[Tree] = + inst => + if (cond(inst)) { + Some(resultOfValueTree(getValue(inst))) + } else { + None + } + + def prettyInfix[T] = new PrettyUtil.PrettyInfixPartiallyApplied[T](false) + + private def mkNameValue(name: String, valueTree: Tree): Tree = + Tree.Infix(treeOfString(name), "=", valueTree) + + def paramIfNonEmpty[T, V <: IterableOnce[_]: Pretty]( + name: String, + getValue: T => V, + ): T => Option[Tree] = + param(name, getValue, getValue(_).iterator.nonEmpty) + + def paramIfDefined[T, V: Pretty](name: String, getValue: T => Option[V]): T => Option[Tree] = + getValue(_).map(value => mkNameValue(name, value.toTree)) + + def paramIfTrue[T](label: String, getValue: T => Boolean): T => Option[Tree] = + customParam(_ => label, getValue) + + /** A tree representing a parameter value without a parameter name. + */ + def unnamedParam[T, V: Pretty]( + getValue: T => V, + cond: T => Boolean = (_: T) => true, + ): T => Option[Tree] = + conditionalParam[T, V](getValue, cond, _.toTree) + + def unnamedParamIfNonEmpty[T, V <: IterableOnce[_]: Pretty](getValue: T => V): T => Option[Tree] = + unnamedParam(getValue, getValue(_).iterator.nonEmpty) + + def unnamedParamIfDefined[T, V: Pretty](getValue: T => Option[V]): T => Option[Tree] = + getValue(_).map(value => value.toTree) + + /** A tree representing a parameter name without a parameter value. + * Use this for parameters storing confidential or binary data. 
+ */ + def paramWithoutValue[T](name: String, cond: T => Boolean = (_: T) => true): T => Option[Tree] = + conditionalParam(_ => treeOfString("..."), cond, mkNameValue(name, _)) + + /** Use this if you need a custom representation of a parameter. + * Do not use this to create lengthy strings, as line wrapping is not supported. + */ + def customParam[T]( + getValue: T => String, + cond: T => Boolean = (_: T) => true, + ): T => Option[Tree] = + conditionalParam(getValue, cond, treeOfString) + + /** Use this to indicate that you've omitted fields from pretty printing */ + def indicateOmittedFields[T]: T => Option[Tree] = + customParam(_ => "...") + + /** Use this to give a class with a singleton parameter the same pretty representation as the parameter. + */ + def prettyOfParam[T, V: Pretty](getValue: T => V): Pretty[T] = inst => + if (inst == null) nullTree else getValue(inst).toTree + + /** Creates a pretty instance from a string function. + * Do not use this with lengthy strings, as line wrapping is not supported. + */ + def prettyOfString[T](toString: T => String): Pretty[T] = inst => + if (inst == null) nullTree else treeOfString(toString(inst)) + + private def treeOfString(s: String): Tree = + if (s.isEmpty) { + // Note that the parameter of `Literal` must be non-empty. + Tree.Literal("\"\"") + } else { + Tree.Literal(s) + } + + /** Use this as a temporary solution, to make the code compile during an ongoing migration. + * Drawbacks: + *
+ * <ul>
+ * <li>Instances of `Pretty[T]` are ignored.</li>
+ * <li>No parameter names</li>
+ * </ul>
+ */ + def adHocPrettyInstance[T <: Product](implicit c: ClassTag[T]): Pretty[T] = + // Need to restrict to Product subtypes as the Walker cannot faithfully deal with arbitrary types. + new Walker { + override def additionalHandlers: PartialFunction[Any, Tree] = { + case p: PrettyPrinting if !c.runtimeClass.isInstance(p) => p.pretty.treeOf(p) + case p: Product if p.productArity == 0 => treeOfString(p.productPrefix) + } + }.treeify( + _, + escapeUnicode = Pretty.DefaultEscapeUnicode, + showFieldNames = Pretty.DefaultShowFieldNames, + ) +} + +object PrettyUtil extends PrettyUtil { + private[pretty] final class PrettyInfixPartiallyApplied[T](private val dummy: Boolean) + extends AnyVal { + def apply[U: Pretty, V: Pretty](first: T => U, infixOp: String, second: T => V): Pretty[T] = { + inst => + import Pretty.PrettyOps + if (inst == null) nullTree + else Tree.Infix(first(inst).toTree, infixOp, second(inst).toTree) + } + } + + /** How to pretty-print `null` values. This is consistent with [[pprint.Walker.treeify]] */ + private[pretty] val nullTree = Tree.Literal("null") +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/metrics/DbStorageMetrics.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/metrics/DbStorageMetrics.scala new file mode 100644 index 0000000000..2664911a96 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/metrics/DbStorageMetrics.scala @@ -0,0 +1,129 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.metrics + +import com.daml.metrics.api.MetricDoc.MetricQualification.Debug +import com.daml.metrics.api.MetricHandle.{Counter, Gauge, Timer} +import com.daml.metrics.api.noop.{NoOpGauge, NoOpTimer} +import com.daml.metrics.api.{MetricDoc, MetricName, MetricsContext} +import com.digitalasset.canton.metrics.MetricHandle.MetricsFactory + +import scala.annotation.nowarn +import scala.concurrent.duration.* + +@MetricDoc.GroupTag( + representative = "canton.db-storage..executor", + groupableClass = classOf[DbQueueMetrics], +) +@nowarn("cat=deprecation") +class DbStorageMetrics( + basePrefix: MetricName, + metricsFactory: MetricsFactory, +) { + + val prefix: MetricName = basePrefix :+ "db-storage" + + def loadGaugeM(name: String): TimedLoadGauge = { + val timerM = metricsFactory.timer(prefix :+ name) + metricsFactory.loadGauge(prefix :+ name :+ "load", 1.second, timerM)(MetricsContext.Empty) + } + + @MetricDoc.Tag( + summary = "Timer monitoring duration and rate of accessing the given storage", + description = """Covers both read from and writes to the storage.""", + qualification = Debug, + ) + @SuppressWarnings(Array("org.wartremover.warts.Null")) + val timerExampleForDocs: Timer = NoOpTimer(prefix :+ "") + + @MetricDoc.Tag( + summary = "The load on the given storage", + description = + """The load is a factor between 0 and 1 describing how much of an existing interval + |has been spent reading from or writing to the storage.""", + qualification = Debug, + ) + @SuppressWarnings(Array("org.wartremover.warts.Null")) + val loadExampleForDocs: Gauge[Double] = + NoOpGauge(prefix :+ "" :+ "load", 0d) + + object alerts extends DbAlertMetrics(prefix, metricsFactory) + + object queue extends DbQueueMetrics(prefix :+ "general", metricsFactory) + + object writeQueue extends DbQueueMetrics(prefix :+ "write", metricsFactory) + + object locks extends DbQueueMetrics(prefix :+ "locks", metricsFactory) +} + 
+@nowarn("cat=deprecation") +class DbQueueMetrics( + basePrefix: MetricName, + factory: MetricsFactory, +) { + val prefix: MetricName = basePrefix :+ "executor" + + @MetricDoc.Tag( + summary = "Number of database access tasks waiting in queue", + description = + """Database access tasks get scheduled in this queue and get executed using one of the + |existing asynchronous sessions. A large queue indicates that the database connection is + |not able to deal with the large number of requests. + |Note that the queue has a maximum size. Tasks that do not fit into the queue + |will be retried, but won't show up in this metric.""", + qualification = Debug, + ) + val queue: Counter = factory.counter(prefix :+ "queued") + + @MetricDoc.Tag( + summary = "Number of database access tasks currently running", + description = """Database access tasks run on an async executor. This metric shows + |the current number of tasks running in parallel.""", + qualification = Debug, + ) + val running: Counter = factory.counter(prefix :+ "running") + + @MetricDoc.Tag( + summary = "Scheduling time metric for database tasks", + description = """Every database query is scheduled using an asynchronous executor with a queue. + |The time a task is waiting in this queue is monitored using this metric.""", + qualification = Debug, + ) + val waitTimer: Timer = factory.timer(prefix :+ "waittime") + +} + +@nowarn("cat=deprecation") +class DbAlertMetrics( + basePrefix: MetricName, + factory: MetricsFactory, +) { + val prefix: MetricName = basePrefix :+ "alerts" + + @MetricDoc.Tag( + summary = "Number of failed writes to the event log", + description = + """Failed writes to the single dimension event log indicate an issue requiring user intervention. In the case of + |domain event logs, the corresponding domain no longer emits any subsequent events until domain recovery is + |initiated (e.g. by disconnecting and reconnecting the participant from the domain). 
In the case of the + |participant event log, an operation might need to be reissued. If this counter is larger than zero, check the + |canton log for errors for details. + |""", + qualification = Debug, + ) + val failedEventLogWrites: Counter = factory.counter(prefix :+ "single-dimension-event-log") + + @MetricDoc.Tag( + summary = "Number of failed writes to the multi-domain event log", + description = + """Failed writes to the multi domain event log indicate an issue requiring user intervention. In the case of + |domain event logs, the corresponding domain no longer emits any subsequent events until domain recovery is + |initiated (e.g. by disconnecting and reconnecting the participant from the domain). In the case of the + |participant event log, an operation might need to be reissued. If this counter is larger than zero, check the + |canton log for errors for details. + |""", + qualification = Debug, + ) + val failedMultiDomainEventLogWrites: Counter = factory.counter(prefix :+ "multi-domain-event-log") +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/metrics/Gauges.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/metrics/Gauges.scala new file mode 100644 index 0000000000..c2e11de875 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/metrics/Gauges.scala @@ -0,0 +1,138 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.metrics + +import cats.data.{EitherT, OptionT} +import com.daml.metrics.Timed +import com.daml.metrics.api.MetricHandle.Gauge.CloseableGauge +import com.daml.metrics.api.MetricHandle.Timer +import com.digitalasset.canton.DiscardOps +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.util.CheckedT +import com.digitalasset.canton.util.Thereafter.syntax.* + +import java.util.concurrent.atomic.AtomicInteger +import scala.annotation.tailrec +import scala.collection.mutable +import scala.concurrent.duration.FiniteDuration +import scala.concurrent.{ExecutionContext, Future, blocking} + +class TimedLoadGauge(timer: Timer, loadGauge: LoadGauge, delegateMetricHandle: CloseableGauge) + extends CloseableGauge { + def event[T](fut: => Future[T])(implicit ec: ExecutionContext): Future[T] = + Timed.future(timer, loadGauge.event(fut)) + + def eventUS[T](fut: => FutureUnlessShutdown[T])(implicit + ec: ExecutionContext + ): FutureUnlessShutdown[T] = + FutureUnlessShutdown(event(fut.unwrap)) + + def syncEvent[T](body: => T): T = + Timed.value(timer, loadGauge.syncEvent(body)) + + def checkedTEvent[A, N, R](checked: => CheckedT[Future, A, N, R])(implicit + ec: ExecutionContext + ): CheckedT[Future, A, N, R] = + CheckedT(event(checked.value)) + + def eitherTEvent[A, B](eitherT: => EitherT[Future, A, B])(implicit + ec: ExecutionContext + ): EitherT[Future, A, B] = + EitherT(event(eitherT.value)) + + def eitherTEventUnlessShutdown[A, B](eitherT: => EitherT[FutureUnlessShutdown, A, B])(implicit + ec: ExecutionContext + ): EitherT[FutureUnlessShutdown, A, B] = + EitherT(FutureUnlessShutdown(event(eitherT.value.unwrap))) + + def optionTEvent[A](optionT: => OptionT[Future, A])(implicit + ec: ExecutionContext + ): OptionT[Future, A] = + OptionT(event(optionT.value)) + + override def close(): Unit = delegateMetricHandle.close() + + override def name: String = 
delegateMetricHandle.name + + override def metricType: String = delegateMetricHandle.metricType +} + +class LoadGauge(interval: FiniteDuration, now: => Long = System.nanoTime) { + + private val intervalNanos = interval.toNanos + private val measure = mutable.ListBuffer[(Boolean, Long)]() + private val eventCount = new AtomicInteger(1) + + record(false) + + private def record(loaded: Boolean): Unit = blocking(synchronized { + val count = if (loaded) { + eventCount.getAndIncrement() + } else { + eventCount.decrementAndGet() + } + if (count == 0) { + measure.append((loaded, now)) + } + cleanup(now) + }) + + def event[T](fut: => Future[T])(implicit ec: ExecutionContext): Future[T] = { + record(true) + fut.thereafter { _ => + record(false) + } + } + + def syncEvent[T](body: => T): T = { + record(true) + try { + body + } finally { + record(false) + } + } + + @tailrec + private def cleanup(tm: Long): Unit = { + // keep on cleaning up + if (measure.lengthCompare(1) > 0 && measure(1)._2 <= tm - intervalNanos) { + measure.remove(0).discard + cleanup(tm) + } + } + + @SuppressWarnings(Array("org.wartremover.warts.Var", "org.wartremover.warts.IterableOps")) + def getLoad: Double = { + val endTs = now + val startTs = endTs - intervalNanos + + def computeLoad(lastLoaded: Boolean, lastTs: Long, ts: Long): Long = + if (lastLoaded) ts - lastTs else 0 + + blocking(synchronized { + cleanup(endTs) + // We know that t0 <= t1 <= ... <= tn <= endTs and + // startTs < t1, if t1 exists. 
+ + val (loaded0, t0) = measure.head + + var lastLoaded = loaded0 + var lastTs = Math.max(t0, startTs) + var load = 0L + + for ((loaded, ts) <- measure.tail) { + load += computeLoad(lastLoaded, lastTs, ts) + + lastLoaded = loaded + lastTs = ts + } + + load += computeLoad(lastLoaded, lastTs, endTs) + + load.toDouble / intervalNanos + }) + } + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/metrics/MetricHandle.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/metrics/MetricHandle.scala new file mode 100644 index 0000000000..9e0fbff652 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/metrics/MetricHandle.scala @@ -0,0 +1,49 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.metrics + +import com.codahale.metrics.MetricRegistry +import com.daml.metrics.api.MetricHandle.* +import com.daml.metrics.api.dropwizard.DropwizardMetricsFactory +import com.daml.metrics.api.noop.NoOpMetricsFactory as DamlNoOpMetricsFactory +import com.daml.metrics.api.opentelemetry.OpenTelemetryMetricsFactory +import com.daml.metrics.api.{MetricHandle as DamlMetricHandle, MetricName, MetricsContext} +import io.opentelemetry.api.metrics + +import scala.annotation.nowarn +import scala.concurrent.duration.FiniteDuration + +object MetricHandle { + + @deprecated("Use LabeledMetricsFactory to create metrics that support labels.", since = "2.7.0") + trait MetricsFactory extends DamlMetricHandle.MetricsFactory { + + def loadGauge( + name: MetricName, + interval: FiniteDuration, + timer: Timer, + )(implicit mc: MetricsContext): TimedLoadGauge = { + val definedLoadGauge = new LoadGauge(interval) + val registeredLoadGauge = gaugeWithSupplier(name, () => definedLoadGauge.getLoad) + new TimedLoadGauge(timer, definedLoadGauge, registeredLoadGauge) + } + } + + @nowarn("cat=deprecation") + trait 
LabeledMetricsFactory extends MetricsFactory with DamlMetricHandle.LabeledMetricsFactory + + @nowarn("cat=deprecation") + class CantonDropwizardMetricsFactory(registry: MetricRegistry) + extends DropwizardMetricsFactory(registry) + with MetricsFactory + + object NoOpMetricsFactory extends DamlNoOpMetricsFactory with LabeledMetricsFactory + + class CantonOpenTelemetryMetricsFactory( + otelMeter: metrics.Meter, + globalMetricsContext: MetricsContext, + ) extends OpenTelemetryMetricsFactory(otelMeter, globalMetricsContext) + with LabeledMetricsFactory + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/metrics/MetricsFactoryType.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/metrics/MetricsFactoryType.scala new file mode 100644 index 0000000000..6daecc716b --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/metrics/MetricsFactoryType.scala @@ -0,0 +1,19 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.metrics
+
+import com.daml.metrics.api.MetricsContext
+import com.digitalasset.canton.metrics.MetricHandle.LabeledMetricsFactory
+
+/** Selects which metrics backend a component is wired against. */
+sealed trait MetricsFactoryType
+
+object MetricsFactoryType {
+
+  // Used to provide an in-memory metrics factory for testing
+  // Must provide a new instance for each component
+  final case class InMemory(provider: MetricsContext => LabeledMetricsFactory)
+      extends MetricsFactoryType
+  // Use actual Dropwizard/OpenTelemetry implementations
+  case object External extends MetricsFactoryType
+}
diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/metrics/SequencerClientMetrics.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/metrics/SequencerClientMetrics.scala
new file mode 100644
index 0000000000..00586e30d8
--- /dev/null
+++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/metrics/SequencerClientMetrics.scala
@@ -0,0 +1,147 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.metrics
+
+import com.daml.metrics.api.MetricDoc.MetricQualification.{Debug, Saturation}
+import com.daml.metrics.api.MetricHandle.{Counter, Gauge, MetricsFactory, Timer}
+import com.daml.metrics.api.{MetricDoc, MetricName, MetricsContext}
+
+import scala.annotation.nowarn
+
+/** Metrics covering the sequencer client: event handling, processing delay and submissions.
+  *
+  * @param basePrefix     prefix under which all sequencer-client metrics are registered
+  * @param metricsFactory factory used to create the individual metric instances
+  */
+class SequencerClientMetrics(
+    basePrefix: MetricName,
+    @nowarn("cat=deprecation") val metricsFactory: MetricsFactory,
+) {
+  val prefix: MetricName = basePrefix :+ "sequencer-client"
+
+  @MetricDoc.Tag(
+    summary = "Timer monitoring time and rate of sequentially handling the event application logic",
+    description = """All events are received sequentially. This handler records
+      |the rate and time it takes the application (participant or domain) to handle the events.""",
+    qualification = Debug,
+  )
+  val applicationHandle: Timer = metricsFactory.timer(prefix :+ "application-handle")
+
+  @MetricDoc.Tag(
+    summary = "Timer monitoring time and rate of entire event handling",
+    description =
+      """Most event handling cost should come from the application-handle. This timer measures
+        |the full time (which should just be marginally more than the application handle).""",
+    qualification = Debug,
+  )
+  val processingTime: Timer = metricsFactory.timer(prefix :+ "event-handle")
+
+  @MetricDoc.Tag(
+    summary = "The delay on the event processing",
+    description = """Every message received from the sequencer carries a timestamp that was assigned
+      |by the sequencer when it sequenced the message. This timestamp is called the sequencing timestamp.
+      |The component receiving the message on the participant, mediator or topology manager side, is the sequencer client.
+      |Upon receiving the message, the sequencer client compares the time difference between the
+      |sequencing time and the computer's local clock and exposes this difference as the given metric.
+      |The difference will include the clock-skew and the processing latency between assigning the timestamp on the
+      |sequencer and receiving the message by the recipient.
+      |If the difference is large compared to the usual latencies and if clock skew can be ruled out, then
+      |it means that the node is still trying to catch up with events that were sequenced by the
+      |sequencer a while ago. This can happen after having been offline for a while or if the node is
+      |too slow to keep up with the messaging load.""",
+    qualification = Debug,
+  )
+  val delay: Gauge[Long] = metricsFactory.gauge(prefix :+ "delay", 0L)(MetricsContext.Empty)
+
+  object handler {
+    val prefix: MetricName = SequencerClientMetrics.this.prefix :+ "handler"
+
+    @MetricDoc.Tag(
+      summary =
+        "Nodes process the events from the domain's sequencer in batches. This metric tracks how many such batches are processed in parallel.",
+      description = """Incoming messages are processed by a sequencer client, which combines them into batches of
+        |size up to 'event-inbox-size' before sending them to an application handler for processing. Depending on the
+        |system's configuration, the rate at which event batches are sent to the handler may be throttled to avoid
+        |overwhelming it with too many events at once.
+        |
+        |Indicators that the configured upper bound may be too low:
+        |This metric is constantly close to the configured maximum, which is exposed via 'max-in-flight-event-batches',
+        |while the system's resources are under-utilized.
+        |Indicators that the configured upper bound may be too high:
+        |Out-of-memory errors crashing the JVM or frequent garbage collection cycles that slow down processing.
+        |
+        |The metric tracks how many of these batches have been sent to the application handler but have not yet
+        |been fully processed. This metric can help identify potential bottlenecks or issues with the application's
+        |processing of events and provide insights into the overall workload of the system.""",
+      qualification = Saturation,
+    )
+    val actualInFlightEventBatches: Counter =
+      metricsFactory.counter(prefix :+ "actual-in-flight-event-batches")
+
+    @MetricDoc.Tag(
+      summary =
+        "Nodes process the events from the domain's sequencer in batches. This metric tracks the upper bound of such batches being processed in parallel.",
+      description = """Incoming messages are processed by a sequencer client, which combines them into batches of
+        |size up to 'event-inbox-size' before sending them to an application handler for processing. Depending on the
+        |system's configuration, the rate at which event batches are sent to the handler may be throttled to avoid
+        |overwhelming it with too many events at once.
+        |
+        |Configured by 'maximum-in-flight-event-batches' parameter in the sequencer-client config
+        |
+        |The metric shows the configured upper limit on how many batches the application handler may process concurrently.
+        |The metric 'actual-in-flight-event-batches' tracks the actual number of currently processed batches.""",
+      qualification = Saturation,
+    )
+    val maxInFlightEventBatches: Gauge[Int] =
+      metricsFactory.gauge(prefix :+ "max-in-flight-event-batches", 0)(MetricsContext.Empty)
+  }
+
+  object submissions {
+    val prefix: MetricName = SequencerClientMetrics.this.prefix :+ "submissions"
+
+    @MetricDoc.Tag(
+      summary =
+        "Number of sequencer send requests we have that are waiting for an outcome or timeout",
+      description = """Incremented on every successful send to the sequencer.
+        |Decremented when the event or an error is sequenced, or when the max-sequencing-time has elapsed.""",
+      qualification = Debug,
+    )
+    val inFlight: Counter = metricsFactory.counter(prefix :+ "in-flight")
+
+    @MetricDoc.Tag(
+      summary = "Rate and timings of send requests to the sequencer",
+      description =
+        """Provides a rate and time of how long it takes for send requests to be accepted by the sequencer.
+          |Note that this is just for the request to be made and not for the requested event to actually be sequenced.
+          |""",
+      qualification = Debug,
+    )
+    val sends: Timer = metricsFactory.timer(prefix :+ "sends")
+
+    @MetricDoc.Tag(
+      summary = "Rate and timings of sequencing requests",
+      description =
+        """This timer is started when a submission is made to the sequencer and then completed when a corresponding event
+          |is witnessed from the sequencer, so will encompass the entire duration for the sequencer to sequence the
+          |request. If the request does not result in an event no timing will be recorded.
+          |""",
+      qualification = Debug,
+    )
+    val sequencingTime: Timer = metricsFactory.timer(prefix :+ "sequencing")
+
+    @MetricDoc.Tag(
+      summary = "Count of send requests which receive an overloaded response",
+      description =
+        "Counter that is incremented if a send request receives an overloaded response from the sequencer.",
+      qualification = Debug,
+    )
+    val overloaded: Counter = metricsFactory.counter(prefix :+ "overloaded")
+
+    @MetricDoc.Tag(
+      summary = "Count of send requests that did not cause an event to be sequenced",
+      description = """Counter of send requests we did not witness a corresponding event to be sequenced by the
+        |supplied max-sequencing-time. There could be many reasons for this happening: the request may
+        |have been lost before reaching the sequencer, the sequencer may be at capacity and
+        |the max-sequencing-time was exceeded by the time the request was processed, or the supplied
+        |max-sequencing-time may just be too small for the sequencer to be able to sequence the request.""",
+      qualification = Debug,
+    )
+    val dropped: Counter = metricsFactory.counter(prefix :+ "dropped")
+  }
+}
diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/networking/Endpoint.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/networking/Endpoint.scala
new file mode 100644
index 0000000000..f664028f78
--- /dev/null
+++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/networking/Endpoint.scala
@@ -0,0 +1,73 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.networking
+
+import cats.syntax.either.*
+import cats.syntax.reducible.*
+import cats.syntax.traverse.*
+import com.daml.nonempty.NonEmpty
+import com.daml.nonempty.catsinstances.*
+import com.digitalasset.canton.config.RequireTypes.Port
+import io.grpc.Attributes
+
+import java.net.URI
+
+/** Networking endpoint where host could be a hostname or ip address.
+  */
+final case class Endpoint(host: String, port: Port) {
+  override def toString: String = s"$host:$port"
+
+  def toURI(useTls: Boolean) = new URI(s"${if (useTls) "https" else "http"}://$toString")
+}
+
+object Endpoint {
+  val ATTR_ENDPOINT: Attributes.Key[Endpoint] =
+    Attributes.Key.create("com.digitalasset.canton.networking.Endpoint")
+
+  private val defaultHttpPort = 80
+  private val defaultHttpsPort = 443
+  private def defaultPort(useTls: Boolean): Int = if (useTls) defaultHttpsPort else defaultHttpPort
+
+  /** Extracts from a list of URIs the endpoint configuration (host and port), as well as a flag indicating
+    * whether they all use TLS or all don't. Will return an error if endpoints are not consistent in their usage
+    * of TLS.
+    */
+  def fromUris(
+      connections: NonEmpty[Seq[URI]]
+  ): Either[String, (NonEmpty[Seq[Endpoint]], Boolean)] = {
+    // Accepts the next TLS flag only if it agrees with everything seen so far.
+    def requireConsistentTls(soFar: Boolean, next: Boolean): Either[String, Boolean] =
+      Either.cond[String, Boolean](
+        soFar == next,
+        next,
+        s"All domain connections must either use TLS or all not use TLS",
+      )
+    for {
+      parsed <- connections.toNEF.traverse(fromUri)
+      endpoints = parsed.map(_._1)
+      tlsFlags = parsed.map(_._2)
+      // check that they all are using TLS, or all aren't
+      useTls <- tlsFlags.toNEF.reduceLeftM(Right(_): Either[String, Boolean])(requireConsistentTls)
+    } yield (endpoints, useTls)
+  }
+
+  private def fromUri(uri: URI): Either[String, (Endpoint, Boolean)] = {
+    // default to https if the connection scheme is not supplied
+    val scheme = Option(uri.getScheme).getOrElse("https")
+    // `staging.canton.global` is assumed to be a path rather than a host
+    val host = Option(uri.getHost).getOrElse(uri.getPath)
+    // java.net.URI will return -1 if no port is set in the URI string
+    val explicitPort = Option(uri.getPort).filter(_ >= 0)
+    val parsedScheme: Either[String, Boolean] = scheme match {
+      case "https" => Right(true)
+      case "http" => Right(false)
+      case unknownScheme =>
+        Left(s"Domain connection url [$uri] has unknown scheme: $unknownScheme")
+    }
+    parsedScheme.flatMap { useTls =>
+      Port
+        .create(explicitPort.getOrElse(defaultPort(useTls)))
+        .leftMap(err => s"Domain connection url [$uri] has an invalid port: $err")
+        .map(port => (Endpoint(host, port), useTls))
+    }
+  }
+
+}
diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/ApiRequestLogger.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/ApiRequestLogger.scala
new file mode 100644
index 0000000000..e5dc040a0c
--- /dev/null
+++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/ApiRequestLogger.scala
@@ -0,0 +1,274 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.networking.grpc
+
+import com.digitalasset.canton.config.ApiLoggingConfig
+import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
+import com.digitalasset.canton.serialization.ProtoConverter
+import com.digitalasset.canton.tracing.{SerializableTraceContext, TraceContext, TraceContextGrpc}
+import com.digitalasset.canton.util.ShowUtil.*
+import com.google.common.annotations.VisibleForTesting
+import io.grpc.ForwardingServerCall.SimpleForwardingServerCall
+import io.grpc.ForwardingServerCallListener.SimpleForwardingServerCallListener
+import io.grpc.Status.Code.*
+import io.grpc.*
+
+import java.util.concurrent.atomic.AtomicBoolean
+import scala.util.Try
+
+/** Server side interceptor that logs incoming and outgoing traffic.
+ *
+ * @param config Configuration to tailor the output
+ */
+class ApiRequestLogger(
+    override protected val loggerFactory: NamedLoggerFactory,
+    config: ApiLoggingConfig,
+) extends ApiRequestLoggerBase(loggerFactory, config)
+    with ServerInterceptor
+    with NamedLogging {
+
+  // Set to true once a client has cancelled a call; exposed for assertions in tests.
+  @VisibleForTesting
+  private[networking] val cancelled: AtomicBoolean = new AtomicBoolean(false)
+
+  override def interceptCall[ReqT, RespT](
+      call: ServerCall[ReqT, RespT],
+      headers: Metadata,
+      next: ServerCallHandler[ReqT, RespT],
+  ): ServerCall.Listener[ReqT] = {
+    val requestTraceContext: TraceContext = inferRequestTraceContext
+
+    // The remote address attribute may be absent. Wrap the attribute lookup itself (not its
+    // toString) in Option, so a missing attribute falls back to "unknown sender" instead of
+    // throwing a NullPointerException.
+    val sender = Option(call.getAttributes.get(Grpc.TRANSPORT_ATTR_REMOTE_ADDR))
+      .map(_.toString)
+      .getOrElse("unknown sender")
+    val method = call.getMethodDescriptor.getFullMethodName
+
+    def createLogMessage(message: String): String =
+      show"Request ${method.readableLoggerName(config.maxMethodLength)} by ${sender.unquoted}: ${message.unquoted}"
+
+    logger.trace(createLogMessage(s"received headers ${stringOfMetadata(headers)}"))(
+      requestTraceContext
+    )
+
+    val loggingServerCall = new LoggingServerCall(call, createLogMessage, requestTraceContext)
+    val serverCallListener = next.startCall(loggingServerCall, headers)
+    new LoggingServerCallListener(serverCallListener, createLogMessage, requestTraceContext)
+  }
+
+  /** Intercepts events sent by the client.
+   */
+  class LoggingServerCallListener[ReqT, RespT](
+      delegate: ServerCall.Listener[ReqT],
+      createLogMessage: String => String,
+      requestTraceContext: TraceContext,
+  ) extends SimpleForwardingServerCallListener[ReqT](delegate) {
+
+    /** Called when the server receives the request. */
+    override def onMessage(message: ReqT): Unit = {
+      // Prefer the trace context embedded in the message; fall back to the request's context.
+      val traceContext = traceContextOfMessage(message).getOrElse(requestTraceContext)
+      logger.debug(
+        createLogMessage(
+          show"received a message ${cutMessage(message).unquoted}\n  Request ${requestTraceContext.showTraceId}"
+        )
+      )(traceContext)
+      logThrowable(delegate.onMessage(message))(createLogMessage, traceContext)
+    }
+
+    /** Called when the client completed all message sending (except for cancellation). */
+    override def onHalfClose(): Unit = {
+      logger.trace(createLogMessage(s"finished receiving messages"))(requestTraceContext)
+      logThrowable(delegate.onHalfClose())(createLogMessage, requestTraceContext)
+    }
+
+    /** Called when the client cancels the call. */
+    override def onCancel(): Unit = {
+      logger.info(createLogMessage("cancelled"))(requestTraceContext)
+      logThrowable(delegate.onCancel())(createLogMessage, requestTraceContext)
+      cancelled.set(true)
+    }
+
+    /** Called when the server considers the call completed. */
+    override def onComplete(): Unit = {
+      logger.debug(createLogMessage("completed"))(requestTraceContext)
+      logThrowable(delegate.onComplete())(createLogMessage, requestTraceContext)
+    }
+
+    override def onReady(): Unit = {
+      // This call is "just a suggestion" according to the docs and turns out to be quite flaky, even in simple scenarios.
+      // Not logging therefore.
+      logThrowable(delegate.onReady())(createLogMessage, requestTraceContext)
+    }
+  }
+
+  /** Intercepts events sent by the server.
+   */
+  class LoggingServerCall[ReqT, RespT](
+      delegate: ServerCall[ReqT, RespT],
+      createLogMessage: String => String,
+      requestTraceContext: TraceContext,
+  ) extends SimpleForwardingServerCall[ReqT, RespT](delegate) {
+
+    /** Called when the server sends the response headers. */
+    override def sendHeaders(headers: Metadata): Unit = {
+      logger.trace(createLogMessage(s"sending response headers ${cutMessage(headers)}"))(
+        requestTraceContext
+      )
+      delegate.sendHeaders(headers)
+    }
+
+    /** Called when the server sends a response. */
+    override def sendMessage(message: RespT): Unit = {
+      val traceContext = traceContextOfMessage(message).getOrElse(requestTraceContext)
+      logger.debug(
+        createLogMessage(
+          s"sending response ${cutMessage(message)}\n  Request ${requestTraceContext.showTraceId}"
+        )
+      )(traceContext)
+      delegate.sendMessage(message)
+    }
+
+    /** Called when the server closes the call. */
+    override def close(status: Status, trailers: Metadata): Unit = {
+      val enhancedStatus = logStatusOnClose(status, trailers, createLogMessage)(requestTraceContext)
+      delegate.close(enhancedStatus, trailers)
+    }
+  }
+}
+
+/** Base class for building gRPC API request loggers.
+ * Used in Canton network to build a client-side gRPC API
+ * request logger in addition to the server-side one.
+ *
+ * See https://github.com/DACH-NY/the-real-canton-coin/blob/bea9ccff84e72957aa7ac57ae3d1a00bc6d368d0/canton/community/common/src/main/scala/com/digitalasset/canton/networking/grpc/ApiClientRequestLogger.scala#L16
+ *
+ * @param config Configuration to tailor the output
+ */
+@SuppressWarnings(Array("org.wartremover.warts.Null"))
+class ApiRequestLoggerBase(
+    override protected val loggerFactory: NamedLoggerFactory,
+    config: ApiLoggingConfig,
+) extends NamedLogging {
+
+  private lazy val printer = config.printer
+
+  /** Runs `within`, logging and rethrowing any throwable it produces. */
+  protected def logThrowable(
+      within: => Unit
+  )(createLogMessage: String => String, traceContext: TraceContext): Unit = {
+    try {
+      within
+    } catch {
+      // If the server implementation fails, the server method must return a failed future or call StreamObserver.onError.
+      // This handler is invoked, when an internal GRPC error occurs or the server implementation throws.
+      case t: Throwable =>
+        logger.error(createLogMessage("failed with an unexpected throwable"), t)(traceContext)
+        t match {
+          case _: RuntimeException =>
+            throw t
+          case _: Exception =>
+            // Convert to a RuntimeException, because GRPC is unable to handle undeclared checked exceptions.
+            throw new RuntimeException(t)
+          case _: Throwable =>
+            throw t
+        }
+    }
+  }
+
+  /** Logs the final status of a call at a level chosen by its status code and returns the
+    * (possibly description-enhanced) status to pass on to the client.
+    */
+  protected def logStatusOnClose(
+      status: Status,
+      trailers: Metadata,
+      createLogMessage: String => String,
+  )(implicit requestTraceContext: TraceContext): Status = {
+    val enhancedStatus = enhance(status)
+
+    val statusString = Option(enhancedStatus.getDescription).filterNot(_.isEmpty) match {
+      case Some(d) => s"${enhancedStatus.getCode}/$d"
+      case None => enhancedStatus.getCode.toString
+    }
+
+    val trailersString = stringOfTrailers(trailers)
+
+    if (enhancedStatus.getCode == Status.OK.getCode) {
+      logger.debug(
+        createLogMessage(s"succeeded($statusString)$trailersString"),
+        enhancedStatus.getCause,
+      )
+    } else {
+      val message = createLogMessage(s"failed with $statusString$trailersString")
+      if (enhancedStatus.getCode == UNKNOWN || enhancedStatus.getCode == DATA_LOSS) {
+        logger.error(message, enhancedStatus.getCause)
+      } else if (enhancedStatus.getCode == INTERNAL) {
+        if (enhancedStatus.getDescription == "Half-closed without a request") {
+          // If a call is cancelled, GRPC may half-close the call before the first message has been delivered.
+          // The result is this status.
+          // Logging with INFO to not confuse the user.
+          // The status is still delivered to the client, to facilitate troubleshooting if there is a deeper problem.
+          logger.info(message, enhancedStatus.getCause)
+        } else {
+          logger.error(message, enhancedStatus.getCause)
+        }
+      } else if (enhancedStatus.getCode == UNAUTHENTICATED) {
+        logger.debug(message, enhancedStatus.getCause)
+      } else {
+        logger.info(message, enhancedStatus.getCause)
+      }
+    }
+
+    enhancedStatus
+  }
+
+  /** Renders a message for logging, or an empty string if payload logging is disabled. */
+  @SuppressWarnings(Array("org.wartremover.warts.Product"))
+  protected def cutMessage(message: Any): String = {
+    if (config.logMessagePayloads) {
+      printer.printAdHoc(message)
+    } else ""
+  }
+
+  protected def stringOfTrailers(trailers: Metadata): String =
+    if (!config.logMessagePayloads || trailers == null || trailers.keys().isEmpty) {
+      ""
+    } else {
+      s"\n  Trailers: ${stringOfMetadata(trailers)}"
+    }
+
+  protected def stringOfMetadata(metadata: Metadata): String =
+    if (!config.logMessagePayloads || metadata == null) {
+      ""
+    } else {
+      metadata.toString.limit(config.maxMetadataSize).toString
+    }
+
+  protected def enhance(status: Status): Status = {
+    if (status.getDescription == null && status.getCause != null) {
+      // Copy the exception message to the status in order to transmit it to the client.
+      // If you consider this a security risk:
+      // - Exceptions are logged. Therefore, exception messages must not contain confidential data anyway.
+      // - Note that scalapb.grpc.Grpc.completeObserver also copies exception messages into the status description.
+      //   So removing this method would not mitigate the risk.
+      status.withDescription(status.getCause.getLocalizedMessage)
+    } else {
+      status
+    }
+  }
+
+  /** Uses the trace context propagated via gRPC, or creates a fresh one if none was propagated. */
+  protected def inferRequestTraceContext: TraceContext = {
+    val grpcTraceContext = TraceContextGrpc.fromGrpcContext
+    if (grpcTraceContext.traceId.isDefined) {
+      grpcTraceContext
+    } else {
+      TraceContext.withNewTraceContext(identity)
+    }
+  }
+
+  // Uses structural typing to extract an optional `traceContext` field from arbitrary
+  // protobuf messages; returns None if the message has no such field or it cannot be parsed.
+  @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf"))
+  protected def traceContextOfMessage[A](message: Any): Option[TraceContext] = {
+    import scala.language.reflectiveCalls
+    for {
+      maybeTraceContextP <- Try(
+        message
+          .asInstanceOf[{ def traceContext: Option[com.digitalasset.canton.v0.TraceContext] }]
+          .traceContext
+      ).toOption
+      tc <- ProtoConverter.required("traceContextOfMessage", maybeTraceContextP).toOption
+      traceContext <- SerializableTraceContext.fromProtoV0(tc).toOption
+    } yield traceContext.unwrap
+  }
+}
diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/CantonCommunityServerInterceptors.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/CantonCommunityServerInterceptors.scala
new file mode 100644
index 0000000000..86ba0c0f40
--- /dev/null
+++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/CantonCommunityServerInterceptors.scala
@@ -0,0 +1,60 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.networking.grpc
+
+import com.daml.metrics.grpc.{GrpcMetricsServerInterceptor, GrpcServerMetrics}
+import com.digitalasset.canton.config.ApiLoggingConfig
+import com.digitalasset.canton.logging.NamedLoggerFactory
+import com.digitalasset.canton.tracing.{TraceContextGrpc, TracingConfig}
+import io.grpc.ServerInterceptors.intercept
+import io.grpc.ServerServiceDefinition
+
+import scala.util.chaining.*
+
+trait CantonServerInterceptors {
+  def addAllInterceptors(
+      service: ServerServiceDefinition,
+      withLogging: Boolean,
+  ): ServerServiceDefinition
+}
+
+/** Community edition wiring of the standard server-side interceptors:
+  * request logging (optional), trace-context propagation (per tracing config) and gRPC metrics.
+  */
+class CantonCommunityServerInterceptors(
+    tracingConfig: TracingConfig,
+    apiLoggingConfig: ApiLoggingConfig,
+    loggerFactory: NamedLoggerFactory,
+    grpcMetrics: GrpcServerMetrics,
+) extends CantonServerInterceptors {
+
+  // Wraps the service with an ApiRequestLogger, unless logging was disabled for this service.
+  private def withRequestLogging(
+      service: ServerServiceDefinition,
+      loggingEnabled: Boolean,
+  ): ServerServiceDefinition =
+    if (!loggingEnabled) service
+    else intercept(service, new ApiRequestLogger(loggerFactory, apiLoggingConfig))
+
+  // Adds trace-context propagation only when enabled in the tracing configuration.
+  private def withTraceContextPropagation(
+      service: ServerServiceDefinition
+  ): ServerServiceDefinition =
+    tracingConfig.propagation match {
+      case TracingConfig.Propagation.Enabled =>
+        intercept(service, TraceContextGrpc.serverInterceptor)
+      case TracingConfig.Propagation.Disabled => service
+    }
+
+  // gRPC server metrics are always recorded.
+  private def withMetrics(
+      service: ServerServiceDefinition
+  ): ServerServiceDefinition =
+    intercept(service, new GrpcMetricsServerInterceptor(grpcMetrics))
+
+  def addAllInterceptors(
+      service: ServerServiceDefinition,
+      withLogging: Boolean,
+  ): ServerServiceDefinition =
+    service
+      .pipe(withRequestLogging(_, withLogging))
+      .pipe(withTraceContextPropagation)
+      .pipe(withMetrics)
+}
diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/CantonGrpcUtil.scala
b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/CantonGrpcUtil.scala
new file mode 100644
index 0000000000..a5c8eab245
--- /dev/null
+++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/CantonGrpcUtil.scala
@@ -0,0 +1,264 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.networking.grpc
+
+import cats.data.EitherT
+import cats.implicits.*
+import com.daml.error.{ErrorCategory, ErrorCode, Explanation, Resolution}
+import com.digitalasset.canton.ProtoDeserializationError
+import com.digitalasset.canton.concurrent.DirectExecutionContext
+import com.digitalasset.canton.error.CantonErrorGroups.GrpcErrorGroup
+import com.digitalasset.canton.error.{BaseCantonError, CantonError}
+import com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, Lifecycle}
+import com.digitalasset.canton.logging.{ErrorLoggingContext, TracedLogger}
+import com.digitalasset.canton.networking.grpc.CantonGrpcUtil.GrpcErrors.AbortedDueToShutdown
+import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
+import com.digitalasset.canton.tracing.{TraceContext, TraceContextGrpc}
+import com.digitalasset.canton.util.Thereafter.syntax.*
+import com.digitalasset.canton.util.{DelayUtil, EitherTUtil}
+import io.grpc.*
+import io.grpc.stub.AbstractStub
+
+import java.util.concurrent.TimeUnit
+import scala.concurrent.duration.{Duration, FiniteDuration}
+import scala.concurrent.{ExecutionContext, Future}
+import scala.util.{Failure, Success}
+
+/** Utilities for mapping Canton errors to gRPC statuses and for sending gRPC requests
+  * with logging and retries.
+  */
+object CantonGrpcUtil {
+  // Wraps a proto-deserialization failure into a CantonError.
+  def wrapErr[T](value: ParsingResult[T])(implicit
+      loggingContext: ErrorLoggingContext,
+      ec: ExecutionContext,
+  ): EitherT[Future, CantonError, T] = {
+    wrapErr(EitherT.fromEither[Future](value))
+  }
+  def wrapErr[T](value: EitherT[Future, ProtoDeserializationError, T])(implicit
+      loggingContext: ErrorLoggingContext,
+      ec: ExecutionContext,
+  ): EitherT[Future, CantonError, T] = {
+    value.leftMap(x => ProtoDeserializationError.ProtoDeserializationFailure.Wrap(x): CantonError)
+  }
+
+  // Deprecated: maps any left value to INVALID_ARGUMENT; prefer the mapErrNew* variants,
+  // which preserve the proper error code of a CantonError.
+  @Deprecated
+  def mapErr[T, C](value: Either[T, C])(implicit
+      ec: ExecutionContext
+  ): EitherT[Future, StatusRuntimeException, C] =
+    mapErr(EitherT.fromEither[Future](value))
+
+  @Deprecated
+  def mapErr[T, C](value: EitherT[Future, T, C])(implicit
+      ec: ExecutionContext
+  ): EitherT[Future, StatusRuntimeException, C] =
+    value.leftMap(x => invalidArgument(x.toString))
+
+  // The mapErrNew* family converts a CantonError on the left into its gRPC representation.
+  def mapErrNew[T <: CantonError, C](value: Either[T, C])(implicit
+      ec: ExecutionContext
+  ): EitherT[Future, StatusRuntimeException, C] =
+    EitherT.fromEither[Future](value).leftMap(_.asGrpcError)
+
+  def mapErrNewET[T <: CantonError, C](value: EitherT[Future, T, C])(implicit
+      ec: ExecutionContext
+  ): EitherT[Future, StatusRuntimeException, C] =
+    value.leftMap(_.asGrpcError)
+
+  // A shutdown of the node is surfaced to the client as AbortedDueToShutdown.
+  def mapErrNewETUS[T <: CantonError, C](value: EitherT[FutureUnlessShutdown, T, C])(implicit
+      ec: ExecutionContext,
+      errorLoggingContext: ErrorLoggingContext,
+  ): EitherT[Future, StatusRuntimeException, C] =
+    value.onShutdown(Left(AbortedDueToShutdown.Error())).leftMap(_.asGrpcError)
+
+  def mapErrNew[T <: BaseCantonError, C](value: EitherT[Future, T, C])(implicit
+      executionContext: ExecutionContext,
+      errorLoggingContext: ErrorLoggingContext,
+  ): Future[C] =
+    EitherTUtil.toFuture(value.leftMap(_.asGrpcError))
+
+  def mapErrNew[T <: CantonError, C](value: EitherT[Future, T, C])(implicit
+      ec: ExecutionContext
+  ): Future[C] =
+    EitherTUtil.toFuture(value.leftMap(_.asGrpcError))
+
+  def mapErrNewEUS[T <: CantonError, C](value: EitherT[FutureUnlessShutdown, T, C])(implicit
+      ec: ExecutionContext,
+      errorLoggingContext: ErrorLoggingContext,
+  ): Future[C] =
+    EitherTUtil.toFuture(mapErrNewETUS(value))
+
+  @Deprecated
+  def invalidArgument(err: String): StatusRuntimeException =
+    Status.INVALID_ARGUMENT.withDescription(err).asRuntimeException()
+
+  /** Wrapper method for sending a Grpc request.
+   * Takes care of appropriate logging and retrying.
+   *
+   * NOTE that this will NOT WORK for requests with streamed responses, as such requests will report errors to the
+   * corresponding [[io.grpc.stub.StreamObserver]]. You need to do error handling within the corresponding
+   * [[io.grpc.stub.StreamObserver]].
+   *
+   * @param client the Grpc client used to send the request
+   * @param serverName used for logging
+   * @param send the client method for sending the request
+   * @param requestDescription used for logging
+   * @param timeout determines how long to retry or wait for a response.
+   *                Will retry until 70% of this timeout has elapsed.
+   *                Will wait for a response until this timeout has elapsed.
+   * @param logPolicy use this to configure log levels for errors
+   * @param retryPolicy invoked after an error to determine whether to retry
+   */
+  def sendGrpcRequest[Svc <: AbstractStub[Svc], Res](client: Svc, serverName: String)(
+      send: Svc => Future[Res],
+      requestDescription: String,
+      timeout: Duration,
+      logger: TracedLogger,
+      logPolicy: GrpcError => TracedLogger => TraceContext => Unit = err =>
+        logger => traceContext => err.log(logger)(traceContext),
+      retryPolicy: GrpcError => Boolean = _.retry,
+  )(implicit traceContext: TraceContext): EitherT[Future, GrpcError, Res] = {
+    implicit val ec: ExecutionContext = DirectExecutionContext(logger)
+
+    // depending on the desired timeout, use a deadline or not
+    val (clientWithDeadline, calcEffectiveBackoff) = timeout match {
+      case finite: FiniteDuration =>
+        // The deadline for individual requests.
+        val requestDeadline = Deadline.after(finite.length, finite.unit)
+        // After this deadline, we won't retry anymore.
+        // This deadline is significantly before `requestDeadline`, because we want to avoid DEADLINE_EXCEEDED due to overly short deadlines.
+        // The retry deadline is placed at the 70% point of the timeout (offset by 30% from the request deadline).
+        val retryDeadline = requestDeadline.offset(-finite.toMillis * 3 / 10, TimeUnit.MILLISECONDS)
+        (
+          client.withDeadline(requestDeadline),
+          (
+              backoffMs => Math.min(backoffMs, retryDeadline.timeRemaining(TimeUnit.MILLISECONDS))
+          ): Long => Long,
+        )
+      case Duration.Inf =>
+        // No deadline: backoffs are used as-is.
+        (client, (x => x): Long => Long)
+      case _ =>
+        // E.g. Duration.MinusInf / Duration.Undefined; treated like an infinite timeout.
+        logger.error(s"Ignoring unexpected timeout ${timeout} value.")
+        (client, (x => x): Long => Long)
+    }
+
+    // Sends the request, retrying with the given backoff (doubled on each attempt) while
+    // the retry policy allows it and the retry deadline has not elapsed.
+    def go(backoffMs: Long): Future[Either[GrpcError, Res]] = {
+      logger.debug(s"Sending request $requestDescription to $serverName.")
+      TraceContextGrpc.withGrpcContext(traceContext)(send(clientWithDeadline)).transformWith {
+        case Success(value) =>
+          logger.debug(s"Request $requestDescription has succeeded for $serverName.")
+          Future.successful(Right(value))
+        case Failure(e: StatusRuntimeException) =>
+          val error = GrpcError(requestDescription, serverName, e)
+          logPolicy(error)(logger)(traceContext)
+          if (retryPolicy(error)) {
+            val effectiveBackoff = calcEffectiveBackoff(backoffMs)
+            if (effectiveBackoff > 0) {
+              logger.info(s"Waiting for ${effectiveBackoff}ms before retrying...")
+              DelayUtil
+                .delay(FiniteDuration.apply(effectiveBackoff, TimeUnit.MILLISECONDS))
+                .flatMap { _ =>
+                  logger.info(s"Retrying request $requestDescription for $serverName...")
+                  go(backoffMs * 2)
+                }
+            } else {
+              logger.warn("Retry timeout has elapsed, giving up.")
+              Future.successful(Left(error))
+            }
+          } else {
+            logger.debug(
+              s"Retry has not been configured for ${error.getClass.getSimpleName}, giving up."
+            )
+            Future.successful(Left(error))
+          }
+        case Failure(e) =>
+          // Non-gRPC failures are not mapped to GrpcError; they are propagated as a failed future.
+          logger
+            .error(
+              s"An unexpected exception has occurred while sending request $requestDescription to $serverName.",
+              e,
+            )
+          Future.failed(e)
+      }
+    }
+    // Initial backoff of 1ms; doubled on every retry.
+    EitherT(go(1))
+  }
+
+  /** Method to create a grpc channel and send a single request
+   *
+   * Based on [[sendGrpcRequest]]
+   */
+  def sendSingleGrpcRequest[Svc <: AbstractStub[Svc], Res](
+      serverName: String,
+      requestDescription: String,
+      channel: ManagedChannel,
+      stubFactory: Channel => Svc,
+      timeout: Duration,
+      logger: TracedLogger,
+      logPolicy: GrpcError => TracedLogger => TraceContext => Unit = err =>
+        logger => traceContext => err.log(logger)(traceContext),
+      retryPolicy: GrpcError => Boolean = _.retry,
+  )(
+      send: Svc => Future[Res]
+  )(implicit traceContext: TraceContext): EitherT[Future, GrpcError, Res] = {
+
+    val closeableChannel = Lifecycle.toCloseableChannel(channel, logger, "sendSingleGrpcRequest")
+    val stub = stubFactory(closeableChannel.channel)
+
+    val res = sendGrpcRequest(stub, serverName)(
+      send(_),
+      requestDescription,
+      timeout,
+      logger,
+      logPolicy,
+      retryPolicy,
+    )
+
+    implicit val ec = DirectExecutionContext(logger)
+    // Close the channel once the request has completed (success or failure).
+    res.thereafter { _ =>
+      closeableChannel.close()
+    }
+  }
+
+  // Log policy that stays quiet except when the error carries a cause worth preserving.
+  def silentLogPolicy(error: GrpcError)(logger: TracedLogger)(traceContext: TraceContext): Unit = {
+    // Log an info, if a cause is defined to not discard the cause information
+    Option(error.status.getCause).foreach { cause =>
+      logger.info(error.toString, cause)(traceContext)
+    }
+  }
+
+  // Retry policy that additionally stops retrying once the node is closing.
+  def retryUnlessClosing(closing: () => Boolean)(error: GrpcError): Boolean = {
+    !closing() && error.retry
+  }
+
+  object RetryPolicy {
+    lazy val noRetry: GrpcError => Boolean = _ => false
+  }
+
+  /** The name of the service that is associated with the sequencer servers' health status.
+   * This name can have no relation with the gRPC services that the server is running with, and can be anything
+   * as long as the client and servers use the same value.
+   */
+  val sequencerHealthCheckServiceName = "sequencer-health-check-service"
+
+  object GrpcErrors extends GrpcErrorGroup {
+
+    /** Canton Error that can be used in Grpc Services to signal that a request could not be processed
+     * successfully due to the node shutting down
+     */
+    @Explanation(
+      "This error is returned when processing of the request was aborted due to the node shutting down."
+    )
+    @Resolution(
+      "Retry the request against an active and available node."
+    )
+    object AbortedDueToShutdown
+        extends ErrorCode(
+          id = "ABORTED_DUE_TO_SHUTDOWN",
+          ErrorCategory.InvalidGivenCurrentSystemStateOther,
+        ) {
+      final case class Error()(implicit val loggingContext: ErrorLoggingContext)
+          extends CantonError.Impl("request aborted due to shutdown")
+    }
+  }
+
+  // Converts a FutureUnlessShutdown into a Future that fails with AbortedDueToShutdown on shutdown.
+  implicit class GrpcFUSExtended[A](val f: FutureUnlessShutdown[A]) extends AnyVal {
+    def asGrpcResponse(implicit ec: ExecutionContext, elc: ErrorLoggingContext): Future[A] = {
+      f.failOnShutdownTo(GrpcErrors.AbortedDueToShutdown.Error().asGrpcError)
+    }
+  }
+}
diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/CantonServerBuilder.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/CantonServerBuilder.scala
new file mode 100644
index 0000000000..2a51fe244b
--- /dev/null
+++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/CantonServerBuilder.scala
@@ -0,0 +1,210 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.networking.grpc + +import com.daml.metrics.api.MetricName +import com.daml.metrics.grpc.GrpcServerMetrics +import com.digitalasset.canton.DiscardOps +import com.digitalasset.canton.config.RequireTypes.NonNegativeInt +import com.digitalasset.canton.config.* +import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.metrics.MetricHandle +import com.digitalasset.canton.tracing.TracingConfig +import io.grpc.* +import io.grpc.netty.{GrpcSslContexts, NettyServerBuilder} +import io.grpc.util.MutableHandlerRegistry +import io.netty.handler.ssl.{SslContext, SslContextBuilder} + +import java.net.InetSocketAddress +import java.util.concurrent.{Executor, TimeUnit} +import scala.annotation.nowarn + +/** The [[io.grpc.ServerBuilder]] is pretty "loose" with its type parameters + * causing some issues for `scalac` and IntelliJ. + * Here we provide a wrapper hiding these type issues. + */ +trait CantonServerBuilder { + def mutableHandlerRegistry(): CantonMutableHandlerRegistry + + def addService(service: BindableService, withLogging: Boolean): CantonServerBuilder + + def addService(service: ServerServiceDefinition, withLogging: Boolean = true): CantonServerBuilder + + def build: Server + + def maxInboundMessageSize(bytes: NonNegativeInt): CantonServerBuilder +} + +trait CantonMutableHandlerRegistry extends AutoCloseable { + def addService( + service: ServerServiceDefinition, + withLogging: Boolean = true, + ): (ServerServiceDefinition, CantonMutableHandlerRegistry) + + def addServiceU( + service: ServerServiceDefinition, + withLogging: Boolean = true, + ): Unit = addService(service, withLogging).discard + + def removeService(service: ServerServiceDefinition): CantonMutableHandlerRegistry + + def removeServiceU(service: ServerServiceDefinition): Unit = removeService(service).discard +} + +object CantonServerBuilder { + + /** Creates our wrapper for a grpc ServerBuilder. 
+ * As we only create our servers from our configuration this is intentionally private. + */ + private class BaseBuilder( + serverBuilder: ServerBuilder[_ <: ServerBuilder[?]], + interceptors: CantonServerInterceptors, + ) extends CantonServerBuilder { + + override def mutableHandlerRegistry(): CantonMutableHandlerRegistry = + new CantonMutableHandlerRegistry { + val registry = new MutableHandlerRegistry() + serverBuilder.fallbackHandlerRegistry(registry) + + override def addService( + service: ServerServiceDefinition, + withLogging: Boolean, + ): (ServerServiceDefinition, CantonMutableHandlerRegistry) = { + val serverServiceDefinition = interceptors.addAllInterceptors(service, withLogging) + registry.addService(serverServiceDefinition) + + // addAllInterceptors call returns a new wrapped ServerServiceDefinition reference + // Hence, return the new reference for allowing removal in removeService. + serverServiceDefinition -> this + } + + override def removeService( + service: ServerServiceDefinition + ): CantonMutableHandlerRegistry = { + + registry.removeService(service) + this + } + + override def close(): Unit = { + for (_ <- 0 until registry.getServices.size()) { + registry + .removeService(registry.getServices.get(registry.getServices.size() - 1)) + .discard[Boolean] + } + } + } + + override def addService(service: BindableService, withLogging: Boolean): CantonServerBuilder = { + serverBuilder.addService(interceptors.addAllInterceptors(service.bindService(), withLogging)) + this + } + + override def maxInboundMessageSize(bytes: NonNegativeInt): CantonServerBuilder = { + serverBuilder.maxInboundMessageSize(bytes.unwrap) + this + } + + override def addService( + service: ServerServiceDefinition, + withLogging: Boolean, + ): CantonServerBuilder = { + serverBuilder.addService(interceptors.addAllInterceptors(service, withLogging)) + this + } + + override def build: Server = serverBuilder.build() + } + + def configureKeepAlive( + keepAlive: 
Option[KeepAliveServerConfig], + builder: NettyServerBuilder, + ): NettyServerBuilder = { + keepAlive.fold(builder) { opt => + val time = opt.time.unwrap.toMillis + val timeout = opt.timeout.unwrap.toMillis + val permitTime = opt.permitKeepAliveTime.unwrap.toMillis + builder + .keepAliveTime(time, TimeUnit.MILLISECONDS) + .keepAliveTimeout(timeout, TimeUnit.MILLISECONDS) + .permitKeepAliveTime( + permitTime, + TimeUnit.MILLISECONDS, + ) // gracefully allowing a bit more aggressive keep alives from clients + } + } + + /** Create a GRPC server build using conventions from our configuration. + * @param config server configuration + * @return builder to attach application services and interceptors + */ + def forConfig( + config: ServerConfig, + metricsPrefix: MetricName, + @nowarn("cat=deprecation") metricsFactory: MetricHandle.MetricsFactory, + executor: Executor, + loggerFactory: NamedLoggerFactory, + apiLoggingConfig: ApiLoggingConfig, + tracing: TracingConfig, + grpcMetrics: GrpcServerMetrics, + ): CantonServerBuilder = { + val builder = + NettyServerBuilder + .forAddress(new InetSocketAddress(config.address, config.port.unwrap)) + .executor(executor) + .maxInboundMessageSize(config.maxInboundMessageSize.unwrap) + + val builderWithSsl = config.sslContext match { + case Some(sslContext) => + builder.sslContext(sslContext) + case None => + builder + } + + new BaseBuilder( + reifyBuilder(configureKeepAlive(config.keepAliveServer, builderWithSsl)), + config.instantiateServerInterceptors( + tracing, + apiLoggingConfig, + metricsPrefix, + metricsFactory, + loggerFactory, + grpcMetrics, + ), + ) + } + + private def baseSslBuilder(config: BaseTlsArguments): SslContextBuilder = { + import scala.jdk.CollectionConverters.* + val s1 = + GrpcSslContexts.forServer(config.certChainFile.unwrap, config.privateKeyFile.unwrap) + val s2 = config.protocols.fold(s1)(protocols => s1.protocols(protocols *)) + config.ciphers.fold(s2)(ciphers => s2.ciphers(ciphers.asJava)) + } + + def 
baseSslContext(config: TlsBaseServerConfig): SslContext = baseSslBuilder(config).build() + + def sslContext(config: TlsServerConfig): SslContext = { + // TODO(#7086): secrets service support not yet implemented for canton admin services + config.secretsUrl.foreach { url => + throw new IllegalArgumentException( + s"Canton admin services do not yet support 'Secrets Service' $url." + ) + } + + val s1 = baseSslBuilder(config) + val s2 = config.trustCollectionFile.fold(s1)(trustCollection => + s1.trustManager(trustCollection.unwrap) + ) + val s3 = s2.clientAuth(config.clientAuth.clientAuth) + s3.build() + } + + /** We know this operation is safe due to the definition of [[io.grpc.ServerBuilder]]. + * This method isolates the usage of `asInstanceOf` to only here. + */ + @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) + private def reifyBuilder(builder: ServerBuilder[?]): ServerBuilder[_ <: ServerBuilder[?]] = + builder.asInstanceOf[ServerBuilder[_ <: ServerBuilder[?]]] +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/ClientChannelBuilder.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/ClientChannelBuilder.scala new file mode 100644 index 0000000000..5f88cc4d1f --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/ClientChannelBuilder.scala @@ -0,0 +1,164 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.networking.grpc + +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.DiscardOps +import com.digitalasset.canton.config.RequireTypes.NonNegativeInt +import com.digitalasset.canton.config.{ClientConfig, KeepAliveClientConfig, TlsClientConfig} +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.networking.Endpoint +import com.digitalasset.canton.tracing.TracingConfig.Propagation +import com.digitalasset.canton.tracing.{NoTracing, TraceContextGrpc} +import com.digitalasset.canton.util.ResourceUtil.withResource +import com.google.protobuf.ByteString +import io.grpc.ManagedChannel +import io.grpc.netty.{GrpcSslContexts, NettyChannelBuilder} +import io.netty.handler.ssl.SslContext + +import java.util.concurrent.atomic.AtomicReference +import java.util.concurrent.{Executor, TimeUnit} + +/** Construct a GRPC channel to be used by a client within canton. 
*/ +trait ClientChannelBuilder { + def create( + endpoints: NonEmpty[Seq[Endpoint]], + useTls: Boolean, + executor: Executor, + trustCertificate: Option[ByteString] = None, + traceContextPropagation: Propagation = Propagation.Disabled, + maxInboundMessageSize: Option[NonNegativeInt] = None, + keepAliveClient: Option[KeepAliveClientConfig] = None, + ): NettyChannelBuilder = { + // the bulk of this channel builder is the same between community and enterprise + // we only extract the bits that are different into calls to the protected implementation specific methods + + // the builder calls mutate this instance so is fine to assign to a val + val builder = createNettyChannelBuilder(endpoints) + additionalChannelBuilderSettings(builder, endpoints) + + builder.executor(executor) + maxInboundMessageSize.foreach(s => builder.maxInboundMessageSize(s.unwrap)) + ClientChannelBuilder.configureKeepAlive(keepAliveClient, builder).discard + if (traceContextPropagation == Propagation.Enabled) + builder.intercept(TraceContextGrpc.clientInterceptor).discard + + if (useTls) { + builder + .useTransportSecurity() // this is strictly unnecessary as is the default for the channel builder, but can't hurt either + + // add certificates if provided + trustCertificate + .fold(builder) { certChain => + val sslContext = withResource(certChain.newInput()) { inputStream => + GrpcSslContexts.forClient().trustManager(inputStream).build() + } + builder.sslContext(sslContext) + } + .discard + } else + builder.usePlaintext().discard + + builder + } + + /** Create the initial netty channel builder before customizing settings */ + protected def createNettyChannelBuilder(endpoints: NonEmpty[Seq[Endpoint]]): NettyChannelBuilder + + /** Set implementation specific channel settings */ + protected def additionalChannelBuilderSettings( + builder: NettyChannelBuilder, + endpoints: NonEmpty[Seq[Endpoint]], + ): Unit = () +} + +trait ClientChannelBuilderFactory extends (NamedLoggerFactory => 
ClientChannelBuilder) + +/** Supports creating GRPC channels but only supports a single host. + * If multiple endpoints are provided a warning will be logged and the first supplied will be used. + */ +class CommunityClientChannelBuilder(protected val loggerFactory: NamedLoggerFactory) + extends ClientChannelBuilder + with NamedLogging + with NoTracing { + + /** Create the initial netty channel builder before customizing settings */ + override protected def createNettyChannelBuilder( + endpoints: NonEmpty[Seq[Endpoint]] + ): NettyChannelBuilder = { + val singleHost = endpoints.head1 + + // warn that community does not support more than one domain connection if we've been passed multiple + if (endpoints.size > 1) { + logger.warn( + s"Canton Community does not support using many connections for a domain. Defaulting to first: $singleHost" + ) + } + + NettyChannelBuilder.forAddress(singleHost.host, singleHost.port.unwrap) + } +} + +object CommunityClientChannelBuilder extends ClientChannelBuilderFactory { + override def apply(loggerFactory: NamedLoggerFactory): ClientChannelBuilder = + new CommunityClientChannelBuilder(loggerFactory) +} + +object ClientChannelBuilder { + // basic service locator to prevent having to pass these instances around everywhere + private lazy val factoryRef = + new AtomicReference[ClientChannelBuilderFactory](CommunityClientChannelBuilder) + def apply(loggerFactory: NamedLoggerFactory): ClientChannelBuilder = + factoryRef.get()(loggerFactory) + private[canton] def setFactory(factory: ClientChannelBuilderFactory): Unit = + factoryRef.set(factory) + + def sslContext(tls: TlsClientConfig): SslContext = { + val builder = GrpcSslContexts + .forClient() + val trustBuilder = tls.trustCollectionFile.fold(builder)(trustCollection => + builder.trustManager(trustCollection.unwrap) + ) + tls.clientCert + .fold(trustBuilder)(cc => trustBuilder.keyManager(cc.certChainFile, cc.privateKeyFile)) + .build() + } + + def configureKeepAlive( + keepAlive: 
Option[KeepAliveClientConfig], + builder: NettyChannelBuilder, + ): NettyChannelBuilder = { + keepAlive.fold(builder) { opt => + val time = opt.time.unwrap + val timeout = opt.timeout.unwrap + builder + .keepAliveTime(time.toMillis, TimeUnit.MILLISECONDS) + .keepAliveTimeout(timeout.toMillis, TimeUnit.MILLISECONDS) + } + } + + /** Simple channel construction for test and console clients. + * `maxInboundMessageSize` is 2GB; so don't use this to connect to an untrusted server. + */ + def createChannelToTrustedServer( + clientConfig: ClientConfig + )(implicit executor: Executor): ManagedChannel = { + val baseBuilder: NettyChannelBuilder = NettyChannelBuilder + .forAddress(clientConfig.address, clientConfig.port.unwrap) + .executor(executor) + .maxInboundMessageSize(Int.MaxValue) + + // apply keep alive settings + configureKeepAlive( + clientConfig.keepAliveClient, + // if tls isn't configured assume that it's a plaintext channel + clientConfig.tls + .fold(baseBuilder.usePlaintext()) { tls => + baseBuilder + .useTransportSecurity() + .sslContext(sslContext(tls)) + }, + ).build() + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/GrpcError.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/GrpcError.scala new file mode 100644 index 0000000000..36db555f72 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/GrpcError.scala @@ -0,0 +1,235 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.networking.grpc + +import com.digitalasset.canton.error.DecodedRpcStatus +import com.digitalasset.canton.error.ErrorCodeUtils.errorCategoryFromString +import com.digitalasset.canton.logging.TracedLogger +import com.digitalasset.canton.sequencing.authentication.MemberAuthentication.{ + MissingToken, + ParticipantDisabled, +} +import com.digitalasset.canton.sequencing.authentication.grpc.Constant +import com.digitalasset.canton.tracing.TraceContext +import io.grpc.Status.Code.* +import io.grpc.{Metadata, Status, StatusRuntimeException} + +import scala.annotation.nowarn + +sealed trait GrpcError { + + def request: String + def serverName: String + def status: Status + def decodedRpcStatus: Option[DecodedRpcStatus] + def optTrailers: Option[Metadata] + def hint: String = "" + + protected def logFullCause: Boolean = true + + override def toString: String = { + val trailersString = (optTrailers, decodedRpcStatus) match { + case (_, Some(rpc)) => + "\n " + (rpc.correlationId.toList.map(s => s"CorrelationId: $s") ++ rpc.retryIn + .map(s => s"RetryIn: $s") + .toList ++ Seq(s"Context: ${rpc.context}")).mkString("\n ") + case (Some(trailers), None) if !trailers.keys.isEmpty => s"\n Trailers: $trailers" + case _ => "" + } + + val causes = GrpcError.collectCauses(Option(status.getCause)) + val causesString = if (causes.isEmpty) "" else causes.mkString("\n Causes: ", "\n ", "") + + s"""Request failed for $serverName.$hint + | ${getClass.getSimpleName}: ${status.getCode}/${status.getDescription} + | Request: $request""".stripMargin + trailersString + causesString + } + + def log(logger: TracedLogger)(implicit traceContext: TraceContext): Unit = + if (logFullCause) + logger.warn(toString, status.getCause) + else { + logger.warn(toString) + logger.debug("The warning was caused by:", status.getCause) + } + + def retry: Boolean = decodedRpcStatus.exists(_.isRetryable) +} + +object GrpcError { + + def 
collectCauses(maybeThrowable: Option[Throwable]): Seq[String] = + maybeThrowable match { + case Some(t) => t.getMessage +: collectCauses(Option(t.getCause)) + case None => Seq.empty + } + + /** The server has refused the request, because it is invalid. + * The client should not have sent the request. + * The server has not processed the request. + * It does not make sense to retry. + */ + final case class GrpcClientError( + request: String, + serverName: String, + status: Status, + optTrailers: Option[Metadata], + decodedRpcStatus: Option[DecodedRpcStatus], + ) extends GrpcError { + override def log(logger: TracedLogger)(implicit traceContext: TraceContext): Unit = + logger.error(toString, status.getCause) + } + + /** An internal error has occurred at the server. + * The server may have partially processed the request. + * It does not make sense to retry. + */ + final case class GrpcServerError( + request: String, + serverName: String, + status: Status, + optTrailers: Option[Metadata], + decodedRpcStatus: Option[DecodedRpcStatus], + ) extends GrpcError { + override def log(logger: TracedLogger)(implicit traceContext: TraceContext): Unit = + logger.error(toString, status.getCause) + } + + def checkAuthenticationError(optTrailers: Option[Metadata], expectAny: Seq[String]): Boolean = { + val optErrorCode = for { + trailers <- optTrailers + errorCode <- Option(trailers.get(Constant.AUTHENTICATION_ERROR_CODE)) + } yield errorCode + expectAny.exists(optErrorCode.contains) + } + + /** The server was unable to process the request. + * The server has not processed the request. + * It may or may not make sense to retry, depending on the specific situation. 
+ */ + final case class GrpcRequestRefusedByServer( + request: String, + serverName: String, + status: Status, + optTrailers: Option[Metadata], + decodedRpcStatus: Option[DecodedRpcStatus], + ) extends GrpcError { + + lazy val isAuthenticationTokenMissing: Boolean = + checkAuthenticationError(optTrailers, Seq(MissingToken.toString)) + + override def log(logger: TracedLogger)(implicit traceContext: TraceContext): Unit = + if (isAuthenticationTokenMissing) { + // Logging INFO only, as this happens from time to time due to token expiration. + // Warn would be more natural, but very hard to manage in tests. + logger.info(toString, status.getCause) + } else { + logger.warn(toString, status.getCause) + } + } + + /** The client gave up waiting for a response. + * The server may or may not process the request. + * It may or may not make sense to retry, depending on the specific situation. + */ + final case class GrpcClientGaveUp( + request: String, + serverName: String, + status: Status, + optTrailers: Option[Metadata], + decodedRpcStatus: Option[DecodedRpcStatus], + ) extends GrpcError { + + lazy val isClientCancellation: Boolean = status.getCode == CANCELLED && status.getCause == null + + override def log(logger: TracedLogger)(implicit traceContext: TraceContext): Unit = + if (isClientCancellation) { + logger.info(toString, status.getCause) + } else { + logger.warn(toString, status.getCause) + } + } + + private def lastCause(throwable: Throwable): Throwable = { + Option(throwable.getCause).fold(throwable)(lastCause) + } + + /** The server or the service was unavailable. + * The server has not processed the request. + * It makes sense to retry. 
+ */ + final case class GrpcServiceUnavailable( + request: String, + serverName: String, + status: Status, + optTrailers: Option[Metadata], + decodedRpcStatus: Option[DecodedRpcStatus], + ) extends GrpcError { + override def logFullCause: Boolean = _logFullCause + override def hint: String = _hint + private lazy val (_retry, _hint, _logFullCause) = status.getCode match { + case UNAVAILABLE => + Option(status.getCause).map(lastCause) match { + case Some(_: javax.net.ssl.SSLException) => + (false, " Are you using the right TLS settings?", true) + case Some(_: java.net.UnknownHostException) => (false, " Is the url correct?", true) + case _ => + // Mentioning TLS again, because sometimes we don't get an SSLException despite an SSL problem. + ( + true, + " Is the server running? Did you configure the server address as 0.0.0.0? Are you using the right TLS settings? (details logged as DEBUG)", + false, + ) + } + case UNIMPLEMENTED => + (true, " Is the server initialized or is the server incompatible?", true) + case CANCELLED => (true, " Server seems to have crashed", true) + case _ => (true, "", true) + } + + override def retry: Boolean = _retry + } + + @nowarn("msg=match may not be exhaustive") + def apply(request: String, serverName: String, e: StatusRuntimeException): GrpcError = { + val status = e.getStatus + val optTrailers = Option(e.getTrailers) + val rpcStatus = DecodedRpcStatus.fromStatusRuntimeException(e) + + status.getCode match { + case INVALID_ARGUMENT | UNAUTHENTICATED + if !checkAuthenticationError( + optTrailers, + Seq(MissingToken.toString, ParticipantDisabled.toString), + ) => + GrpcClientError(request, serverName, status, optTrailers, rpcStatus) + + case FAILED_PRECONDITION | NOT_FOUND | OUT_OF_RANGE | RESOURCE_EXHAUSTED | ABORTED | + PERMISSION_DENIED | UNAUTHENTICATED | ALREADY_EXISTS => + GrpcRequestRefusedByServer(request, serverName, status, optTrailers, rpcStatus) + + case DEADLINE_EXCEEDED | CANCELLED => + GrpcClientGaveUp(request, serverName, 
status, optTrailers, rpcStatus) + + case UNAVAILABLE if errorCategoryFromString(status.getDescription).nonEmpty => + GrpcClientError(request, serverName, status, optTrailers, rpcStatus) + + case UNAVAILABLE | UNIMPLEMENTED => + GrpcServiceUnavailable(request, serverName, status, optTrailers, rpcStatus) + + case INTERNAL | UNKNOWN | DATA_LOSS => + GrpcServerError(request, serverName, status, optTrailers, rpcStatus) + + case OK => + GrpcServerError( + request, + serverName, + status, + optTrailers, + rpcStatus, + ) // broken, as a call should never fail with status OK + } + } + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/package.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/package.scala new file mode 100644 index 0000000000..2c368b0f8d --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/package.scala @@ -0,0 +1,145 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset + +import com.daml.lf.command.ReplayCommand +import com.daml.lf.data.{IdString, Ref, Time} +import com.daml.lf.transaction.{ContractStateMachine, Versioned} +import com.daml.lf.value.Value +import com.digitalasset.canton.data.{Counter, CounterCompanion} +import com.digitalasset.canton.ledger.configuration +import com.digitalasset.canton.serialization.DeterministicEncoding.encodeLong +import com.google.protobuf.ByteString + +package object canton { + + // Lf type for other ledger scalars, e.g. application, command and workflow id + // LfLedgerString has a length limit of 255 characters and contains alphanumeric characters and an itemized set of + // separators including _, :, - and even spaces + type LfLedgerString = Ref.LedgerString + val LfLedgerString: Ref.LedgerString.type = Ref.LedgerString + + // A party identifier representation in LF. 
See [[com.digitalasset.canton.topology.PartyId]] for the party identifier + // in Canton. + type LfPartyId = Ref.Party + val LfPartyId: Ref.Party.type = Ref.Party + + // Ledger participant id + type LedgerParticipantId = Ref.ParticipantId + val LedgerParticipantId: Ref.ParticipantId.type = Ref.ParticipantId + + // Ledger submission id + type LedgerSubmissionId = Ref.SubmissionId + val LedgerSubmissionId: Ref.SubmissionId.type = Ref.SubmissionId + + // Ledger command id + type LedgerCommandId = Ref.CommandId + val LedgerCommandId: Ref.CommandId.type = Ref.CommandId + + // Ledger application id + type LedgerApplicationId = Ref.ApplicationId + val LedgerApplicationId: Ref.ApplicationId.type = Ref.ApplicationId + + // Ledger configuration + type LedgerConfiguration = configuration.Configuration + val LedgerConfiguration: configuration.Configuration.type = configuration.Configuration + + // Ledger transaction id + type LedgerTransactionId = Ref.TransactionId + val LedgerTransactionId: Ref.TransactionId.type = Ref.TransactionId + + // Exercise choice name + type LfChoiceName = Ref.ChoiceName + val LfChoiceName: Ref.ChoiceName.type = Ref.ChoiceName + + type LfPackageId = Ref.PackageId + val LfPackageId: Ref.PackageId.type = Ref.PackageId + + type LfInterfaceId = Ref.TypeConName + val LfInterfaceId: Ref.TypeConName.type = Ref.TypeConName + + // Timestamp used by lf and sync api + type LfTimestamp = Time.Timestamp + val LfTimestamp: Time.Timestamp.type = Time.Timestamp + + type LfValue = Value + val LfValue: Value.type = Value + + type LfVersioned[T] = Versioned[T] + val LfVersioned: Versioned.type = Versioned + + // Lf commands for use by lf engine.reinterpret + type LfCommand = ReplayCommand + val LfCommand: ReplayCommand.type = ReplayCommand + + type LfCreateCommand = LfCommand.Create + val LfCreateCommand: LfCommand.Create.type = LfCommand.Create + + type LfExerciseCommand = LfCommand.Exercise + val LfExerciseCommand: LfCommand.Exercise.type = LfCommand.Exercise + + 
type LfExerciseByKeyCommand = LfCommand.ExerciseByKey + val LfExerciseByKeyCommand: LfCommand.ExerciseByKey.type = LfCommand.ExerciseByKey + + type LfFetchCommand = LfCommand.Fetch + val LfFetchCommand: LfCommand.Fetch.type = LfCommand.Fetch + + type LfFetchByKeyCommand = LfCommand.FetchByKey + val LfFetchByKeyCommand: LfCommand.FetchByKey.type = LfCommand.FetchByKey + + type LfLookupByKeyCommand = LfCommand.LookupByKey + val LfLookupByKeyCommand: LfCommand.LookupByKey.type = LfCommand.LookupByKey + + type LfWorkflowId = Ref.WorkflowId + val LfWorkflowId: Ref.WorkflowId.type = Ref.WorkflowId + + type LfKeyResolver = ContractStateMachine.KeyResolver + + /** The counter assigned by the sequencer to messages sent to the participant. + * The counter is specific to every participant. + */ + type SequencerCounterDiscriminator + type SequencerCounter = Counter[SequencerCounterDiscriminator] + + val SequencerCounter = new CounterCompanion[SequencerCounterDiscriminator] {} + + /** The counter assigned by the transaction processor to confirmation and transfer requests. */ + type RequestCounterDiscriminator + type RequestCounter = Counter[RequestCounterDiscriminator] + + /** The counter assigned to a contract to count the number of its transfers */ + type TransferCounterDiscriminator + type TransferCounter = Counter[TransferCounterDiscriminator] + + object TransferCounter extends CounterCompanion[TransferCounterDiscriminator] { + def encodeDeterministically(transferCounter: TransferCounter): ByteString = encodeLong( + transferCounter.unwrap + ) + } + + /** A transfer counter. + */ + type TransferCounterO = Option[TransferCounter] + + object RequestCounter extends CounterCompanion[RequestCounterDiscriminator] + + /** Wrap a method call with this method to document that the caller is sure that the callee's preconditions are met. */ + def checked[A](x: => A): A = x + + /** Evaluate the expression and discard the result. 
*/ + implicit final class DiscardOps[A](private val a: A) extends AnyVal { + @inline + def discard[B](implicit ev: A =:= B): Unit = () + } + + implicit val lfPartyOrdering: Ordering[LfPartyId] = + IdString.`Party order instance`.toScalaOrdering + + /** Use this type when scalac struggles to infer `Nothing` + * due to it being treated specially. + * + * see https://www.reddit.com/r/scala/comments/73791p/nothings_twin_brother_the_better_one/ + */ + type Uninhabited <: Nothing +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/AgreementText.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/AgreementText.scala new file mode 100644 index 0000000000..a7d8b84079 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/AgreementText.scala @@ -0,0 +1,10 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol + +final case class AgreementText(v: String) extends AnyVal + +object AgreementText { + val empty = AgreementText("") +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/CantonContractIdVersion.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/CantonContractIdVersion.scala new file mode 100644 index 0000000000..63eef4e2f7 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/CantonContractIdVersion.scala @@ -0,0 +1,106 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol + +import cats.syntax.either.* +import com.daml.ledger.javaapi.data.codegen.ContractId +import com.daml.lf.data.Bytes +import com.digitalasset.canton.checked +import com.digitalasset.canton.config.CantonRequireTypes.String255 +import com.digitalasset.canton.crypto.* +import com.digitalasset.canton.ledger.api.refinements.ApiTypes +import com.google.protobuf.ByteString + +object CantonContractIdVersion { + val versionPrefixBytesSize = 2 + + def ensureCantonContractId( + contractId: LfContractId + ): Either[MalformedContractId, CantonContractIdVersion] = { + val LfContractId.V1(_discriminator, suffix) = contractId + for { + versionedContractId <- CantonContractIdVersion + .fromContractSuffix(suffix) + .leftMap(error => MalformedContractId(contractId.toString, error)) + + unprefixedSuffix = suffix.slice(versionPrefixBytesSize, suffix.length) + + _ <- Hash + .fromByteString(unprefixedSuffix.toByteString) + .leftMap(err => MalformedContractId(contractId.toString, err.message)) + } yield versionedContractId + } + + def fromContractSuffix(contractSuffix: Bytes): Either[String, CantonContractIdVersion] = { + if (contractSuffix.startsWith(AuthenticatedContractIdVersionV2.versionPrefixBytes)) { + Right(AuthenticatedContractIdVersionV2) + } else { + Left( + s"""Suffix ${contractSuffix.toHexString} does not start with one of the supported prefixes: + | ${AuthenticatedContractIdVersionV2.versionPrefixBytes} + |""".stripMargin.replaceAll("\r|\n", "") + ) + } + } +} + +sealed trait CantonContractIdVersion extends Serializable with Product { + require( + versionPrefixBytes.length == CantonContractIdVersion.versionPrefixBytesSize, + s"Version prefix of size ${versionPrefixBytes.length} should have size ${CantonContractIdVersion.versionPrefixBytesSize}", + ) + + def isAuthenticated: Boolean + + def versionPrefixBytes: Bytes + + def fromDiscriminator(discriminator: LfHash, unicum: Unicum): 
LfContractId.V1 = + LfContractId.V1(discriminator, unicum.toContractIdSuffix(this)) +} + +case object AuthenticatedContractIdVersionV2 extends CantonContractIdVersion { + // The prefix for the suffix of Canton contract IDs for contracts that can be authenticated (created in Protocol V5+) + lazy val versionPrefixBytes: Bytes = Bytes.fromByteArray(Array(0xca.toByte, 0x02.toByte)) + + val isAuthenticated: Boolean = true +} + +object ContractIdSyntax { + implicit class ScalaCodegenContractIdSyntax[T](contractId: ApiTypes.ContractId) { + def toLf: LfContractId = LfContractId.assertFromString(contractId.toString) + } + + implicit class JavaCodegenContractIdSyntax[T](contractId: ContractId[?]) { + def toLf: LfContractId = LfContractId.assertFromString(contractId.contractId) + } + + implicit class LfContractIdSyntax(private val contractId: LfContractId) extends AnyVal { + def toProtoPrimitive: String = contractId.coid + + /** An [[LfContractId]] consists of + * - a version (1 byte) + * - a discriminator (32 bytes) + * - a suffix (at most 94 bytes) + * Those 1 + 32 + 94 = 127 bytes are base-16 encoded, so this makes 254 chars at most. + * See https://github.com/digital-asset/daml/blob/main/daml-lf/spec/contract-id.rst + */ + def toLengthLimitedString: String255 = checked(String255.tryCreate(contractId.coid)) + def encodeDeterministically: ByteString = ByteString.copyFromUtf8(toProtoPrimitive) + + /** Converts an [[LfContractId]] into a contract ID bound to a template usable with the Java codegen API. + * `Unchecked` means that we do not check that the contract ID actually refers to a contract of + * the template `T`. 
+ */ + def toContractIdUnchecked[T]: ContractId[T] = + new ContractId(contractId.coid) + } + + implicit val orderingLfContractId: Ordering[LfContractId] = + Ordering.by[LfContractId, String](_.coid) +} + +final case class MalformedContractId(id: String, message: String) { + override def toString: String = + s"malformed contract id '$id'" + (if (message.nonEmpty) s". $message" else "") +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/ConfirmationPolicy.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/ConfirmationPolicy.scala new file mode 100644 index 0000000000..4abaa38e0a --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/ConfirmationPolicy.scala @@ -0,0 +1,222 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol + +import cats.Order +import cats.syntax.parallel.* +import com.digitalasset.canton.LfPartyId +import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} +import com.digitalasset.canton.data.{ConfirmingParty, Informee, PlainInformee} +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.serialization.{ + DefaultDeserializationError, + DeserializationError, + DeterministicEncoding, +} +import com.digitalasset.canton.topology.client.TopologySnapshot +import com.digitalasset.canton.topology.transaction.{ParticipantAttributes, TrustLevel} +import com.digitalasset.canton.util.FutureInstances.* +import com.digitalasset.canton.util.LfTransactionUtil +import com.google.protobuf.ByteString + +import scala.concurrent.{ExecutionContext, Future} + +sealed trait ConfirmationPolicy extends Product with Serializable with PrettyPrinting { + protected val name: String + protected val index: Int + + def toProtoPrimitive: ByteString = 
DeterministicEncoding.encodeString(name) + + def informeesAndThreshold(actionNode: LfActionNode, topologySnapshot: TopologySnapshot)(implicit + ec: ExecutionContext + ): Future[(Set[Informee], NonNegativeInt)] + + /** The minimal acceptable trust level of the sender of mediator response */ + def requiredTrustLevel: TrustLevel + + /** The minimum threshold for views of requests with this policy. + * The mediator checks that all views have at least the given threshold. + */ + def minimumThreshold(informees: Set[Informee]): NonNegativeInt = NonNegativeInt.one + + protected def additionalWeightOfSubmittingAdminParty( + informees: Set[Informee], + adminParty: LfPartyId, + ): NonNegativeInt = + informees + .collectFirst { case ConfirmingParty(`adminParty`, _, _) => NonNegativeInt.zero } + .getOrElse(NonNegativeInt.one) + + def withSubmittingAdminParty( + submittingAdminPartyO: Option[LfPartyId] + )(informees: Set[Informee], threshold: NonNegativeInt): (Set[Informee], NonNegativeInt) = + submittingAdminPartyO match { + case Some(submittingAdminParty) => + val oldSubmittingInformee = informees + .find(_.party == submittingAdminParty) + .getOrElse(PlainInformee(submittingAdminParty)) + val additionalWeight = + additionalWeightOfSubmittingAdminParty( + informees, + submittingAdminParty, + ) + val newSubmittingInformee = + oldSubmittingInformee.withAdditionalWeight(additionalWeight) + + val newInformees = informees - oldSubmittingInformee + newSubmittingInformee + val newThreshold = threshold + additionalWeight + + newInformees -> newThreshold + + case None => informees -> threshold + } + + override def pretty: Pretty[ConfirmationPolicy] = prettyOfObject[ConfirmationPolicy] +} + +object ConfirmationPolicy { + + private val havingVip: ParticipantAttributes => Boolean = _.trustLevel == TrustLevel.Vip + private val havingConfirmer: ParticipantAttributes => Boolean = _.permission.canConfirm + + private def toInformeesAndThreshold( + confirmingParties: Set[LfPartyId], + 
plainInformees: Set[LfPartyId], + requiredTrustLevel: TrustLevel, + ): (Set[Informee], NonNegativeInt) = { + // We make sure that the threshold is at least 1 so that a transaction is not vacuously approved if the confirming parties are empty. + val threshold = NonNegativeInt.tryCreate(Math.max(confirmingParties.size, 1)) + val informees = + confirmingParties.map(ConfirmingParty(_, PositiveInt.one, requiredTrustLevel): Informee) ++ + plainInformees.map(PlainInformee) + (informees, threshold) + } + + case object Vip extends ConfirmationPolicy { + override val name = "Vip" + protected override val index: Int = 0 + + override def informeesAndThreshold(node: LfActionNode, topologySnapshot: TopologySnapshot)( + implicit ec: ExecutionContext + ): Future[(Set[Informee], NonNegativeInt)] = { + val stateVerifiers = LfTransactionUtil.stateKnownTo(node) + val confirmingPartiesF = stateVerifiers.toList + .parTraverseFilter { partyId => + topologySnapshot + .activeParticipantsOf(partyId) + .map(participants => participants.values.find(havingVip).map(_ => partyId)) + } + .map(_.toSet) + confirmingPartiesF.map { confirmingParties => + val plainInformees = node.informeesOfNode -- confirmingParties + val informees = + confirmingParties.map(ConfirmingParty(_, PositiveInt.one, TrustLevel.Vip)) ++ + plainInformees.map(PlainInformee) + // As all VIP participants are trusted, it suffices that one of them confirms. + (informees, NonNegativeInt.one) + } + } + + override def requiredTrustLevel: TrustLevel = TrustLevel.Vip + + override def minimumThreshold(informees: Set[Informee]): NonNegativeInt = { + // Make sure that at least one VIP needs to approve. 
+ + val weightOfOrdinary = informees.toSeq.collect { + case ConfirmingParty(_, weight, TrustLevel.Ordinary) => weight.unwrap + }.sum + NonNegativeInt.tryCreate(weightOfOrdinary + 1) + } + + override protected def additionalWeightOfSubmittingAdminParty( + informees: Set[Informee], + adminParty: LfPartyId, + ): NonNegativeInt = + NonNegativeInt.tryCreate(informees.toSeq.map(_.weight.unwrap).sum + 1) + } + + case object Signatory extends ConfirmationPolicy { + override val name = "Signatory" + protected override val index: Int = 1 + + override def informeesAndThreshold(node: LfActionNode, topologySnapshot: TopologySnapshot)( + implicit ec: ExecutionContext + ): Future[(Set[Informee], NonNegativeInt)] = { + val confirmingParties = + LfTransactionUtil.signatoriesOrMaintainers(node) | LfTransactionUtil.actingParties(node) + require( + confirmingParties.nonEmpty, + "There must be at least one confirming party, as every node must have at least one signatory.", + ) + val plainInformees = node.informeesOfNode -- confirmingParties + Future.successful( + toInformeesAndThreshold(confirmingParties, plainInformees, TrustLevel.Ordinary) + ) + } + + override def requiredTrustLevel: TrustLevel = TrustLevel.Ordinary + } + + val values: Seq[ConfirmationPolicy] = Seq[ConfirmationPolicy](Vip, Signatory) + + require( + values.zipWithIndex.forall { case (policy, index) => policy.index == index }, + "Mismatching policy indices.", + ) + + /** Ordering for [[ConfirmationPolicy]] */ + implicit val orderConfirmationPolicy: Order[ConfirmationPolicy] = + Order.by[ConfirmationPolicy, Int](_.index) + + /** Chooses appropriate confirmation policies for a transaction. + * It chooses [[Vip]] if every node has at least one VIP who knows the state + * It chooses [[Signatory]] if every node has a Participant that can confirm. 
+ */ + def choose(transaction: LfVersionedTransaction, topologySnapshot: TopologySnapshot)(implicit + ec: ExecutionContext + ): Future[Seq[ConfirmationPolicy]] = { + + val actionNodes = transaction.nodes.values.collect { case an: LfActionNode => an } + + val vipCheckPartiesPerNode = actionNodes.map { node => + node.informeesOfNode & LfTransactionUtil.stateKnownTo(node) + } + val signatoriesCheckPartiesPerNode = actionNodes.map { node => + LfTransactionUtil.signatoriesOrMaintainers(node) | LfTransactionUtil.actingParties(node) + } + val allParties = + (vipCheckPartiesPerNode.flatten ++ signatoriesCheckPartiesPerNode.flatten).toSet + // TODO(i4930) - potentially batch this lookup + val eligibleParticipantsF = + allParties.toList + .parTraverse(partyId => + topologySnapshot.activeParticipantsOf(partyId).map { res => + (partyId, (res.values.exists(havingVip), res.values.exists(havingConfirmer))) + } + ) + .map(_.toMap) + + eligibleParticipantsF.map { eligibleParticipants => + val hasVipForEachNode = vipCheckPartiesPerNode.forall { + _.exists(eligibleParticipants(_)._1) + } + val hasConfirmersForEachNode = signatoriesCheckPartiesPerNode.forall { + _.exists(eligibleParticipants(_)._2) + } + List(hasVipForEachNode -> Vip, hasConfirmersForEachNode -> Signatory) + .filter(_._1) + .map(_._2) + } + } + + def fromProtoPrimitive( + encodedName: ByteString + ): Either[DeserializationError, ConfirmationPolicy] = + DeterministicEncoding.decodeString(encodedName).flatMap { + case (Vip.name, _) => Right(Vip) + case (Signatory.name, _) => Right(Signatory) + case (badName, _) => + Left(DefaultDeserializationError(s"Invalid confirmation policy $badName")) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/ContractMetadata.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/ContractMetadata.scala new file mode 100644 index 0000000000..6662de1611 --- /dev/null +++ 
b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/ContractMetadata.scala @@ -0,0 +1,150 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol + +import cats.syntax.either.* +import cats.syntax.traverse.* +import com.digitalasset.canton.ProtoDeserializationError.FieldNotSet +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.protocol.ContractMetadata.InvalidContractMetadata +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.version.{ + HasVersionedMessageCompanion, + HasVersionedMessageCompanionDbHelpers, + HasVersionedWrapper, + ProtoVersion, + ProtocolVersion, +} +import com.digitalasset.canton.{LfPartyId, LfVersioned, checked} + +/** Metadata for a contract. + * + * @param signatories Must include the maintainers of the key if any + * @param stakeholders Must include the signatories + * @throws ContractMetadata.InvalidContractMetadata if some maintainers are not signatories or some signatories are not stakeholders. 
+ */ +final case class ContractMetadata private ( + signatories: Set[LfPartyId], + stakeholders: Set[LfPartyId], + maybeKeyWithMaintainersVersioned: Option[LfVersioned[LfGlobalKeyWithMaintainers]], +) extends HasVersionedWrapper[ContractMetadata] + with PrettyPrinting { + + { + val nonSignatoryMaintainers = maintainers -- signatories + if (nonSignatoryMaintainers.nonEmpty) + throw InvalidContractMetadata(show"Maintainers are not signatories: $nonSignatoryMaintainers") + val nonStakeholderSignatories = signatories -- stakeholders + if (nonStakeholderSignatories.nonEmpty) + throw InvalidContractMetadata( + show"Signatories are not stakeholders: $nonStakeholderSignatories" + ) + } + + override protected def companionObj = ContractMetadata + + def maybeKeyWithMaintainers: Option[LfGlobalKeyWithMaintainers] = + maybeKeyWithMaintainersVersioned.map(_.unversioned) + + def maybeKey: Option[LfGlobalKey] = maybeKeyWithMaintainers.map(_.globalKey) + + def maintainers: Set[LfPartyId] = + maybeKeyWithMaintainers.fold(Set.empty[LfPartyId])(_.maintainers) + + private[protocol] def toProtoV1: v1.SerializableContract.Metadata = { + v1.SerializableContract.Metadata( + nonMaintainerSignatories = (signatories -- maintainers).toList, + nonSignatoryStakeholders = (stakeholders -- signatories).toList, + key = maybeKeyWithMaintainersVersioned.map(x => + GlobalKeySerialization.assertToProto( + x.map(keyWithMaintainers => keyWithMaintainers.globalKey) + ) + ), + maintainers = maintainers.toSeq, + ) + } + + override def pretty: Pretty[ContractMetadata] = prettyOfClass( + param("signatories", _.signatories), + param("stakeholders", _.stakeholders), + paramIfDefined("key", _.maybeKey), + paramIfNonEmpty("maintainers", _.maintainers), + ) +} + +object ContractMetadata + extends HasVersionedMessageCompanion[ContractMetadata] + with HasVersionedMessageCompanionDbHelpers[ContractMetadata] { + val supportedProtoVersions: SupportedProtoVersions = SupportedProtoVersions( + ProtoVersion(0) -> 
ProtoCodec( + ProtocolVersion.v30, + supportedProtoVersion(v1.SerializableContract.Metadata)(fromProtoV1), + _.toProtoV1.toByteString, + ) + ) + + override def name: String = "contract metadata" + + final case class InvalidContractMetadata(message: String) extends RuntimeException(message) + + def tryCreate( + signatories: Set[LfPartyId], + stakeholders: Set[LfPartyId], + maybeKeyWithMaintainers: Option[LfVersioned[LfGlobalKeyWithMaintainers]], + ): ContractMetadata = + new ContractMetadata(signatories, stakeholders, maybeKeyWithMaintainers) + + def create( + signatories: Set[LfPartyId], + stakeholders: Set[LfPartyId], + maybeKeyWithMaintainers: Option[LfVersioned[LfGlobalKeyWithMaintainers]], + ): Either[String, ContractMetadata] = + Either + .catchOnly[InvalidContractMetadata]( + tryCreate(signatories, stakeholders, maybeKeyWithMaintainers) + ) + .leftMap(_.message) + + def empty: ContractMetadata = checked(ContractMetadata.tryCreate(Set.empty, Set.empty, None)) + + def fromProtoV1( + metadataP: v1.SerializableContract.Metadata + ): ParsingResult[ContractMetadata] = { + val v1.SerializableContract.Metadata( + nonMaintainerSignatoriesP, + nonSignatoryStakeholdersP, + keyP, + maintainersP, + ) = + metadataP + for { + nonMaintainerSignatories <- nonMaintainerSignatoriesP.traverse(ProtoConverter.parseLfPartyId) + nonSignatoryStakeholders <- nonSignatoryStakeholdersP.traverse(ProtoConverter.parseLfPartyId) + keyO <- keyP.traverse(GlobalKeySerialization.fromProtoV0) + maintainersList <- maintainersP.traverse(ProtoConverter.parseLfPartyId) + _ <- Either.cond(maintainersList.isEmpty || keyO.isDefined, (), FieldNotSet("Metadata.key")) + } yield { + val maintainers = maintainersList.toSet + val keyWithMaintainersO = keyO.map(_.map(LfGlobalKeyWithMaintainers(_, maintainers))) + val signatories = maintainers ++ nonMaintainerSignatories.toSet + val stakeholders = signatories ++ nonSignatoryStakeholders.toSet + checked(ContractMetadata.tryCreate(signatories, stakeholders, 
keyWithMaintainersO)) + } + } +} + +final case class WithContractMetadata[+A](private val x: A, metadata: ContractMetadata) { + def unwrap: A = x +} + +object WithContractMetadata { + implicit def prettyWithContractMetadata[A: Pretty]: Pretty[WithContractMetadata[A]] = { + import Pretty.* + prettyOfClass( + unnamedParam(_.x), + param("metadata", _.metadata), + ) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/CreatedContract.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/CreatedContract.scala new file mode 100644 index 0000000000..ba6e874303 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/CreatedContract.scala @@ -0,0 +1,107 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol + +import cats.syntax.either.* +import com.digitalasset.canton.ProtoDeserializationError.OtherError +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult + +/** @param consumedInCore Whether the contract is consumed in the core of the view. + * [[com.digitalasset.canton.protocol.WellFormedTransaction]] checks that a created contract + * can only be used in the same or deeper rollback scopes as the create, so if `rolledBack` is true + * then `consumedInCore` is false. + * @param rolledBack Whether the contract creation has a different rollback scope than the view. 
+ */ +final case class CreatedContract private ( + contract: SerializableContract, + consumedInCore: Boolean, + rolledBack: Boolean, +) extends PrettyPrinting { + + // Note that on behalf of rolledBack contracts we still send the SerializableContract along with the contract instance + // mainly to support DAMLe.reinterpret on behalf of a top-level CreateActionDescription under a rollback node because + // we need the contract instance to construct the LfCreateCommand. + def toProtoV1: v1.CreatedContract = + v1.CreatedContract( + contract = Some(contract.toProtoV1), + consumedInCore = consumedInCore, + rolledBack = rolledBack, + ) + + override def pretty: Pretty[CreatedContract] = prettyOfClass( + unnamedParam(_.contract), + paramIfTrue("consumed in core", _.consumedInCore), + paramIfTrue("rolled back", _.rolledBack), + ) +} + +object CreatedContract { + def create( + contract: SerializableContract, + consumedInCore: Boolean, + rolledBack: Boolean, + ): Either[String, CreatedContract] = + CantonContractIdVersion + .ensureCantonContractId(contract.contractId) + .leftMap(err => s"Encountered invalid Canton contract id: ${err.toString}") + .flatMap { _ => + // Contracts created with the "authenticated" contract id prefix-of-suffix + // must have contract_salt present in order to be properly authenticated (and used for explicit disclosure) + ProtoConverter + .required("contract_salt", contract.contractSalt) + .leftMap(err => s"Failed instantiating created contract: ${err.message}") + } + .map(_ => new CreatedContract(contract, consumedInCore, rolledBack)) + + def tryCreate( + contract: SerializableContract, + consumedInCore: Boolean, + rolledBack: Boolean, + ): CreatedContract = + create( + contract = contract, + consumedInCore = consumedInCore, + rolledBack = rolledBack, + ).valueOr(err => throw new IllegalArgumentException(err)) + + def fromProtoV1( + createdContractP: v1.CreatedContract + ): ParsingResult[CreatedContract] = { + val v1.CreatedContract(contractP, 
consumedInCore, rolledBack) = + createdContractP + + for { + contract <- ProtoConverter + .required("contract", contractP) + .flatMap(SerializableContract.fromProtoV1) + createdContract <- create( + contract = contract, + consumedInCore = consumedInCore, + rolledBack = rolledBack, + ).leftMap(OtherError) + } yield createdContract + } +} + +/** @param consumedInView Whether the contract is consumed in the view. + * [[com.digitalasset.canton.protocol.WellFormedTransaction]] checks that a created contract + * can only be used in the same or deeper rollback scopes as the create, so if `rolledBack` is true + * then `consumedInView` is false. + * @param rolledBack Whether the contract creation has a different rollback scope than the view. + */ +final case class CreatedContractInView( + contract: SerializableContract, + consumedInView: Boolean, + rolledBack: Boolean, +) +object CreatedContractInView { + def fromCreatedContract(created: CreatedContract): CreatedContractInView = + CreatedContractInView( + created.contract, + consumedInView = created.consumedInCore, + created.rolledBack, + ) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/DomainParameters.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/DomainParameters.scala new file mode 100644 index 0000000000..4a104357d2 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/DomainParameters.scala @@ -0,0 +1,743 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol + +import cats.instances.option.* +import cats.syntax.either.* +import cats.syntax.traverse.* +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.ProtoDeserializationError.InvariantViolation +import com.digitalasset.canton.config.RequireTypes.NonNegativeInt +import com.digitalasset.canton.crypto.* +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.protocol.DomainParameters.MaxRequestSize +import com.digitalasset.canton.protocol.DynamicDomainParameters.InvalidDynamicDomainParameters +import com.digitalasset.canton.protocol.{v1 as protoV1, v2 as protoV2} +import com.digitalasset.canton.sequencing.TrafficControlParameters +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.time.{ + Clock, + NonNegativeFiniteDuration, + PositiveSeconds, + RemoteClock, + SimClock, +} +import com.digitalasset.canton.topology.DomainId +import com.digitalasset.canton.topology.transaction.ParticipantDomainLimits +import com.digitalasset.canton.util.EitherUtil.RichEither +import com.digitalasset.canton.version.* +import com.digitalasset.canton.{ProtoDeserializationError, checked} + +import scala.concurrent.Future +import scala.math.Ordered.orderingToOrdered + +object DomainParameters { + + /** This class is used to represent domain parameter(s) that can come from static + * domain parameters or dynamic ones, depending on the protocol version. 
+ * @param validFrom If the parameter comes from dynamic parameters, exclusive + * timestamp coming from the topology transaction, otherwise, CantonTimestamp.MinValue + * @param validUntil If the parameter comes from dynamic parameters, timestamp + * coming from the topology transaction, otherwise None + */ + final case class WithValidity[+P]( + validFrom: CantonTimestamp, + validUntil: Option[CantonTimestamp], + parameter: P, + ) { + def map[T](f: P => T): WithValidity[T] = WithValidity(validFrom, validUntil, f(parameter)) + def isValidAt(ts: CantonTimestamp) = validFrom < ts && validUntil.forall(ts <= _) + } + final case class MaxRequestSize(value: NonNegativeInt) extends AnyVal { + def unwrap = value.unwrap + } +} + +final case class StaticDomainParameters private ( + uniqueContractKeys: Boolean, // TODO(i13235) remove when UCK is gone + requiredSigningKeySchemes: NonEmpty[Set[SigningKeyScheme]], + requiredEncryptionKeySchemes: NonEmpty[Set[EncryptionKeyScheme]], + requiredSymmetricKeySchemes: NonEmpty[Set[SymmetricKeyScheme]], + requiredHashAlgorithms: NonEmpty[Set[HashAlgorithm]], + requiredCryptoKeyFormats: NonEmpty[Set[CryptoKeyFormat]], + protocolVersion: ProtocolVersion, +) extends HasProtocolVersionedWrapper[StaticDomainParameters] { + + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + StaticDomainParameters.type + ] = StaticDomainParameters.protocolVersionRepresentativeFor(protocolVersion) + + // Ensures the invariants related to default values hold + validateInstance().valueOr(err => throw new IllegalArgumentException(err)) + + @transient override protected lazy val companionObj: StaticDomainParameters.type = + StaticDomainParameters + + def update(uniqueContractKeys: Boolean = uniqueContractKeys): StaticDomainParameters = + this.copy(uniqueContractKeys = uniqueContractKeys) + + def toProtoV1: protoV1.StaticDomainParameters = + protoV1.StaticDomainParameters( + uniqueContractKeys = uniqueContractKeys, + 
requiredSigningKeySchemes = requiredSigningKeySchemes.toSeq.map(_.toProtoEnum), + requiredEncryptionKeySchemes = requiredEncryptionKeySchemes.toSeq.map(_.toProtoEnum), + requiredSymmetricKeySchemes = requiredSymmetricKeySchemes.toSeq.map(_.toProtoEnum), + requiredHashAlgorithms = requiredHashAlgorithms.toSeq.map(_.toProtoEnum), + requiredCryptoKeyFormats = requiredCryptoKeyFormats.toSeq.map(_.toProtoEnum), + protocolVersion = protocolVersion.toProtoPrimitive, + ) +} +object StaticDomainParameters + extends HasProtocolVersionedCompanion[StaticDomainParameters] + with ProtocolVersionedCompanionDbHelpers[StaticDomainParameters] { + val supportedProtoVersions = SupportedProtoVersions( + ProtoVersion(1) -> VersionedProtoConverter(ProtocolVersion.v30)( + protoV1.StaticDomainParameters + )( + supportedProtoVersion(_)(fromProtoV1), + _.toProtoV1.toByteString, + ) + ) + + override def name: String = "static domain parameters" + + def create( + uniqueContractKeys: Boolean, + requiredSigningKeySchemes: NonEmpty[Set[SigningKeyScheme]], + requiredEncryptionKeySchemes: NonEmpty[Set[EncryptionKeyScheme]], + requiredSymmetricKeySchemes: NonEmpty[Set[SymmetricKeyScheme]], + requiredHashAlgorithms: NonEmpty[Set[HashAlgorithm]], + requiredCryptoKeyFormats: NonEmpty[Set[CryptoKeyFormat]], + protocolVersion: ProtocolVersion, + ): StaticDomainParameters = StaticDomainParameters( + uniqueContractKeys = uniqueContractKeys, + requiredSigningKeySchemes = requiredSigningKeySchemes, + requiredEncryptionKeySchemes = requiredEncryptionKeySchemes, + requiredSymmetricKeySchemes = requiredSymmetricKeySchemes, + requiredHashAlgorithms = requiredHashAlgorithms, + requiredCryptoKeyFormats = requiredCryptoKeyFormats, + protocolVersion = protocolVersion, + ) + + private def requiredKeySchemes[P, A]( + field: String, + content: Seq[P], + parse: (String, P) => ParsingResult[A], + ): ParsingResult[NonEmpty[Set[A]]] = + ProtoConverter.parseRequiredNonEmpty(parse(field, _), field, content).map(_.toSet) + + 
def fromProtoV1( + domainParametersP: protoV1.StaticDomainParameters + ): ParsingResult[StaticDomainParameters] = { + val protoV1.StaticDomainParameters( + uniqueContractKeys, + requiredSigningKeySchemesP, + requiredEncryptionKeySchemesP, + requiredSymmetricKeySchemesP, + requiredHashAlgorithmsP, + requiredCryptoKeyFormatsP, + protocolVersionP, + ) = domainParametersP + + for { + requiredSigningKeySchemes <- requiredKeySchemes( + "requiredSigningKeySchemes", + requiredSigningKeySchemesP, + SigningKeyScheme.fromProtoEnum, + ) + requiredEncryptionKeySchemes <- requiredKeySchemes( + "requiredEncryptionKeySchemes", + requiredEncryptionKeySchemesP, + EncryptionKeyScheme.fromProtoEnum, + ) + requiredSymmetricKeySchemes <- requiredKeySchemes( + "requiredSymmetricKeySchemes", + requiredSymmetricKeySchemesP, + SymmetricKeyScheme.fromProtoEnum, + ) + requiredHashAlgorithms <- requiredKeySchemes( + "requiredHashAlgorithms", + requiredHashAlgorithmsP, + HashAlgorithm.fromProtoEnum, + ) + requiredCryptoKeyFormats <- requiredKeySchemes( + "requiredCryptoKeyFormats", + requiredCryptoKeyFormatsP, + CryptoKeyFormat.fromProtoEnum, + ) + protocolVersion <- ProtocolVersion.fromProtoPrimitive(protocolVersionP) + } yield StaticDomainParameters( + uniqueContractKeys, + requiredSigningKeySchemes, + requiredEncryptionKeySchemes, + requiredSymmetricKeySchemes, + requiredHashAlgorithms, + requiredCryptoKeyFormats, + protocolVersion, + ) + } +} + +/** @param participantResponseTimeout the amount of time (w.r.t. the sequencer clock) that a participant may take + * to validate a command and send a response. + * Once the timeout has elapsed for a request, + * the mediator will discard all responses for that request. + * Choose a lower value to reduce the time to reject a command in case one of the + * involved participants has high load / operational problems. + * Choose a higher value to reduce the likelihood of commands being rejected + * due to timeouts. 
+ * @param mediatorReactionTimeout the maximum amount of time (w.r.t. the sequencer clock) that the mediator may take
+ * to validate the responses for a request and broadcast the result message.
+ * The mediator reaction timeout starts when the confirmation response timeout has elapsed.
+ * If the mediator does not send a result message within that timeout,
+ * participants must roll back the transaction underlying the request.
+ * Also used to determine the max-sequencing-time of daml 3.x topology transactions
+ * governed by a mediator group.
+ * Choose a lower value to reduce the time to learn whether a command
+ * has been accepted.
+ * Choose a higher value to reduce the likelihood of commands being rejected
+ * due to timeouts.
+ * @param transferExclusivityTimeout this timeout affects who can initiate a transfer-in.
+ * Before the timeout, only the submitter of the transfer-out can initiate the
+ * corresponding transfer-in.
+ * From the timeout onwards, every stakeholder of the contract can initiate a transfer-in,
+ * if it has not yet happened.
+ * Moreover, if this timeout is zero, no automatic transfer-ins will occur.
+ * Choose a low value if you want to lower the time that contracts can be inactive
+ * due to ongoing transfers.
+ * Choosing a high value currently has no practical benefit, but
+ * will have benefits in a future version.
+ * TODO(M41): Document those benefits
+ * @param topologyChangeDelay determines the offset applied to the topology transactions before they become active,
+ * in order to support parallel transaction processing
+ * @param ledgerTimeRecordTimeTolerance the maximum absolute difference between the ledger time and the
+ * record time of a command.
+ * If the absolute difference would be larger for a command,
+ * then the command must be rejected.
+ * @param mediatorDeduplicationTimeout the time for how long a request will be stored at the mediator for deduplication
+ * purposes.
This must be at least twice the `ledgerTimeRecordTimeTolerance`.
+ * It is fine to choose the minimal value, unless you plan to subsequently
+ * increase `ledgerTimeRecordTimeTolerance`.
+ * @param reconciliationInterval The size of the reconciliation interval (minimum duration between two ACS commitments).
+ * Note: defaults to [[StaticDomainParameters.defaultReconciliationInterval]] for backward
+ * compatibility.
+ * Should be significantly longer than the period of time it takes to compute the commitment and have it sequenced on the domain.
+ * Otherwise, ACS commitments will keep being exchanged continuously on an idle domain.
+ * @param maxRatePerParticipant maximum number of messages sent per participant per second
+ * @param maxRequestSize maximum size of messages (in bytes) that the domain can receive through the public API
+ * @param sequencerAggregateSubmissionTimeout the maximum time for how long an incomplete aggregate submission request is
+ * allowed to stay pending in the sequencer's state before it's removed.
+ * Must be at least `participantResponseTimeout` + `mediatorReactionTimeout` in a practical system.
+ * @throws DynamicDomainParameters$.InvalidDynamicDomainParameters
+ * if `mediatorDeduplicationTimeout` is less than twice of `ledgerTimeRecordTimeTolerance`.
+ */ +final case class DynamicDomainParameters private ( + participantResponseTimeout: NonNegativeFiniteDuration, + mediatorReactionTimeout: NonNegativeFiniteDuration, + transferExclusivityTimeout: NonNegativeFiniteDuration, + topologyChangeDelay: NonNegativeFiniteDuration, + ledgerTimeRecordTimeTolerance: NonNegativeFiniteDuration, + mediatorDeduplicationTimeout: NonNegativeFiniteDuration, + reconciliationInterval: PositiveSeconds, + maxRatePerParticipant: NonNegativeInt, + maxRequestSize: MaxRequestSize, + sequencerAggregateSubmissionTimeout: NonNegativeFiniteDuration, + trafficControlParameters: Option[TrafficControlParameters], +)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + DynamicDomainParameters.type + ] +) extends HasProtocolVersionedWrapper[DynamicDomainParameters] + with PrettyPrinting { + + @transient override protected lazy val companionObj: DynamicDomainParameters.type = + DynamicDomainParameters + + // https://docs.google.com/document/d/1tpPbzv2s6bjbekVGBn6X5VZuw0oOTHek5c30CBo4UkI/edit#bookmark=id.jtqcu52qpf82 + if (ledgerTimeRecordTimeTolerance * NonNegativeInt.tryCreate(2) > mediatorDeduplicationTimeout) + throw new InvalidDynamicDomainParameters( + s"The ledgerTimeRecordTimeTolerance ($ledgerTimeRecordTimeTolerance) must be at most half of the " + + s"mediatorDeduplicationTimeout ($mediatorDeduplicationTimeout)." + ) + + /** In some situations, the sequencer signs transaction with slightly outdated keys. + * This is to allow recipients to verify sequencer signatures when the sequencer keys have been rolled over and + * they have not yet received the new keys. + * This parameter determines how much outdated a signing key can be. + * Choose a higher value to avoid that the sequencer refuses to sign and send messages. + * Choose a lower value to reduce the latency of sequencer key rollovers. + * The sequencer signing tolerance must be at least `participantResponseTimeout + mediatorReactionTimeout`. 
+ */ + def sequencerSigningTolerance: NonNegativeFiniteDuration = + (participantResponseTimeout + mediatorReactionTimeout) * NonNegativeInt.tryCreate(2) + + def automaticTransferInEnabled: Boolean = + transferExclusivityTimeout > NonNegativeFiniteDuration.Zero + + def update( + transferExclusivityTimeout: NonNegativeFiniteDuration = transferExclusivityTimeout, + reconciliationInterval: PositiveSeconds = reconciliationInterval, + maxRatePerParticipant: NonNegativeInt = maxRatePerParticipant, + ): DynamicDomainParameters = + this.copy( + transferExclusivityTimeout = transferExclusivityTimeout, + reconciliationInterval = reconciliationInterval, + maxRatePerParticipant = maxRatePerParticipant, + )(representativeProtocolVersion) + + def tryUpdate( + participantResponseTimeout: NonNegativeFiniteDuration = participantResponseTimeout, + mediatorReactionTimeout: NonNegativeFiniteDuration = mediatorReactionTimeout, + transferExclusivityTimeout: NonNegativeFiniteDuration = transferExclusivityTimeout, + topologyChangeDelay: NonNegativeFiniteDuration = topologyChangeDelay, + ledgerTimeRecordTimeTolerance: NonNegativeFiniteDuration = ledgerTimeRecordTimeTolerance, + mediatorDeduplicationTimeout: NonNegativeFiniteDuration = mediatorDeduplicationTimeout, + reconciliationInterval: PositiveSeconds = reconciliationInterval, + maxRatePerParticipant: NonNegativeInt = maxRatePerParticipant, + sequencerAggregateSubmissionTimeout: NonNegativeFiniteDuration = + sequencerAggregateSubmissionTimeout, + trafficControlParameters: Option[TrafficControlParameters] = trafficControlParameters, + ): DynamicDomainParameters = DynamicDomainParameters.tryCreate( + participantResponseTimeout = participantResponseTimeout, + mediatorReactionTimeout = mediatorReactionTimeout, + transferExclusivityTimeout = transferExclusivityTimeout, + topologyChangeDelay = topologyChangeDelay, + ledgerTimeRecordTimeTolerance = ledgerTimeRecordTimeTolerance, + mediatorDeduplicationTimeout = mediatorDeduplicationTimeout, + 
reconciliationInterval = reconciliationInterval, + maxRatePerParticipant = maxRatePerParticipant, + maxRequestSize = maxRequestSize, + sequencerAggregateSubmissionTimeout = sequencerAggregateSubmissionTimeout, + trafficControlParameters = trafficControlParameters, + )(representativeProtocolVersion) + + def toProtoV2: protoV2.DynamicDomainParameters = protoV2.DynamicDomainParameters( + participantResponseTimeout = Some(participantResponseTimeout.toProtoPrimitive), + mediatorReactionTimeout = Some(mediatorReactionTimeout.toProtoPrimitive), + transferExclusivityTimeout = Some(transferExclusivityTimeout.toProtoPrimitive), + topologyChangeDelay = Some(topologyChangeDelay.toProtoPrimitive), + ledgerTimeRecordTimeTolerance = Some(ledgerTimeRecordTimeTolerance.toProtoPrimitive), + mediatorDeduplicationTimeout = Some(mediatorDeduplicationTimeout.toProtoPrimitive), + reconciliationInterval = Some(reconciliationInterval.toProtoPrimitive), + maxRequestSize = maxRequestSize.unwrap, + // TODO(#14053) add permissioned domain mode + permissionedDomain = false, + // TODO(#14054) add restricted packages mode + requiredPackages = Seq.empty, + // TODO(#14054) add only restricted packages supported + onlyRequiredPackagesPermitted = false, + defaultParticipantLimits = Some(v2DefaultParticipantLimits.toProto), + // TODO(#14050) limit number of participants that can be allocated to a given party + defaultMaxHostingParticipantsPerParty = 0, + sequencerAggregateSubmissionTimeout = + Some(sequencerAggregateSubmissionTimeout.toProtoPrimitive), + trafficControlParameters = trafficControlParameters.map(_.toProtoV0), + ) + + // TODO(#14052) add topology limits + def v2DefaultParticipantLimits: ParticipantDomainLimits = ParticipantDomainLimits( + maxRate = maxRatePerParticipant.unwrap, + maxNumParties = Int.MaxValue, + maxNumPackages = Int.MaxValue, + ) + + override def pretty: Pretty[DynamicDomainParameters] = { + if ( + representativeProtocolVersion >= 
companionObj.protocolVersionRepresentativeFor( + ProtocolVersion.v30 + ) + ) { + prettyOfClass( + param("participant response timeout", _.participantResponseTimeout), + param("mediator reaction timeout", _.mediatorReactionTimeout), + param("transfer exclusivity timeout", _.transferExclusivityTimeout), + param("topology change delay", _.topologyChangeDelay), + param("ledger time record time tolerance", _.ledgerTimeRecordTimeTolerance), + param("mediator deduplication timeout", _.mediatorDeduplicationTimeout), + param("reconciliation interval", _.reconciliationInterval), + param("max rate per participant", _.maxRatePerParticipant), + param("max request size", _.maxRequestSize.value), + param("sequencer aggregate submission timeout", _.sequencerAggregateSubmissionTimeout), + paramIfDefined("traffic control config", _.trafficControlParameters), + ) + } else { + prettyOfClass( + param("participant response timeout", _.participantResponseTimeout), + param("mediator reaction timeout", _.mediatorReactionTimeout), + param("transfer exclusivity timeout", _.transferExclusivityTimeout), + param("topology change delay", _.topologyChangeDelay), + param("ledger time record time tolerance", _.ledgerTimeRecordTimeTolerance), + param("mediator deduplication timeout", _.mediatorDeduplicationTimeout), + param("reconciliation interval", _.reconciliationInterval), + param("max rate per participant", _.maxRatePerParticipant), + param("max request size", _.maxRequestSize.value), + ) + } + } +} + +object DynamicDomainParameters extends HasProtocolVersionedCompanion[DynamicDomainParameters] { + + val supportedProtoVersions = SupportedProtoVersions( + ProtoVersion(2) -> VersionedProtoConverter(ProtocolVersion.v30)( + protoV2.DynamicDomainParameters + )( + supportedProtoVersion(_)(fromProtoV2), + _.toProtoV2.toByteString, + ) + ) + + override def name: String = "dynamic domain parameters" + + lazy val defaultReconciliationInterval: PositiveSeconds = PositiveSeconds.tryOfSeconds(60) + lazy val 
defaultMaxRatePerParticipant: NonNegativeInt = NonNegativeInt.tryCreate(1000000) + lazy val defaultMaxRequestSize: MaxRequestSize = MaxRequestSize( + NonNegativeInt.tryCreate(10 * 1024 * 1024) + ) + + private val defaultParticipantResponseTimeout: NonNegativeFiniteDuration = + NonNegativeFiniteDuration.tryOfSeconds(30) + private val defaultMediatorReactionTimeout: NonNegativeFiniteDuration = + NonNegativeFiniteDuration.tryOfSeconds(30) + + private val defaultTransferExclusivityTimeout: NonNegativeFiniteDuration = + NonNegativeFiniteDuration.tryOfSeconds(60) + + private val defaultTrafficControlParameters: Option[TrafficControlParameters] = + Option.empty[TrafficControlParameters] + + private val defaultTopologyChangeDelay: NonNegativeFiniteDuration = + NonNegativeFiniteDuration.tryOfMillis(250) + private val defaultTopologyChangeDelayNonStandardClock: NonNegativeFiniteDuration = + NonNegativeFiniteDuration.Zero // SimClock, RemoteClock + + private val defaultLedgerTimeRecordTimeTolerance: NonNegativeFiniteDuration = + NonNegativeFiniteDuration.tryOfSeconds(60) + + private val defaultMediatorDeduplicationTimeout: NonNegativeFiniteDuration = + defaultLedgerTimeRecordTimeTolerance * NonNegativeInt.tryCreate(2) + + // Based on SequencerClientConfig.defaultMaxSequencingTimeOffset + private val defaultSequencerAggregateSubmissionTimeout: NonNegativeFiniteDuration = + NonNegativeFiniteDuration.tryOfMinutes(5) + + /** Safely creates DynamicDomainParameters. + * + * @return `Left(...)` if `mediatorDeduplicationTimeout` is less than twice of `ledgerTimeRecordTimeTolerance`. 
+ */ + private def create( + participantResponseTimeout: NonNegativeFiniteDuration, + mediatorReactionTimeout: NonNegativeFiniteDuration, + transferExclusivityTimeout: NonNegativeFiniteDuration, + topologyChangeDelay: NonNegativeFiniteDuration, + ledgerTimeRecordTimeTolerance: NonNegativeFiniteDuration, + mediatorDeduplicationTimeout: NonNegativeFiniteDuration, + reconciliationInterval: PositiveSeconds, + maxRatePerParticipant: NonNegativeInt, + maxRequestSize: MaxRequestSize, + sequencerAggregateSubmissionTimeout: NonNegativeFiniteDuration, + trafficControlConfig: Option[TrafficControlParameters], + )( + representativeProtocolVersion: RepresentativeProtocolVersion[DynamicDomainParameters.type] + ): Either[InvalidDynamicDomainParameters, DynamicDomainParameters] = + Either.catchOnly[InvalidDynamicDomainParameters]( + tryCreate( + participantResponseTimeout, + mediatorReactionTimeout, + transferExclusivityTimeout, + topologyChangeDelay, + ledgerTimeRecordTimeTolerance, + mediatorDeduplicationTimeout, + reconciliationInterval, + maxRatePerParticipant, + maxRequestSize, + sequencerAggregateSubmissionTimeout, + trafficControlConfig, + )(representativeProtocolVersion) + ) + + /** Creates DynamicDomainParameters + * + * @throws InvalidDynamicDomainParameters if `mediatorDeduplicationTimeout` is less than twice of `ledgerTimeRecordTimeTolerance`. 
+ */ + def tryCreate( + participantResponseTimeout: NonNegativeFiniteDuration, + mediatorReactionTimeout: NonNegativeFiniteDuration, + transferExclusivityTimeout: NonNegativeFiniteDuration, + topologyChangeDelay: NonNegativeFiniteDuration, + ledgerTimeRecordTimeTolerance: NonNegativeFiniteDuration, + mediatorDeduplicationTimeout: NonNegativeFiniteDuration, + reconciliationInterval: PositiveSeconds, + maxRatePerParticipant: NonNegativeInt, + maxRequestSize: MaxRequestSize, + sequencerAggregateSubmissionTimeout: NonNegativeFiniteDuration, + trafficControlParameters: Option[TrafficControlParameters], + )( + representativeProtocolVersion: RepresentativeProtocolVersion[DynamicDomainParameters.type] + ): DynamicDomainParameters = { + DynamicDomainParameters( + participantResponseTimeout, + mediatorReactionTimeout, + transferExclusivityTimeout, + topologyChangeDelay, + ledgerTimeRecordTimeTolerance, + mediatorDeduplicationTimeout, + reconciliationInterval, + maxRatePerParticipant, + maxRequestSize, + sequencerAggregateSubmissionTimeout, + trafficControlParameters, + )(representativeProtocolVersion) + } + + /** Default dynamic domain parameters for non-static clocks */ + def defaultValues(protocolVersion: ProtocolVersion): DynamicDomainParameters = + initialValues(defaultTopologyChangeDelay, protocolVersion) + + /** Default mediator-X dynamic parameters allowing to specify more generous mediator-x timeouts for BFT-distribution */ + def defaultXValues( + protocolVersion: ProtocolVersion, + mediatorReactionTimeout: NonNegativeFiniteDuration = defaultMediatorReactionTimeout, + ): DynamicDomainParameters = + initialValues( + defaultTopologyChangeDelay, + protocolVersion, + mediatorReactionTimeout = mediatorReactionTimeout, + ) + + // TODO(#15161) Rework this when old nodes are killed + def initialValues( + topologyChangeDelay: NonNegativeFiniteDuration, + protocolVersion: ProtocolVersion, + mediatorReactionTimeout: NonNegativeFiniteDuration = defaultMediatorReactionTimeout, + 
): DynamicDomainParameters = checked( // safe because default values are safe + DynamicDomainParameters.tryCreate( + participantResponseTimeout = defaultParticipantResponseTimeout, + mediatorReactionTimeout = mediatorReactionTimeout, + transferExclusivityTimeout = defaultTransferExclusivityTimeout, + topologyChangeDelay = topologyChangeDelay, + ledgerTimeRecordTimeTolerance = defaultLedgerTimeRecordTimeTolerance, + mediatorDeduplicationTimeout = defaultMediatorDeduplicationTimeout, + reconciliationInterval = DynamicDomainParameters.defaultReconciliationInterval, + maxRatePerParticipant = DynamicDomainParameters.defaultMaxRatePerParticipant, + maxRequestSize = DynamicDomainParameters.defaultMaxRequestSize, + sequencerAggregateSubmissionTimeout = defaultSequencerAggregateSubmissionTimeout, + trafficControlParameters = defaultTrafficControlParameters, + )( + protocolVersionRepresentativeFor(protocolVersion) + ) + ) + + def tryInitialValues( + topologyChangeDelay: NonNegativeFiniteDuration, + protocolVersion: ProtocolVersion, + maxRatePerParticipant: NonNegativeInt = DynamicDomainParameters.defaultMaxRatePerParticipant, + maxRequestSize: MaxRequestSize = DynamicDomainParameters.defaultMaxRequestSize, + mediatorReactionTimeout: NonNegativeFiniteDuration = defaultMediatorReactionTimeout, + reconciliationInterval: PositiveSeconds = + DynamicDomainParameters.defaultReconciliationInterval, + sequencerAggregateSubmissionTimeout: NonNegativeFiniteDuration = + defaultSequencerAggregateSubmissionTimeout, + ) = + DynamicDomainParameters.tryCreate( + participantResponseTimeout = defaultParticipantResponseTimeout, + mediatorReactionTimeout = mediatorReactionTimeout, + transferExclusivityTimeout = defaultTransferExclusivityTimeout, + topologyChangeDelay = topologyChangeDelay, + ledgerTimeRecordTimeTolerance = defaultLedgerTimeRecordTimeTolerance, + mediatorDeduplicationTimeout = defaultMediatorDeduplicationTimeout, + reconciliationInterval = reconciliationInterval, + 
maxRatePerParticipant = maxRatePerParticipant, + maxRequestSize = maxRequestSize, + sequencerAggregateSubmissionTimeout = sequencerAggregateSubmissionTimeout, + trafficControlParameters = defaultTrafficControlParameters, + )( + protocolVersionRepresentativeFor(protocolVersion) + ) + + def initialValues(clock: Clock, protocolVersion: ProtocolVersion): DynamicDomainParameters = { + val topologyChangeDelay = clock match { + case _: RemoteClock | _: SimClock => defaultTopologyChangeDelayNonStandardClock + case _ => defaultTopologyChangeDelay + } + initialValues(topologyChangeDelay, protocolVersion) + } + + // if there is no topology change delay defined (or not yet propagated), we'll use this one + val topologyChangeDelayIfAbsent: NonNegativeFiniteDuration = NonNegativeFiniteDuration.Zero + + def fromProtoV2( + domainParametersP: protoV2.DynamicDomainParameters + ): ParsingResult[DynamicDomainParameters] = { + val protoV2.DynamicDomainParameters( + participantResponseTimeoutP, + mediatorReactionTimeoutP, + transferExclusivityTimeoutP, + topologyChangeDelayP, + ledgerTimeRecordTimeToleranceP, + reconciliationIntervalP, + mediatorDeduplicationTimeoutP, + maxRequestSizeP, + _permissionedDomain, + _requiredPackages, + _onlyRequiredPackagesPermitted, + defaultLimitsP, + _partyHostingLimits, + sequencerAggregateSubmissionTimeoutP, + trafficControlConfigP, + ) = domainParametersP + for { + + participantResponseTimeout <- NonNegativeFiniteDuration.fromProtoPrimitiveO( + "participantResponseTimeout" + )( + participantResponseTimeoutP + ) + mediatorReactionTimeout <- NonNegativeFiniteDuration.fromProtoPrimitiveO( + "mediatorReactionTimeout" + )( + mediatorReactionTimeoutP + ) + transferExclusivityTimeout <- NonNegativeFiniteDuration.fromProtoPrimitiveO( + "transferExclusivityTimeout" + )( + transferExclusivityTimeoutP + ) + topologyChangeDelay <- NonNegativeFiniteDuration.fromProtoPrimitiveO("topologyChangeDelay")( + topologyChangeDelayP + ) + ledgerTimeRecordTimeTolerance <- 
NonNegativeFiniteDuration.fromProtoPrimitiveO( + "ledgerTimeRecordTimeTolerance" + )( + ledgerTimeRecordTimeToleranceP + ) + + reconciliationInterval <- PositiveSeconds.fromProtoPrimitiveO( + "reconciliationInterval" + )( + reconciliationIntervalP + ) + mediatorDeduplicationTimeout <- NonNegativeFiniteDuration.fromProtoPrimitiveO( + "mediatorDeduplicationTimeout" + )( + mediatorDeduplicationTimeoutP + ) + + maxRatePerParticipantP <- ProtoConverter.parseRequired[Int, v2.ParticipantDomainLimits]( + item => Right(item.maxRate), + "default_limits", + defaultLimitsP, + ) + + maxRatePerParticipant <- NonNegativeInt + .create(maxRatePerParticipantP) + .leftMap(InvariantViolation.toProtoDeserializationError) + + maxRequestSize <- NonNegativeInt + .create(maxRequestSizeP) + .map(MaxRequestSize) + .leftMap(InvariantViolation.toProtoDeserializationError) + + sequencerAggregateSubmissionTimeout <- NonNegativeFiniteDuration.fromProtoPrimitiveO( + "sequencerAggregateSubmissionTimeout" + )( + sequencerAggregateSubmissionTimeoutP + ) + + trafficControlConfig <- trafficControlConfigP.traverse(TrafficControlParameters.fromProtoV0) + + domainParameters <- + create( + participantResponseTimeout = participantResponseTimeout, + mediatorReactionTimeout = mediatorReactionTimeout, + transferExclusivityTimeout = transferExclusivityTimeout, + topologyChangeDelay = topologyChangeDelay, + ledgerTimeRecordTimeTolerance = ledgerTimeRecordTimeTolerance, + mediatorDeduplicationTimeout = mediatorDeduplicationTimeout, + reconciliationInterval = reconciliationInterval, + maxRatePerParticipant = maxRatePerParticipant, + maxRequestSize = maxRequestSize, + sequencerAggregateSubmissionTimeout = sequencerAggregateSubmissionTimeout, + trafficControlConfig = trafficControlConfig, + )(protocolVersionRepresentativeFor(ProtoVersion(2))) + .leftMap(_.toProtoDeserializationError) + } yield domainParameters + } + + class InvalidDynamicDomainParameters(message: String) extends RuntimeException(message) { + lazy 
val toProtoDeserializationError: ProtoDeserializationError.InvariantViolation = + ProtoDeserializationError.InvariantViolation(message) + } +} + +/** Dynamic domain parameters and their validity interval. + * Mostly so that we can perform additional checks. + * + * @param validFrom Start point of the validity interval (exclusive) + * @param validUntil End point of the validity interval (inclusive) + */ +final case class DynamicDomainParametersWithValidity( + parameters: DynamicDomainParameters, + validFrom: CantonTimestamp, + validUntil: Option[CantonTimestamp], + domainId: DomainId, +) { + def map[T](f: DynamicDomainParameters => T): DomainParameters.WithValidity[T] = + DomainParameters.WithValidity(validFrom, validUntil, f(parameters)) + + def isValidAt(ts: CantonTimestamp): Boolean = + validFrom < ts && validUntil.forall(ts <= _) + + private def checkValidity(ts: CantonTimestamp, goal: String): Either[String, Unit] = Either.cond( + isValidAt(ts), + (), + s"Cannot compute $goal for `$ts` because validity of parameters is ($validFrom, $validUntil]", + ) + + /** Computes the decision time for the given activeness time. + * + * @param activenessTime + * @return Left in case of error, the decision time otherwise + */ + def decisionTimeFor(activenessTime: CantonTimestamp): Either[String, CantonTimestamp] = + checkValidity(activenessTime, "decision time").map(_ => + activenessTime + .add(parameters.participantResponseTimeout.unwrap) + .add(parameters.mediatorReactionTimeout.unwrap) + ) + + /** Computes the decision time for the given activeness time. 
+ * + * @param activenessTime + * @return Decision time or a failed future in case of error + */ + def decisionTimeForF(activenessTime: CantonTimestamp): Future[CantonTimestamp] = + decisionTimeFor(activenessTime).fold( + err => Future.failed(new IllegalStateException(err)), + Future.successful, + ) + + def transferExclusivityLimitFor(baseline: CantonTimestamp): Either[String, CantonTimestamp] = + checkValidity(baseline, "transfer exclusivity limit").map(_ => + baseline.add(transferExclusivityTimeout.unwrap) + ) + + def participantResponseDeadlineFor(timestamp: CantonTimestamp): Either[String, CantonTimestamp] = + checkValidity(timestamp, "participant response deadline").map(_ => + timestamp.add(parameters.participantResponseTimeout.unwrap) + ) + + def participantResponseDeadlineForF(timestamp: CantonTimestamp): Future[CantonTimestamp] = + participantResponseDeadlineFor(timestamp).toFuture(new IllegalStateException(_)) + + def automaticTransferInEnabled: Boolean = parameters.automaticTransferInEnabled + def mediatorDeduplicationTimeout: NonNegativeFiniteDuration = + parameters.mediatorDeduplicationTimeout + + def topologyChangeDelay: NonNegativeFiniteDuration = parameters.topologyChangeDelay + def transferExclusivityTimeout: NonNegativeFiniteDuration = parameters.transferExclusivityTimeout + def sequencerSigningTolerance: NonNegativeFiniteDuration = parameters.sequencerSigningTolerance +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/DomainParametersLookup.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/DomainParametersLookup.scala new file mode 100644 index 0000000000..c4f2fab83f --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/DomainParametersLookup.scala @@ -0,0 +1,108 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol + +import com.digitalasset.canton.concurrent.FutureSupervisor +import com.digitalasset.canton.config.RequireTypes.NonNegativeInt +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.protocol.DomainParameters.MaxRequestSize +import com.digitalasset.canton.topology.client.DomainTopologyClient +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.version.ProtocolVersion + +import scala.concurrent.{ExecutionContext, Future} + +/** This class allows to query domain parameters easily. + * Type parameter `P` is the type of the returned value. + */ +class DynamicDomainParametersLookup[P]( + projector: DynamicDomainParameters => P, + topologyClient: DomainTopologyClient, + protocolVersion: ProtocolVersion, + futureSupervisor: FutureSupervisor, + protected val loggerFactory: NamedLoggerFactory, +)(implicit ec: ExecutionContext) + extends NamedLogging { + + /** Return one value, valid at the specified timestamp + * + * @param warnOnUsingDefaults Log a warning if dynamic domain parameters are not set + * and default value is used. + */ + def get(validAt: CantonTimestamp, warnOnUsingDefaults: Boolean = true)(implicit + traceContext: TraceContext + ): Future[P] = futureSupervisor + .supervised(s"Querying for domain parameters valid at $validAt") { + topologyClient.awaitSnapshot(validAt) + } + .flatMap(_.findDynamicDomainParametersOrDefault(protocolVersion, warnOnUsingDefaults)) + .map(projector) + + /** Return the value of the topology snapshot approximation + * or the default value. 
+ */ + def getApproximateOrDefaultValue(warnOnUsingDefaults: Boolean = true)(implicit + traceContext: TraceContext + ): Future[P] = + topologyClient.currentSnapshotApproximation + .findDynamicDomainParametersOrDefault(protocolVersion, warnOnUsingDefaults) + .map(projector) + + /** Return the value of the topology snapshot approximation. + */ + def getApproximate()(implicit traceContext: TraceContext): Future[Option[P]] = + topologyClient.currentSnapshotApproximation + .findDynamicDomainParameters() + .map(_.map(p => projector(p.parameters)).toOption) + + /** Return a list of parameters, together with their validity interval, + * + * @param warnOnUsingDefaults Log a warning if dynamic domain parameters are not set + * and default value is used. + */ + def getAll(validAt: CantonTimestamp)(implicit + traceContext: TraceContext + ): Future[Seq[DomainParameters.WithValidity[P]]] = + futureSupervisor + .supervised(s"Querying for list of domain parameters changes valid at $validAt") { + topologyClient.awaitSnapshot(validAt) + } + .flatMap(_.listDynamicDomainParametersChanges()) + .map { domainParametersChanges => + domainParametersChanges.map(_.map(projector)) + } + + /** Return the approximate latest validity/freshness. + * Returned value is the approximate timestamp of the `TopologyClient`. 
+ */ + def approximateTimestamp: CantonTimestamp = topologyClient.approximateTimestamp +} + +object DomainParametersLookup { + def forSequencerDomainParameters( + staticDomainParameters: StaticDomainParameters, + overrideMaxRequestSize: Option[NonNegativeInt], + topologyClient: DomainTopologyClient, + futureSupervisor: FutureSupervisor, + loggerFactory: NamedLoggerFactory, + )(implicit ec: ExecutionContext): DynamicDomainParametersLookup[SequencerDomainParameters] = { + new DynamicDomainParametersLookup( + params => + SequencerDomainParameters( + params.maxRatePerParticipant, + overrideMaxRequestSize.map(MaxRequestSize).getOrElse(params.maxRequestSize), + ), + topologyClient, + staticDomainParameters.protocolVersion, + futureSupervisor, + loggerFactory, + ) + } + + final case class SequencerDomainParameters( + maxRatePerParticipant: NonNegativeInt, + maxRequestSize: MaxRequestSize, + ) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/DriverContractMetadata.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/DriverContractMetadata.scala new file mode 100644 index 0000000000..3571b05ebd --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/DriverContractMetadata.scala @@ -0,0 +1,57 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol + +import com.daml.lf.data.Bytes as LfBytes +import com.digitalasset.canton.crypto.Salt +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.version.{ + HasVersionedMessageCompanion, + HasVersionedWrapper, + ProtoVersion, + ProtocolVersion, +} + +import scala.util.chaining.* + +final case class DriverContractMetadata(salt: Salt) + extends HasVersionedWrapper[DriverContractMetadata] + with PrettyPrinting { + override protected def companionObj = DriverContractMetadata + + override def pretty: Pretty[DriverContractMetadata] = prettyOfClass( + param("contract salt", _.salt.forHashing) + ) + + def toProtoV0: v0.DriverContractMetadata = + v0.DriverContractMetadata(Some(salt.toProtoV0)) + + def toLfBytes(protocolVersion: ProtocolVersion): LfBytes = + toByteArray(protocolVersion).pipe(LfBytes.fromByteArray) +} + +object DriverContractMetadata extends HasVersionedMessageCompanion[DriverContractMetadata] { + override def name: String = "driver contract metadata" + + val supportedProtoVersions: SupportedProtoVersions = SupportedProtoVersions( + ProtoVersion(0) -> ProtoCodec( + ProtocolVersion.v30, + supportedProtoVersion(v0.DriverContractMetadata)(fromProtoV0), + _.toProtoV0.toByteString, + ) + ) + + def fromProtoV0( + driverContractMetadataP: v0.DriverContractMetadata + ): ParsingResult[DriverContractMetadata] = { + val v0.DriverContractMetadata(saltP) = driverContractMetadataP + + ProtoConverter + .required("salt", saltP) + .flatMap(Salt.fromProtoV0) + .map(DriverContractMetadata(_)) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/GlobalKeySerialization.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/GlobalKeySerialization.scala 
new file mode 100644 index 0000000000..1f675e48ac --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/GlobalKeySerialization.scala @@ -0,0 +1,64 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol + +import cats.syntax.either.* +import com.daml.lf.value.ValueCoder.{CidEncoder as LfDummyCidEncoder} +import com.daml.lf.value.{ValueCoder, ValueOuterClass} +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.{LfVersioned, ProtoDeserializationError} + +object GlobalKeySerialization { + + def toProto(globalKey: LfVersioned[LfGlobalKey]): Either[String, v0.GlobalKey] = { + val serializedTemplateId = + ValueCoder.encodeIdentifier(globalKey.unversioned.templateId).toByteString + for { + // Contract keys are not allowed to hold contract ids; therefore it is "okay" + // to use a dummy LfContractId encoder. 
+ serializedKey <- ValueCoder + .encodeVersionedValue(LfDummyCidEncoder, globalKey.version, globalKey.unversioned.key) + .map(_.toByteString) + .leftMap(_.errorMessage) + } yield v0.GlobalKey(serializedTemplateId, serializedKey) + } + + def assertToProto(key: LfVersioned[LfGlobalKey]): v0.GlobalKey = + toProto(key) + .fold( + err => throw new IllegalArgumentException(s"Can't encode contract key: $err"), + identity, + ) + + def fromProtoV0(protoKey: v0.GlobalKey): ParsingResult[LfVersioned[LfGlobalKey]] = + for { + pTemplateId <- ProtoConverter.protoParser(ValueOuterClass.Identifier.parseFrom)( + protoKey.templateId + ) + templateId <- ValueCoder + .decodeIdentifier(pTemplateId) + .leftMap(err => + ProtoDeserializationError + .ValueDeserializationError("GlobalKey.templateId", err.errorMessage) + ) + deserializedProtoKey <- ProtoConverter.protoParser(ValueOuterClass.VersionedValue.parseFrom)( + protoKey.key + ) + + versionedKey <- ValueCoder + .decodeVersionedValue(ValueCoder.CidDecoder, deserializedProtoKey) + .leftMap(err => + ProtoDeserializationError.ValueDeserializationError("GlobalKey.proto", err.toString) + ) + + globalKey <- LfGlobalKey + .build(templateId, versionedKey.unversioned) + .leftMap(err => + ProtoDeserializationError.ValueDeserializationError("GlobalKey.key", err.toString) + ) + + } yield LfVersioned(versionedKey.version, globalKey) + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/HasSerializableContract.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/HasSerializableContract.scala new file mode 100644 index 0000000000..471a2f8298 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/HasSerializableContract.scala @@ -0,0 +1,14 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol + +import com.digitalasset.canton.TransferCounterO + +trait HasSerializableContract { + + def contract: SerializableContract + + def transferCounter: TransferCounterO + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/InputContract.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/InputContract.scala new file mode 100644 index 0000000000..ce366bea50 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/InputContract.scala @@ -0,0 +1,55 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol + +import com.digitalasset.canton.LfPartyId +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult + +/** @param consumed Whether this contract is consumed in the core of the view this [[InputContract]] belongs to. 
+ * + * @see com.digitalasset.canton.data.ViewParticipantData.coreInputs + */ +final case class InputContract(contract: SerializableContract, consumed: Boolean) + extends PrettyPrinting { + + def contractId: LfContractId = contract.contractId + + def contractKey: Option[LfGlobalKey] = contract.metadata.maybeKey + + def stakeholders: Set[LfPartyId] = contract.metadata.stakeholders + + def maintainers: Set[LfPartyId] = contract.metadata.maintainers + + def toProtoV1: v1.InputContract = + v1.InputContract( + contract = Some(contract.toProtoV1), + consumed = consumed, + ) + + override def pretty: Pretty[InputContract] = prettyOfClass( + unnamedParam(_.contract), + paramIfTrue("consumed", _.consumed), + ) +} + +object InputContract { + def fromProtoV1( + inputContractP: v1.InputContract + ): ParsingResult[InputContract] = { + val v1.InputContract(contractP, consumed) = inputContractP + toInputContract(contractP, consumed, SerializableContract.fromProtoV1) + } + + private def toInputContract[SerializableContractP]( + serializableContractO: Option[SerializableContractP], + consumed: Boolean, + deserializeContract: SerializableContractP => ParsingResult[SerializableContract], + ): ParsingResult[InputContract] = + ProtoConverter + .required("InputContract.contract", serializableContractO) + .flatMap(deserializeContract) + .map(InputContract(_, consumed)) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/LfHashSyntax.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/LfHashSyntax.scala new file mode 100644 index 0000000000..4030136c77 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/LfHashSyntax.scala @@ -0,0 +1,27 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol + +import cats.syntax.either.* +import com.daml.lf.data.Bytes +import com.digitalasset.canton.ProtoDeserializationError.ValueDeserializationError +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.google.protobuf.ByteString + +object LfHashSyntax { + + implicit class LfHashSyntax(private val hash: LfHash) extends AnyVal { + def toProtoPrimitive: ByteString = hash.bytes.toByteString + } + + implicit class LfHashObjectSyntax(private val lfHash: LfHash.type) extends AnyVal { + def fromProtoPrimitive( + fieldName: String, + bytes: ByteString, + ): ParsingResult[LfHash] = + LfHash + .fromBytes(Bytes.fromByteString(bytes)) + .leftMap(err => ValueDeserializationError(fieldName, err)) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/Phase37Processor.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/Phase37Processor.scala new file mode 100644 index 0000000000..6ccaf60103 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/Phase37Processor.scala @@ -0,0 +1,71 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol + +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.protocol.messages.* +import com.digitalasset.canton.sequencing.HandlerResult +import com.digitalasset.canton.sequencing.protocol.{Deliver, EventWithErrors, SignedContent} +import com.digitalasset.canton.topology.MediatorRef +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.{RequestCounter, SequencerCounter} + +trait Phase37Processor[RequestBatch] { + + /** Processes a request (Phase 3) and sends the response to the mediator if appropriate. 
+ * + * @param ts The timestamp on the request + * @param rc The request counter of the request + * @param sc The sequencer counter of the request + * @param batch The batch in the request + * @return The returned future completes when request has reached the confirmed state + * and the response has been sent, or if an error aborts processing. + */ + def processRequest( + ts: CantonTimestamp, + rc: RequestCounter, + sc: SequencerCounter, + batch: RequestBatch, + )(implicit + traceContext: TraceContext + ): HandlerResult + + def processMalformedMediatorRequestResult( + timestamp: CantonTimestamp, + sequencerCounter: SequencerCounter, + signedResultBatch: Either[ + EventWithErrors[Deliver[DefaultOpenEnvelope]], + SignedContent[Deliver[DefaultOpenEnvelope]], + ], + )(implicit traceContext: TraceContext): HandlerResult + + /** Processes a result message, commits the changes or rolls them back and emits events via the + * [[com.digitalasset.canton.participant.event.RecordOrderPublisher]]. + * + * @param signedResultBatchE The signed result batch to process. The batch must contain exactly one message. + * @return The [[com.digitalasset.canton.sequencing.HandlerResult]] completes when the request has reached the state + * [[com.digitalasset.canton.participant.protocol.RequestJournal.RequestState.Clean]] + * and the event has been sent to the [[com.digitalasset.canton.participant.event.RecordOrderPublisher]], + * or if the processing aborts with an error. + */ + def processResult( + signedResultBatchE: Either[ + EventWithErrors[Deliver[DefaultOpenEnvelope]], + SignedContent[Deliver[DefaultOpenEnvelope]], + ] + )(implicit + traceContext: TraceContext + ): HandlerResult +} + +/** Request messages, along with the root hash message, the mediator ID that received the root hash message, + * and whether the delivery was a receipt or not (i.e. contained a message ID). 
+ */ +final case class RequestAndRootHashMessage[RequestEnvelope]( + requestEnvelopes: NonEmpty[Seq[RequestEnvelope]], + rootHashMessage: RootHashMessage[SerializedRootHashMessagePayload], + mediator: MediatorRef, + isReceipt: Boolean, +) diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/RefIdentifierSyntax.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/RefIdentifierSyntax.scala new file mode 100644 index 0000000000..557566fa7d --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/RefIdentifierSyntax.scala @@ -0,0 +1,21 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol + +import cats.syntax.either.* +import com.daml.lf.data.Ref +import com.daml.lf.data.Ref.Identifier +import com.digitalasset.canton.ProtoDeserializationError.ValueDeserializationError + +object RefIdentifierSyntax { + implicit class RefIdentifierSyntax(private val identifier: Ref.Identifier) extends AnyVal { + def toProtoPrimitive: String = identifier.toString() + } + + def fromProtoPrimitive( + interfaceIdP: String + ): Either[ValueDeserializationError, Identifier] = Ref.Identifier + .fromString(interfaceIdP) + .leftMap(err => ValueDeserializationError("identifier", err)) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/ResolvedKey.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/ResolvedKey.scala new file mode 100644 index 0000000000..707cd8572e --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/ResolvedKey.scala @@ -0,0 +1,34 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol + +import com.digitalasset.canton.LfVersioned +import com.digitalasset.canton.data.SerializableKeyResolution +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult + +final case class ResolvedKey(key: LfGlobalKey, resolution: SerializableKeyResolution) { + def toProtoV3: v3.ViewParticipantData.ResolvedKey = + v3.ViewParticipantData.ResolvedKey( + // oddity: pass the version from resolution to the proto-key + key = Some(GlobalKeySerialization.assertToProto(LfVersioned(resolution.version, key))), + resolution = resolution.toProtoOneOfV0, + ) +} + +object ResolvedKey { + def fromProtoV3( + resolvedKeyP: v3.ViewParticipantData.ResolvedKey + ): ParsingResult[ResolvedKey] = { + val v3.ViewParticipantData.ResolvedKey(keyP, resolutionP) = resolvedKeyP + for { + keyWithVersion <- ProtoConverter + .required("ResolvedKey.key", keyP) + .flatMap(GlobalKeySerialization.fromProtoV0) + LfVersioned(version, key) = keyWithVersion + // oddity: pass the version from the proto-key to resolution + resolution <- SerializableKeyResolution.fromProtoOneOfV3(resolutionP, version) + } yield ResolvedKey(key, resolution) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/RollbackContext.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/RollbackContext.scala new file mode 100644 index 0000000000..27f2e93459 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/RollbackContext.scala @@ -0,0 +1,121 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol + +import cats.syntax.either.* +import com.digitalasset.canton.ProtoDeserializationError +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.protocol.RollbackContext.{RollbackScope, RollbackSibling, firstChild} +import com.digitalasset.canton.protocol.v3 +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult + +import scala.Ordering.Implicits.* +import scala.math.Ordered.orderingToOrdered + +/** RollbackContext tracks the location of lf transaction nodes or canton participant views within a hierarchy of + * LfNodeRollback suitable for maintaining the local position within the hierarchy of rollback nodes when iterating + * over a transaction. + * @param rbScope scope or path of sibling ordinals ordered "from the outside-in", e.g. [2, 1, 3] points via the + * second rollback node to its first rollback node descendant to the latter's third rollback node + * descendant where rollback node "levels" may be separated from the root and each other only by + * non-rollback nodes. + * @param nextChild the next sibling ordinal to assign to a newly encountered rollback node. This is needed on top of + * rbScope in use cases in which the overall transaction structure is not known a priori. 
+ */ +final case class RollbackContext private ( + private val rbScope: Vector[RollbackSibling], + private val nextChild: RollbackSibling = firstChild, +) extends PrettyPrinting + with Ordered[RollbackContext] { + + def enterRollback: RollbackContext = new RollbackContext(rbScope :+ nextChild) + + def exitRollback: RollbackContext = { + val lastChild = + rbScope.lastOption.getOrElse( + throw new IllegalStateException("Attempt to exit rollback on empty rollback context") + ) + + new RollbackContext(rbScope.dropRight(1), lastChild.increment) + } + + def rollbackScope: RollbackScope = rbScope + + def inRollback: Boolean = rbScope.nonEmpty + + def isEmpty: Boolean = equals(RollbackContext.empty) + + def toProtoV3: v3.ViewParticipantData.RollbackContext = + v3.ViewParticipantData.RollbackContext( + rollbackScope = rollbackScope.map(_.unwrap), + nextChild = nextChild.unwrap, + ) + + override def pretty: Pretty[RollbackContext] = prettyOfClass( + param("rollback scope", _.rollbackScope), + param("next child", _.nextChild), + ) + + private lazy val sortKey: Vector[PositiveInt] = rbScope :+ nextChild + override def compare(that: RollbackContext): Int = sortKey.compare(that.sortKey) + +} + +final case class WithRollbackScope[T](rbScope: RollbackScope, unwrap: T) + +object RollbackContext { + type RollbackSibling = PositiveInt + private val firstChild: RollbackSibling = PositiveInt.one + + type RollbackScope = Seq[RollbackSibling] + + object RollbackScope { + def empty: RollbackScope = Vector.empty[RollbackSibling] + + def popsAndPushes(origin: RollbackScope, target: RollbackScope): (Int, Int) = { + val longestCommonRollbackScopePrefixLength = + origin.lazyZip(target).takeWhile { case (i, j) => i == j }.size + + val rbPops = origin.length - longestCommonRollbackScopePrefixLength + val rbPushes = target.length - longestCommonRollbackScopePrefixLength + (rbPops, rbPushes) + } + } + + def empty: RollbackContext = new RollbackContext(Vector.empty) + + def apply(scope: 
RollbackScope): RollbackContext = new RollbackContext(scope.toVector) + + def fromProtoV0( + maybeRbContext: Option[v3.ViewParticipantData.RollbackContext] + ): ParsingResult[RollbackContext] = + maybeRbContext.fold(Either.right[ProtoDeserializationError, RollbackContext](empty)) { + case v3.ViewParticipantData.RollbackContext(rbScope, nextChildP) => + import cats.syntax.traverse.* + + for { + nextChild <- PositiveInt + .create(nextChildP) + .leftMap(_ => + ProtoDeserializationError.ValueConversionError( + "next_child", + s"positive value expected; found: $nextChildP", + ) + ) + + rbScopeVector <- rbScope.toVector.zipWithIndex + .traverse { case (value, idx) => + PositiveInt.create(value).leftMap { _ => + s"positive value expected; found $value at position $idx" + } + } + .leftMap( + ProtoDeserializationError.ValueConversionError("rollback_scope", _) + ) + + } yield new RollbackContext(rbScopeVector, nextChild) + } + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/SerializableContract.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/SerializableContract.scala new file mode 100644 index 0000000000..ede73d7c4b --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/SerializableContract.scala @@ -0,0 +1,201 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol + +import cats.implicits.toTraverseOps +import cats.syntax.either.* +import com.daml.lf.value.ValueCoder +import com.digitalasset.canton.ProtoDeserializationError.ValueConversionError +import com.digitalasset.canton.crypto.Salt +import com.digitalasset.canton.data.{CantonTimestamp, ProcessedDisclosedContract} +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting, PrettyUtil} +import com.digitalasset.canton.protocol.ContractIdSyntax.* +import com.digitalasset.canton.protocol.SerializableContract.LedgerCreateTime +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.version.* +import com.digitalasset.canton.{LfTimestamp, crypto} +import com.google.protobuf.ByteString +import com.google.protobuf.timestamp.Timestamp + +import java.time.Instant + +/** Represents a serializable contract. + * + * @param contractId The ID of the contract. + * @param rawContractInstance The raw instance of the contract. + * @param metadata The metadata with stakeholders and signatories; can be computed from contract instance + * @param ledgerCreateTime The ledger time of the transaction '''creating''' the contract + */ +// This class is a reference example of serialization best practices, demonstrating: +// - use of an UntypedVersionedMessage wrapper when serializing to an anonymous binary format. For a more extensive example of this, +// please also see the writeup under `Backwards-incompatible Protobuf changes` in `CONTRIBUTING.md`. + +// Please consult the team if you intend to change the design of serialization. 
+@SuppressWarnings(Array("org.wartremover.warts.FinalCaseClass")) // This class is mocked in tests +case class SerializableContract( + contractId: LfContractId, + rawContractInstance: SerializableRawContractInstance, + metadata: ContractMetadata, + ledgerCreateTime: LedgerCreateTime, + contractSalt: Option[Salt], +) +// The class implements `HasVersionedWrapper` because we serialize it to an anonymous binary format (ByteString/Array[Byte]) when +// writing to the TransferStore and thus need to encode the version of the serialized Protobuf message + extends HasVersionedWrapper[SerializableContract] + // Even if implementing HasVersionedWrapper, we should still implement HasProtoV0 + with PrettyPrinting { + + def contractInstance: LfContractInst = rawContractInstance.contractInstance + + override protected def companionObj = SerializableContract + + def toProtoV1: v1.SerializableContract = + v1.SerializableContract( + contractId = contractId.toProtoPrimitive, + rawContractInstance = rawContractInstance.getCryptographicEvidence, + // Even though [[ContractMetadata]] also implements `HasVersionedWrapper`, we explicitly use Protobuf V0 + // -> we only use `UntypedVersionedMessage` when required and not for 'regularly' nested Protobuf messages + metadata = Some(metadata.toProtoV1), + ledgerCreateTime = Some(ledgerCreateTime.toProtoPrimitive), + // Contract salt can be empty for contracts created in protocol versions < 4. + contractSalt = contractSalt.map(_.toProtoV0), + ) + + override def pretty: Pretty[SerializableContract] = prettyOfClass( + param("contractId", _.contractId), + paramWithoutValue("instance"), // Do not leak confidential data (such as PII) to the log file! 
+ param("metadata", _.metadata), + param("create time", _.ledgerCreateTime.ts), + paramIfDefined("contract salt", _.contractSalt), + ) + + def toLf: LfNodeCreate = LfNodeCreate( + coid = contractId, + templateId = rawContractInstance.contractInstance.unversioned.template, + arg = rawContractInstance.contractInstance.unversioned.arg, + agreementText = rawContractInstance.unvalidatedAgreementText.v, + signatories = metadata.signatories, + stakeholders = metadata.stakeholders, + keyOpt = metadata.maybeKeyWithMaintainers, + version = rawContractInstance.contractInstance.version, + ) + +} + +object SerializableContract + extends HasVersionedMessageCompanion[SerializableContract] + with HasVersionedMessageCompanionDbHelpers[SerializableContract] { + val supportedProtoVersions: SupportedProtoVersions = SupportedProtoVersions( + ProtoVersion(1) -> ProtoCodec( + ProtocolVersion.v30, + supportedProtoVersion(v1.SerializableContract)(fromProtoV1), + _.toProtoV1.toByteString, + ) + ) + + override def name: String = "serializable contract" + + // Ledger time of the "repair transaction" creating the contract + final case class LedgerCreateTime(ts: CantonTimestamp) extends AnyVal { + def toProtoPrimitive: Timestamp = ts.toProtoPrimitive + def toInstant: Instant = ts.toInstant + def toLf: LfTimestamp = ts.toLf + } + + object LedgerCreateTime extends PrettyUtil { + implicit val ledgerCreateTimeOrdering: Ordering[LedgerCreateTime] = Ordering.by(_.ts) + implicit val prettyLedgerCreateTime: Pretty[LedgerCreateTime] = + prettyOfClass[LedgerCreateTime](param("ts", _.ts)) + } + + def apply( + contractId: LfContractId, + contractInstance: LfContractInst, + metadata: ContractMetadata, + ledgerTime: CantonTimestamp, + contractSalt: Option[Salt], + unvalidatedAgreementText: AgreementText, + ): Either[ValueCoder.EncodeError, SerializableContract] = + SerializableRawContractInstance + .create(contractInstance, unvalidatedAgreementText) + .map( + SerializableContract(contractId, _, metadata, 
LedgerCreateTime(ledgerTime), contractSalt) + ) + + def fromDisclosedContract( + disclosedContract: ProcessedDisclosedContract + ): Either[String, SerializableContract] = { + val create = disclosedContract.create + val ledgerTime = CantonTimestamp(disclosedContract.createdAt) + val driverContractMetadataBytes = disclosedContract.driverMetadata.toByteArray + + for { + disclosedContractIdVersion <- CantonContractIdVersion + .ensureCantonContractId(disclosedContract.contractId) + .leftMap(err => s"Invalid disclosed contract id: ${err.toString}") + salt <- { + if (driverContractMetadataBytes.isEmpty) + Left[String, Option[Salt]]( + value = "Missing driver contract metadata in provided disclosed contract" + ) + else + DriverContractMetadata + .fromByteArray(driverContractMetadataBytes) + .leftMap(err => s"Failed parsing disclosed contract driver contract metadata: $err") + .map(m => Some(m.salt)) + } + contractInstance = create.versionedCoinst + cantonContractMetadata <- ContractMetadata.create( + signatories = create.signatories, + stakeholders = create.stakeholders, + maybeKeyWithMaintainers = create.versionedKeyOpt, + ) + contract <- SerializableContract( + contractId = disclosedContract.contractId, + contractInstance = contractInstance, + metadata = cantonContractMetadata, + ledgerTime = ledgerTime, + contractSalt = salt, + unvalidatedAgreementText = AgreementText(create.agreementText), + ).leftMap(err => s"Failed creating serializable contract from disclosed contract: $err") + } yield contract + } + + def fromProtoV1( + serializableContractInstanceP: v1.SerializableContract + ): ParsingResult[SerializableContract] = { + val v1.SerializableContract(contractIdP, rawP, metadataP, ledgerCreateTime, contractSaltP) = + serializableContractInstanceP + + toSerializableContract(contractIdP, rawP, metadataP, ledgerCreateTime, contractSaltP) + } + + private def toSerializableContract( + contractIdP: String, + rawP: ByteString, + metadataP: 
Option[v1.SerializableContract.Metadata], + ledgerCreateTime: Option[Timestamp], + contractSaltO: Option[crypto.v0.Salt], + ): ParsingResult[SerializableContract] = + for { + contractId <- ProtoConverter.parseLfContractId(contractIdP) + raw <- SerializableRawContractInstance + .fromByteString(rawP) + .leftMap(error => ValueConversionError("raw_contract_instance", error.toString)) + metadata <- ProtoConverter + .required("metadata", metadataP) + .flatMap(ContractMetadata.fromProtoV1) + ledgerTime <- ProtoConverter + .required("ledger_create_time", ledgerCreateTime) + .flatMap(CantonTimestamp.fromProtoPrimitive) + contractSalt <- contractSaltO.traverse(Salt.fromProtoV0) + } yield SerializableContract( + contractId, + raw, + metadata, + LedgerCreateTime(ledgerTime), + contractSalt, + ) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/SerializableContractWithWitnesses.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/SerializableContractWithWitnesses.scala new file mode 100644 index 0000000000..dad8d5d12c --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/SerializableContractWithWitnesses.scala @@ -0,0 +1,16 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol + +import com.digitalasset.canton.data.RepairContract +import com.digitalasset.canton.topology.PartyId + +/* + Allows backward compatibility for user scripts + TODO(#14441) Remove this object + */ +object SerializableContractWithWitnesses { + def apply(contract: SerializableContract, witnesses: Set[PartyId]): RepairContract = + RepairContract(contract, witnesses, None) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/SerializableDeduplicationPeriod.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/SerializableDeduplicationPeriod.scala new file mode 100644 index 0000000000..7a51bf3281 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/SerializableDeduplicationPeriod.scala @@ -0,0 +1,39 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol + +import com.daml.lf.data.{Bytes as LfBytes} +import com.digitalasset.canton.ProtoDeserializationError +import com.digitalasset.canton.ledger.api.DeduplicationPeriod +import com.digitalasset.canton.ledger.offset.Offset +import com.digitalasset.canton.serialization.ProtoConverter.{DurationConverter, ParsingResult} + +final case class SerializableDeduplicationPeriod(deduplicationPeriod: DeduplicationPeriod) { + def toProtoV0: v0.DeduplicationPeriod = deduplicationPeriod match { + case duration: DeduplicationPeriod.DeduplicationDuration => + v0.DeduplicationPeriod( + v0.DeduplicationPeriod.Period.Duration( + DurationConverter.toProtoPrimitive(duration.duration) + ) + ) + case offset: DeduplicationPeriod.DeduplicationOffset => + v0.DeduplicationPeriod(v0.DeduplicationPeriod.Period.Offset(offset.offset.bytes.toByteString)) + } +} +object SerializableDeduplicationPeriod { + def fromProtoV0( + 
deduplicationPeriodP: v0.DeduplicationPeriod + ): ParsingResult[DeduplicationPeriod] = { + val dedupP = v0.DeduplicationPeriod.Period + deduplicationPeriodP.period match { + case dedupP.Empty => Left(ProtoDeserializationError.FieldNotSet("DeduplicationPeriod.value")) + case dedupP.Duration(duration) => + DurationConverter + .fromProtoPrimitive(duration) + .map(DeduplicationPeriod.DeduplicationDuration) + case dedupP.Offset(offset) => + Right(DeduplicationPeriod.DeduplicationOffset(Offset(LfBytes.fromByteString(offset)))) + } + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/SerializableRawContractInstance.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/SerializableRawContractInstance.scala new file mode 100644 index 0000000000..1e11e787b1 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/SerializableRawContractInstance.scala @@ -0,0 +1,118 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol + +import cats.syntax.either.* +import com.daml.lf.transaction.{TransactionCoder, TransactionOuterClass} +import com.daml.lf.value.Value.ContractInstanceWithAgreement +import com.daml.lf.value.ValueCoder +import com.digitalasset.canton.ProtoDeserializationError.ValueConversionError +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.serialization.{ + MemoizedEvidenceWithFailure, + ProtoConverter, + SerializationCheckFailed, +} +import com.digitalasset.canton.store.db.DbDeserializationException +import com.google.common.annotations.VisibleForTesting +import com.google.protobuf.ByteString +import monocle.Lens +import monocle.macros.GenLens +import slick.jdbc.{GetResult, SetParameter} + +/** Represents a serializable contract instance and memoizes the serialization. 
+ * + * @param contractInstance The contract instance whose serialization is to be memoized. + * @param deserializedFrom If set, the given [[ByteString]] will be deemed to be the valid serialization for + * the given contract instance. If [[None]], + * the serialization is produced by [[TransactionCoder.encodeContractInstance]]. + */ +final case class SerializableRawContractInstance private ( + contractInstance: LfContractInst, + // Keeping this in the serializable instance for historical reasons + // The agreement text may come from an untrusted node. + unvalidatedAgreementText: AgreementText, +)( + override val deserializedFrom: Option[ByteString] +) extends MemoizedEvidenceWithFailure[ValueCoder.EncodeError] { + + /** @throws com.digitalasset.canton.serialization.SerializationCheckFailed If the serialization of the contract instance failed + */ + @throws[SerializationCheckFailed[ValueCoder.EncodeError]] + protected[this] override def toByteStringChecked: Either[ValueCoder.EncodeError, ByteString] = + TransactionCoder + .encodeContractInstance( + ValueCoder.CidEncoder, + contractInstance.map(ContractInstanceWithAgreement(_, unvalidatedAgreementText.v)), + ) + .map(_.toByteString) + + lazy val contractHash: LfHash = + LfHash.assertHashContractInstance( + contractInstance.unversioned.template, + contractInstance.unversioned.arg, + ) + + private def copy( + contractInstance: LfContractInst = this.contractInstance, + unvalidatedAgreementText: AgreementText = this.unvalidatedAgreementText, + ): SerializableRawContractInstance = + SerializableRawContractInstance(contractInstance, unvalidatedAgreementText)(None) +} + +object SerializableRawContractInstance { + + @VisibleForTesting + lazy val contractInstanceUnsafe: Lens[SerializableRawContractInstance, LfContractInst] = + GenLens[SerializableRawContractInstance](_.contractInstance) + + implicit def contractGetResult(implicit + getResultByteArray: GetResult[Array[Byte]] + ): GetResult[SerializableRawContractInstance] = 
GetResult { r => + SerializableRawContractInstance + .fromByteString(ByteString.copyFrom(r.<<[Array[Byte]])) + .getOrElse(throw new DbDeserializationException("Invalid contract instance")) + } + + implicit def contractSetParameter(implicit + setParameterByteArray: SetParameter[Array[Byte]] + ): SetParameter[SerializableRawContractInstance] = (c, pp) => + pp >> c.getCryptographicEvidence.toByteArray + + def create( + contractInstance: LfContractInst, + agreementText: AgreementText, + ): Either[ValueCoder.EncodeError, SerializableRawContractInstance] = + try { + Right(new SerializableRawContractInstance(contractInstance, agreementText)(None)) + } catch { + case SerializationCheckFailed(err: ValueCoder.EncodeError) => Left(err) + } + + /** Build a [[SerializableRawContractInstance]] from lf-protobuf and ContractId encoded ContractInst + * @param bytes byte string representing contract instance + * @return contract id + */ + def fromByteString( + bytes: ByteString + ): ParsingResult[SerializableRawContractInstance] = + for { + contractInstanceP <- ProtoConverter.protoParser( + TransactionOuterClass.ContractInstance.parseFrom + )(bytes) + contractInstanceAndAgreementText <- TransactionCoder + .decodeVersionedContractInstance(ValueCoder.CidDecoder, contractInstanceP) + .leftMap(error => ValueConversionError("", error.toString)) + ContractInstanceWithAgreement(_, agreementText) = contractInstanceAndAgreementText.unversioned + } yield createWithSerialization( + contractInstanceAndAgreementText.map(_.contractInstance), + AgreementText(agreementText), + )(bytes) + + @VisibleForTesting + def createWithSerialization(contractInst: LfContractInst, agreementText: AgreementText)( + deserializedFrom: ByteString + ): SerializableRawContractInstance = + new SerializableRawContractInstance(contractInst, agreementText)(Some(deserializedFrom)) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/Tags.scala 
b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/Tags.scala new file mode 100644 index 0000000000..4b43571efd --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/Tags.scala @@ -0,0 +1,219 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol + +import cats.Order +import cats.syntax.bifunctor.* +import com.digitalasset.canton.crypto.Hash +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.serialization.{ + DeserializationError, + HasCryptographicEvidence, + ProtoConverter, +} +import com.digitalasset.canton.topology.DomainId +import com.digitalasset.canton.util.ByteStringUtil +import com.digitalasset.canton.{LedgerTransactionId, ProtoDeserializationError} +import com.google.protobuf.ByteString +import com.google.protobuf.timestamp.Timestamp as ProtoTimestamp +import slick.jdbc.{GetResult, SetParameter} + +/** The root hash of a Merkle tree used as an identifier for requests. + * + * Extends [[com.digitalasset.canton.serialization.HasCryptographicEvidence]] so that [[RootHash]]'s serialization + * can be used to compute the hash of an inner Merkle node from its children using [[RootHash.getCryptographicEvidence]]. + * Serialization to Protobuf fields can be done with [[RootHash.toProtoPrimitive]] + * + * Here is how we use it: + * (1) Every participant gets a “partially blinded” Merkle tree, defining the locations of the views they are privy to. + * (2) That Merkle tree has a root. That root has a hash. That’s the root hash. + * (3) The mediator receives a fully blinded Merkle tree, with the same hash. 
+ * (4) The submitting participant will send for each receiving participant an additional “root hash message” in the + * same batch. That message will contain the same hash, with recipients (participant, mediator). + * (5) The mediator will check that all participants mentioned in the tree received a root hash message and that all + * hashes are equal. + * (6) Once the mediator sends out the verdict, the verdict will include the tree structure and thus the root hash. + * Hence, the participant will now have certainty about the mediator having checked all root hash messages + * and having observed the same tree structure. + */ +@SuppressWarnings(Array("org.wartremover.warts.FinalCaseClass")) // This class is mocked in tests +case class RootHash(private val hash: Hash) extends PrettyPrinting with HasCryptographicEvidence { + def unwrap: Hash = hash + + override def getCryptographicEvidence: ByteString = hash.getCryptographicEvidence + + def toProtoPrimitive: ByteString = getCryptographicEvidence + + def asLedgerTransactionId: Either[String, LedgerTransactionId] = + LedgerTransactionId.fromString(hash.toHexString) + + override def pretty: Pretty[RootHash] = prettyOfParam(_.unwrap) +} + +object RootHash { + implicit val setParameterRootHash: SetParameter[RootHash] = (rh, pp) => + pp >> rh.unwrap.toLengthLimitedHexString + + implicit val getResultRootHash: GetResult[RootHash] = GetResult { r => + RootHash(Hash.tryFromHexString(r.<<)) + } + + implicit val setParameterRootHashO: SetParameter[Option[RootHash]] = (rh, pp) => + pp >> rh.map(_.unwrap.toLengthLimitedHexString) + + implicit val getResultRootHashO: GetResult[Option[RootHash]] = { r => + r.<<[Option[String]].map(Hash.tryFromHexString).map(RootHash(_)) + } + + def fromByteString(bytes: ByteString): Either[DeserializationError, RootHash] = + Hash.fromByteString(bytes).map(RootHash(_)) + + def fromProtoPrimitive(bytes: ByteString): ParsingResult[RootHash] = + Hash.fromProtoPrimitive(bytes).map(RootHash(_)) + + def 
fromProtoPrimitiveOption( + bytes: ByteString + ): ParsingResult[Option[RootHash]] = + Hash.fromProtoPrimitiveOption(bytes).map(_.map(RootHash(_))) +} + +/** A hash-based transaction id. */ +final case class TransactionId(private val hash: Hash) extends HasCryptographicEvidence { + def unwrap: Hash = hash + + def toRootHash: RootHash = RootHash(hash) + + def toProtoPrimitive: ByteString = getCryptographicEvidence + + override def getCryptographicEvidence: ByteString = hash.getCryptographicEvidence + + def asLedgerTransactionId: Either[String, LedgerTransactionId] = + LedgerTransactionId.fromString(hash.toHexString) + + def tryAsLedgerTransactionId: LedgerTransactionId = + LedgerTransactionId.assertFromString(hash.toHexString) +} + +object TransactionId { + + def fromProtoPrimitive(bytes: ByteString): ParsingResult[TransactionId] = + Hash + .fromByteString(bytes) + .bimap(ProtoDeserializationError.CryptoDeserializationError, TransactionId(_)) + + def fromRootHash(rootHash: RootHash): TransactionId = TransactionId(rootHash.unwrap) + + /** Ordering for [[TransactionId]]s based on the serialized hash */ + implicit val orderTransactionId: Order[TransactionId] = + Order.by[TransactionId, ByteString](_.hash.getCryptographicEvidence)( + ByteStringUtil.orderByteString + ) + + implicit val orderingTransactionId: Ordering[TransactionId] = orderTransactionId.toOrdering + + implicit val prettyTransactionId: Pretty[TransactionId] = { + import Pretty.* + prettyOfParam(_.unwrap) + } + + implicit val setParameterTransactionId: SetParameter[TransactionId] = (v, pp) => pp.>>(v.hash) + + implicit val getResultTransactionId: GetResult[TransactionId] = GetResult { r => + TransactionId(r.<<) + } + + implicit val setParameterOptionTransactionId: SetParameter[Option[TransactionId]] = (v, pp) => + pp.>>(v.map(_.hash)) + + implicit val getResultOptionTransactionId: GetResult[Option[TransactionId]] = GetResult { r => + (r.<<[Option[Hash]]).map(TransactionId(_)) + } +} + +/** A hash-based 
transaction view id + * + * Views from different requests may have the same view hash. + */ +@SuppressWarnings(Array("org.wartremover.warts.FinalCaseClass")) // This class is mocked in tests +case class ViewHash(private val hash: Hash) extends PrettyPrinting { + def unwrap: Hash = hash + + def toProtoPrimitive: ByteString = hash.getCryptographicEvidence + + def toRootHash: RootHash = RootHash(hash) + + override def pretty: Pretty[ViewHash] = prettyOfClass(unnamedParam(_.hash)) +} + +object ViewHash { + + def fromProtoPrimitive(hash: ByteString): ParsingResult[ViewHash] = + Hash.fromProtoPrimitive(hash).map(ViewHash(_)) + + def fromProtoPrimitiveOption( + hash: ByteString + ): ParsingResult[Option[ViewHash]] = + Hash.fromProtoPrimitiveOption(hash).map(_.map(ViewHash(_))) + + def fromRootHash(hash: RootHash): ViewHash = ViewHash(hash.unwrap) + + /** Ordering for [[ViewHash]] based on the serialized hash */ + implicit val orderViewHash: Order[ViewHash] = + Order.by[ViewHash, ByteString](_.hash.getCryptographicEvidence)(ByteStringUtil.orderByteString) +} + +/** A confirmation request is identified by the sequencer timestamp. */ +final case class RequestId(private val ts: CantonTimestamp) extends PrettyPrinting { + def unwrap: CantonTimestamp = ts + + def toProtoPrimitive: ProtoTimestamp = ts.toProtoPrimitive + + override def pretty: Pretty[RequestId] = prettyOfClass(unnamedParam(_.ts)) +} + +object RequestId { + implicit val requestIdOrdering: Ordering[RequestId] = + Ordering.by[RequestId, CantonTimestamp](_.unwrap) + implicit val requestIdOrder: Order[RequestId] = Order.fromOrdering[RequestId] + + def fromProtoPrimitive(requestIdP: ProtoTimestamp): ParsingResult[RequestId] = + CantonTimestamp.fromProtoPrimitive(requestIdP).map(RequestId(_)) +} + +/** A transfer is identified by the source domain and the sequencer timestamp on the transfer-out request. 
*/ +final case class TransferId(sourceDomain: SourceDomainId, transferOutTimestamp: CantonTimestamp) + extends PrettyPrinting { + def toProtoV0: v0.TransferId = + v0.TransferId( + originDomain = sourceDomain.toProtoPrimitive, + timestamp = Some(transferOutTimestamp.toProtoPrimitive), + ) + + override def pretty: Pretty[TransferId] = prettyOfClass( + param("ts", _.transferOutTimestamp), + param("source", _.sourceDomain), + ) +} + +object TransferId { + implicit val transferIdGetResult: GetResult[TransferId] = GetResult { r => + TransferId( + SourceDomainId(GetResult[DomainId].apply(r)), + GetResult[CantonTimestamp].apply(r), + ) + } + + def fromProtoV0(transferIdP: v0.TransferId): ParsingResult[TransferId] = + transferIdP match { + case v0.TransferId(sourceDomainP, requestTimestampP) => + for { + sourceDomain <- DomainId.fromProtoPrimitive(sourceDomainP, "TransferId.origin_domain") + requestTimestamp <- ProtoConverter + .required("TransferId.timestamp", requestTimestampP) + .flatMap(CantonTimestamp.fromProtoPrimitive) + } yield TransferId(SourceDomainId(sourceDomain), requestTimestamp) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/TransferDomainId.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/TransferDomainId.scala new file mode 100644 index 0000000000..f418392e33 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/TransferDomainId.scala @@ -0,0 +1,85 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol + +import cats.kernel.Order +import com.digitalasset.canton.data.ViewType +import com.digitalasset.canton.data.ViewType.{TransferInViewType, TransferOutViewType} +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.protocol.TransferDomainId.TransferDomainIdCast +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.topology.DomainId +import slick.jdbc.{PositionedParameters, SetParameter} + +/** This trait can be used when distinction between source and target domain is important. + */ +sealed trait TransferDomainId extends PrettyPrinting with Product with Serializable { + def unwrap: DomainId + + def toProtoPrimitive: String = unwrap.toProtoPrimitive + + def toViewType: ViewType + + override def pretty: Pretty[this.type] = prettyOfParam(_.unwrap) +} + +object TransferDomainId { + implicit val orderTransferDomainId: Order[TransferDomainId] = + Order.by[TransferDomainId, String](_.toProtoPrimitive) + + implicit val setParameterTransferDomainId: SetParameter[TransferDomainId] = + (d: TransferDomainId, pp: PositionedParameters) => pp >> d.unwrap.toLengthLimitedString + implicit val setParameterTransferDomainIdO: SetParameter[Option[TransferDomainId]] = + (d: Option[TransferDomainId], pp: PositionedParameters) => + pp >> d.map(_.unwrap.toLengthLimitedString) + + trait TransferDomainIdCast[Kind <: TransferDomainId] { + def toKind(domain: TransferDomainId): Option[Kind] + } + + implicit val transferDomainIdCast: TransferDomainIdCast[TransferDomainId] = + (domain: TransferDomainId) => Some(domain) +} + +final case class SourceDomainId(id: DomainId) extends TransferDomainId { + override def unwrap: DomainId = id + + override def toViewType: TransferOutViewType = TransferOutViewType +} + +object SourceDomainId { + implicit val orderSourceDomainId: Order[SourceDomainId] = + 
Order.by[SourceDomainId, String](_.toProtoPrimitive) + + implicit val sourceDomainIdCast: TransferDomainIdCast[SourceDomainId] = { + case x: SourceDomainId => Some(x) + case _ => None + } + + def fromProtoPrimitive( + proto: String, + fieldName: String, + ): ParsingResult[SourceDomainId] = + DomainId.fromProtoPrimitive(proto, fieldName).map(SourceDomainId(_)) +} + +final case class TargetDomainId(id: DomainId) extends TransferDomainId { + override def unwrap: DomainId = id + + override def toViewType: TransferInViewType = TransferInViewType + +} + +object TargetDomainId { + implicit val targetDomainIdCast: TransferDomainIdCast[TargetDomainId] = { + case x: TargetDomainId => Some(x) + case _ => None + } + + def fromProtoPrimitive( + proto: String, + fieldName: String, + ): ParsingResult[TargetDomainId] = + DomainId.fromProtoPrimitive(proto, fieldName).map(TargetDomainId(_)) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/Unicum.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/Unicum.scala new file mode 100644 index 0000000000..fa708661c4 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/Unicum.scala @@ -0,0 +1,16 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol + +import com.daml.lf.data.Bytes +import com.digitalasset.canton.crypto.Hash + +/** A hash-based identifier for contracts. + * Must be paired with a discriminator to obtain a complete contract ID. 
+ */ +final case class Unicum(unwrap: Hash) extends AnyVal { + def toContractIdSuffix(contractIdVersion: CantonContractIdVersion): Bytes = + contractIdVersion.versionPrefixBytes ++ + Bytes.fromByteString(unwrap.getCryptographicEvidence) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/AcsCommitment.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/AcsCommitment.scala new file mode 100644 index 0000000000..698cc56f44 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/AcsCommitment.scala @@ -0,0 +1,253 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol.messages + +import cats.syntax.either.* +import com.digitalasset.canton.ProtoDeserializationError +import com.digitalasset.canton.crypto.HashPurpose +import com.digitalasset.canton.data.{CantonTimestamp, CantonTimestampSecond} +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.protocol.messages.SignedProtocolMessageContent.SignedMessageContentCast +import com.digitalasset.canton.protocol.v0 +import com.digitalasset.canton.resource.DbStorage +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.store.db.DbDeserializationException +import com.digitalasset.canton.time.PositiveSeconds +import com.digitalasset.canton.topology.* +import com.digitalasset.canton.util.NoCopy +import com.digitalasset.canton.version.{ + HasMemoizedProtocolVersionedWrapperCompanion, + HasProtocolVersionedWrapper, + ProtoVersion, + ProtocolVersion, + RepresentativeProtocolVersion, +} +import com.google.protobuf.ByteString +import slick.jdbc.{GetResult, GetTupleResult, SetParameter} + +import 
scala.math.Ordering.Implicits.* + +final case class CommitmentPeriod( + fromExclusive: CantonTimestampSecond, + periodLength: PositiveSeconds, +) extends PrettyPrinting { + val toInclusive: CantonTimestampSecond = fromExclusive + periodLength + + def overlaps(other: CommitmentPeriod): Boolean = { + fromExclusive < other.toInclusive && toInclusive > other.fromExclusive + } + + override def pretty: Pretty[CommitmentPeriod] = + prettyOfClass( + param("fromExclusive", _.fromExclusive), + param("toInclusive", _.toInclusive), + ) +} + +object CommitmentPeriod { + def create( + fromExclusive: CantonTimestamp, + periodLength: PositiveSeconds, + interval: PositiveSeconds, + ): Either[String, CommitmentPeriod] = for { + from <- CantonTimestampSecond.fromCantonTimestamp(fromExclusive) + _ <- Either.cond( + periodLength.unwrap >= interval.unwrap || from == CantonTimestampSecond.MinValue, + (), + s"The period must be at least as large as the interval or start at MinValue, but is $periodLength and the interval is $interval", + ) + _ <- Either.cond( + from.getEpochSecond % interval.unwrap.getSeconds == 0 || from == CantonTimestampSecond.MinValue, + (), + s"The commitment period must start at a commitment tick or at MinValue, but it starts on $from, and the tick interval is $interval", + ) + toInclusive = from + periodLength + _ <- Either.cond( + toInclusive.getEpochSecond % interval.unwrap.getSeconds == 0, + (), + s"The commitment period must end at a commitment tick, but it ends on $toInclusive, and the tick interval is $interval", + ) + } yield CommitmentPeriod( + fromExclusive = from, + periodLength = periodLength, + ) + + def create( + fromExclusive: CantonTimestamp, + toInclusive: CantonTimestamp, + interval: PositiveSeconds, + ): Either[String, CommitmentPeriod] = + PositiveSeconds + .between(fromExclusive, toInclusive) + .flatMap(CommitmentPeriod.create(fromExclusive, _, interval)) + + def create( + fromExclusive: CantonTimestampSecond, + toInclusive: 
CantonTimestampSecond, + ): Either[String, CommitmentPeriod] = + PositiveSeconds.between(fromExclusive, toInclusive).map(CommitmentPeriod(fromExclusive, _)) + + implicit val getCommitmentPeriod: GetResult[CommitmentPeriod] = + new GetTupleResult[(CantonTimestampSecond, CantonTimestampSecond)]( + GetResult[CantonTimestampSecond], + GetResult[CantonTimestampSecond], + ).andThen { case (from, to) => + PositiveSeconds + .between(from, to) + .map(CommitmentPeriod(from, _)) + .valueOr(err => throw new DbDeserializationException(err)) + } + +} + +/** A commitment to the active contract set (ACS) that is shared between two participants on a given domain at a given time. + * + * Given a commitment scheme to the ACS, the semantics are as follows: the sender declares that the shared ACS was exactly + * the one committed to, at every commitment tick during the specified period and as determined by the period's interval. + * + * The interval is assumed to be a round number of seconds. The ticks then start at the Java EPOCH time, and are exactly `interval` apart. 
+ */ +abstract sealed case class AcsCommitment private ( + domainId: DomainId, + sender: ParticipantId, + counterParticipant: ParticipantId, + period: CommitmentPeriod, + commitment: AcsCommitment.CommitmentType, +)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[AcsCommitment.type], + override val deserializedFrom: Option[ByteString], +) extends HasProtocolVersionedWrapper[AcsCommitment] + with SignedProtocolMessageContent + with NoCopy { + + @transient override protected lazy val companionObj: AcsCommitment.type = AcsCommitment + + override def signingTimestamp: CantonTimestamp = period.toInclusive.forgetRefinement + + protected def toProtoV0: v0.AcsCommitment = { + v0.AcsCommitment( + domainId = domainId.toProtoPrimitive, + sendingParticipant = sender.toProtoPrimitive, + counterParticipant = counterParticipant.toProtoPrimitive, + fromExclusive = Some(period.fromExclusive.toProtoPrimitive), + toInclusive = Some(period.toInclusive.toProtoPrimitive), + commitment = AcsCommitment.commitmentTypeToProto(commitment), + ) + } + + override protected[this] def toByteStringUnmemoized: ByteString = + super[HasProtocolVersionedWrapper].toByteString + + override protected[messages] def toProtoTypedSomeSignedProtocolMessage + : v0.TypedSignedProtocolMessageContent.SomeSignedProtocolMessage = + v0.TypedSignedProtocolMessageContent.SomeSignedProtocolMessage.AcsCommitment( + getCryptographicEvidence + ) + + override def hashPurpose: HashPurpose = HashPurpose.AcsCommitment + + override lazy val pretty: Pretty[AcsCommitment] = { + prettyOfClass( + param("domainId", _.domainId), + param("sender", _.sender), + param("counterParticipant", _.counterParticipant), + param("period", _.period), + param("commitment", _.commitment), + ) + } +} + +object AcsCommitment extends HasMemoizedProtocolVersionedWrapperCompanion[AcsCommitment] { + override val name: String = "AcsCommitment" + + val supportedProtoVersions = SupportedProtoVersions( + ProtoVersion(0) -> 
VersionedProtoConverter(ProtocolVersion.v30)(v0.AcsCommitment)( + supportedProtoVersionMemoized(_)(fromProtoV0), + _.toProtoV0.toByteString, + ) + ) + + type CommitmentType = ByteString + implicit val getResultCommitmentType: GetResult[CommitmentType] = + DbStorage.Implicits.getResultByteString + implicit val setCommitmentType: SetParameter[CommitmentType] = + DbStorage.Implicits.setParameterByteString + + def commitmentTypeToProto(commitment: CommitmentType): ByteString = commitment + def commitmentTypeFromByteString(bytes: ByteString): CommitmentType = bytes + + def create( + domainId: DomainId, + sender: ParticipantId, + counterParticipant: ParticipantId, + period: CommitmentPeriod, + commitment: CommitmentType, + protocolVersion: ProtocolVersion, + ): AcsCommitment = + new AcsCommitment(domainId, sender, counterParticipant, period, commitment)( + protocolVersionRepresentativeFor(protocolVersion), + None, + ) {} + + private def fromProtoV0(protoMsg: v0.AcsCommitment)( + bytes: ByteString + ): ParsingResult[AcsCommitment] = { + for { + domainId <- DomainId.fromProtoPrimitive(protoMsg.domainId, "AcsCommitment.domainId") + sender <- ParticipantId.fromProtoPrimitive( + protoMsg.sendingParticipant, + "AcsCommitment.sender", + ) + counterParticipant <- ParticipantId.fromProtoPrimitive( + protoMsg.counterParticipant, + "AcsCommitment.counterParticipant", + ) + fromExclusive <- ProtoConverter + .required("AcsCommitment.period.fromExclusive", protoMsg.fromExclusive) + .flatMap(CantonTimestampSecond.fromProtoPrimitive) + toInclusive <- ProtoConverter + .required("AcsCommitment.period.toInclusive", protoMsg.toInclusive) + .flatMap(CantonTimestampSecond.fromProtoPrimitive) + + periodLength <- PositiveSeconds + .between(fromExclusive, toInclusive) + .leftMap { _ => + ProtoDeserializationError.InvariantViolation( + s"Illegal commitment period length: $fromExclusive, $toInclusive" + ) + } + + period = CommitmentPeriod(fromExclusive, periodLength) + cmt = protoMsg.commitment + 
commitment = commitmentTypeFromByteString(cmt) + } yield new AcsCommitment(domainId, sender, counterParticipant, period, commitment)( + protocolVersionRepresentativeFor(ProtoVersion(0)), + Some(bytes), + ) {} + } + + implicit val acsCommitmentCast: SignedMessageContentCast[AcsCommitment] = + SignedMessageContentCast.create[AcsCommitment]("AcsCommitment") { + case m: AcsCommitment => Some(m) + case _ => None + } + + def getAcsCommitmentResultReader( + domainId: DomainId, + protocolVersion: ProtocolVersion, + ): GetResult[AcsCommitment] = + new GetTupleResult[(ParticipantId, ParticipantId, CommitmentPeriod, CommitmentType)]( + GetResult[ParticipantId], + GetResult[ParticipantId], + GetResult[CommitmentPeriod], + GetResult[CommitmentType], + ).andThen { case (sender, counterParticipant, period, commitment) => + new AcsCommitment(domainId, sender, counterParticipant, period, commitment)( + protocolVersionRepresentativeFor(protocolVersion), + None, + ) {} + } + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/CausalityMessage.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/CausalityMessage.scala new file mode 100644 index 0000000000..9743162412 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/CausalityMessage.scala @@ -0,0 +1,143 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol.messages + +import cats.implicits.* +import com.digitalasset.canton.LfPartyId +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.protocol.messages.ProtocolMessage.ProtocolMessageContentCast +import com.digitalasset.canton.protocol.v4.EnvelopeContent +import com.digitalasset.canton.protocol.{SourceDomainId, TargetDomainId, TransferId, v0} +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.topology.DomainId +import com.digitalasset.canton.version.{ + HasProtocolVersionedCompanion, + ProtoVersion, + ProtocolVersion, + RepresentativeProtocolVersion, +} + +/** Causality messages are sent along with a transfer-in response. They propagate causality information on + * the events a participant has "seen" for a party at the time of the transfer-out. 
+ * TODO(i9514): Encrypt the causality messages + * + * @param targetDomain The domain ID that the causality message is addressed to + * @param transferId The ID of the transfer for which we are propagating causality information + * @param clock The vector clock specifying causality information at the time of the transfer out + */ +final case class CausalityMessage private ( + targetDomain: TargetDomainId, + transferId: TransferId, + clock: VectorClock, +)(override val representativeProtocolVersion: RepresentativeProtocolVersion[CausalityMessage.type]) + extends UnsignedProtocolMessage + with PrettyPrinting { + + val domainId = targetDomain.unwrap + + def toProtoV0: v0.CausalityMessage = v0.CausalityMessage( + targetDomainId = domainId.toProtoPrimitive, + transferId = Some(transferId.toProtoV0), + clock = Some(clock.toProtoV0), + ) + + override protected[messages] def toProtoSomeEnvelopeContentV4 + : EnvelopeContent.SomeEnvelopeContent = throw new RuntimeException( + "This should not be called" + ) + + override def pretty: Pretty[CausalityMessage.this.type] = + prettyOfClass( + param("Message domain ", _.domainId), + param("Transfer ID ", _.transferId), + param("Vector clock", _.clock), + ) + + @transient override protected lazy val companionObj: CausalityMessage.type = CausalityMessage +} + +object CausalityMessage extends HasProtocolVersionedCompanion[CausalityMessage] { + + val supportedProtoVersions = SupportedProtoVersions( + ProtoVersion(0) -> VersionedProtoConverter(ProtocolVersion.v30)(v0.CausalityMessage)( + supportedProtoVersion(_)(fromProtoV0), + _.toProtoV0.toByteString, + ) + ) + + implicit val causalityMessageCast: ProtocolMessageContentCast[CausalityMessage] = + ProtocolMessageContentCast.create[CausalityMessage]("CausalityMessage") { + case cm: CausalityMessage => Some(cm) + case _ => None + } + + private[messages] def fromProtoV0(cmP: v0.CausalityMessage): ParsingResult[CausalityMessage] = { + val v0.CausalityMessage(domainIdP, transferIdP, 
clockPO) = cmP + for { + domainId <- DomainId.fromProtoPrimitive(domainIdP, "target_domain_id").map(TargetDomainId(_)) + clocks <- ProtoConverter.parseRequired(VectorClock.fromProtoV0, "clock", clockPO) + tid <- ProtoConverter.parseRequired(TransferId.fromProtoV0, "transfer_id", transferIdP) + } yield CausalityMessage( + domainId, + tid, + clocks, + )(protocolVersionRepresentativeFor(ProtoVersion(0))) + } + + override def name: String = "CausalityMessage" +} + +/** A vector clock represents the causal constraints that must be respected for a party at a certain point in time. + * Vector clocks are maintained per-domain + * + * @param sourceDomainId The domain of the vector clock + * @param localTs The timestamp on `sourceDomainId` specifying the time at which the causal constraints are valid + * @param partyId The party who has seen the causal information specified by `clock` + * @param clock The most recent timestamp on each domain that `partyId` has causally observed + */ +final case class VectorClock( + sourceDomainId: SourceDomainId, + localTs: CantonTimestamp, + partyId: LfPartyId, + clock: Map[DomainId, CantonTimestamp], +) extends PrettyPrinting { + + override def pretty: Pretty[VectorClock.this.type] = + prettyOfClass( + param("Domain for constraints ", _.sourceDomainId), + param("Most recent timestamps", _.clock), + param("Local timestamp", _.localTs), + param("Party", _.partyId), + ) + + private[messages] def toProtoV0: v0.VectorClock = { + v0.VectorClock( + originDomainId = sourceDomainId.toProtoPrimitive, + localTs = Some(localTs.toProtoPrimitive), + partyId = partyId, + clock = clock.map { case (did, cts) => did.toProtoPrimitive -> cts.toProtoPrimitive }, + ) + } +} + +object VectorClock { + private[messages] def fromProtoV0(vc: v0.VectorClock): ParsingResult[VectorClock] = { + val v0.VectorClock(did, ts, partyId, clock) = vc + for { + localTs <- ProtoConverter.parseRequired(CantonTimestamp.fromProtoPrimitive, "local_ts", ts) + sourceDomainId <- 
DomainId.fromProtoPrimitive(did, "origin_domain_id").map(SourceDomainId(_)) + party <- ProtoConverter.parseLfPartyId(partyId) + domainTimestamps <- clock.toList.traverse { case (kProto, vProto) => + for { + k <- DomainId.fromProtoPrimitive(kProto, "clock (key: DomainId)") + v <- CantonTimestamp.fromProtoPrimitive(vProto) + } yield k -> v + } + } yield { + VectorClock(sourceDomainId, localTs, party, domainTimestamps.toMap) + } + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/DomainTopologyTransactionMessage.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/DomainTopologyTransactionMessage.scala new file mode 100644 index 0000000000..afd9e7416e --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/DomainTopologyTransactionMessage.scala @@ -0,0 +1,290 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol.messages + +import cats.data.EitherT +import cats.syntax.either.* +import cats.syntax.traverse.* +import com.digitalasset.canton.config.CantonRequireTypes.LengthLimitedString.TopologyRequestId +import com.digitalasset.canton.config.CantonRequireTypes.String255 +import com.digitalasset.canton.crypto.* +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.protocol.messages.ProtocolMessage.ProtocolMessageContentCast +import com.digitalasset.canton.protocol.messages.TopologyTransactionsBroadcastX.Broadcast +import com.digitalasset.canton.protocol.{v1, v2, v4} +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.topology.* +import com.digitalasset.canton.topology.transaction.* +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.version.{ + HasProtocolVersionedCompanion, + ProtoVersion, + ProtocolVersion, + RepresentativeProtocolVersion, +} +import com.google.common.annotations.VisibleForTesting + +import scala.concurrent.{ExecutionContext, Future} + +final case class DomainTopologyTransactionMessage private ( + domainTopologyManagerSignature: Signature, + transactions: List[SignedTopologyTransaction[TopologyChangeOp]], + notSequencedAfter: CantonTimestamp, + override val domainId: DomainId, +)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + DomainTopologyTransactionMessage.type + ] +) extends UnsignedProtocolMessage { + + def hashToSign(hashOps: HashOps): Hash = + DomainTopologyTransactionMessage.hash( + transactions, + domainId, + notSequencedAfter, + hashOps, + ) + + private[messages] def toProtoV1: v1.DomainTopologyTransactionMessage = + v1.DomainTopologyTransactionMessage( + signature = Some(domainTopologyManagerSignature.toProtoV0), + transactions = 
transactions.map(_.getCryptographicEvidence), + domainId = domainId.toProtoPrimitive, + notSequencedAfter = Some(notSequencedAfter.toProtoPrimitive), + ) + + override def toProtoSomeEnvelopeContentV4: v4.EnvelopeContent.SomeEnvelopeContent = + v4.EnvelopeContent.SomeEnvelopeContent.DomainTopologyTransactionMessage(toProtoV1) + + @transient override protected lazy val companionObj: DomainTopologyTransactionMessage.type = + DomainTopologyTransactionMessage + + @VisibleForTesting + def replaceSignatureForTesting(signature: Signature): DomainTopologyTransactionMessage = { + copy(domainTopologyManagerSignature = signature)(representativeProtocolVersion) + } + +} + +object DomainTopologyTransactionMessage + extends HasProtocolVersionedCompanion[DomainTopologyTransactionMessage] { + + implicit val domainIdentityTransactionMessageCast + : ProtocolMessageContentCast[DomainTopologyTransactionMessage] = + ProtocolMessageContentCast.create[DomainTopologyTransactionMessage]( + "DomainTopologyTransactionMessage" + ) { + case dttm: DomainTopologyTransactionMessage => Some(dttm) + case _ => None + } + + val supportedProtoVersions = SupportedProtoVersions( + ProtoVersion(1) -> VersionedProtoConverter(ProtocolVersion.v30)( + v1.DomainTopologyTransactionMessage + )( + supportedProtoVersion(_)(fromProtoV1), + _.toProtoV1.toByteString, + ) + ) + + private def hash( + transactions: List[SignedTopologyTransaction[TopologyChangeOp]], + domainId: DomainId, + notSequencedAfter: CantonTimestamp, + hashOps: HashOps, + ): Hash = { + val builder = hashOps + .build(HashPurpose.DomainTopologyTransactionMessageSignature) + .add(domainId.toProtoPrimitive) + + builder.add(notSequencedAfter.toEpochMilli) + + transactions.foreach(elem => builder.add(elem.getCryptographicEvidence)) + builder.finish() + } + + def create( + transactions: List[SignedTopologyTransaction[TopologyChangeOp]], + syncCrypto: DomainSnapshotSyncCryptoApi, + domainId: DomainId, + notSequencedAfter: CantonTimestamp, + 
protocolVersion: ProtocolVersion, + )(implicit + traceContext: TraceContext, + ec: ExecutionContext, + ): EitherT[Future, String, DomainTopologyTransactionMessage] = { + + val hashToSign = hash( + transactions, + domainId, + notSequencedAfter, + syncCrypto.crypto.pureCrypto, + ) + + for { + signature <- syncCrypto.sign(hashToSign).leftMap(_.toString) + domainTopologyTransactionMessageE = Either + .catchOnly[IllegalArgumentException]( + DomainTopologyTransactionMessage( + signature, + transactions, + notSequencedAfter = notSequencedAfter, + domainId, + )(protocolVersionRepresentativeFor(protocolVersion)) + ) + .leftMap(_.getMessage) + domainTopologyTransactionMessage <- EitherT.fromEither[Future]( + domainTopologyTransactionMessageE + ) + } yield domainTopologyTransactionMessage + } + + def tryCreate( + transactions: List[SignedTopologyTransaction[TopologyChangeOp]], + crypto: DomainSnapshotSyncCryptoApi, + domainId: DomainId, + notSequencedAfter: CantonTimestamp, + protocolVersion: ProtocolVersion, + )(implicit + traceContext: TraceContext, + ec: ExecutionContext, + ): Future[DomainTopologyTransactionMessage] = { + create(transactions, crypto, domainId, notSequencedAfter, protocolVersion).fold( + err => + throw new IllegalStateException( + s"Failed to create domain topology transaction message: $err" + ), + identity, + ) + } + + private[messages] def fromProtoV1( + message: v1.DomainTopologyTransactionMessage + ): ParsingResult[DomainTopologyTransactionMessage] = { + val v1.DomainTopologyTransactionMessage(signature, domainId, timestamp, transactions) = message + for { + succeededContent <- transactions.toList.traverse( + SignedTopologyTransaction.fromByteString + ) + signature <- ProtoConverter.parseRequired(Signature.fromProtoV0, "signature", signature) + domainUid <- UniqueIdentifier.fromProtoPrimitive(domainId, "domainId") + notSequencedAfter <- ProtoConverter.parseRequired( + CantonTimestamp.fromProtoPrimitive, + "not_sequenced_after", + timestamp, + ) + } 
yield DomainTopologyTransactionMessage( + signature, + succeededContent, + notSequencedAfter = notSequencedAfter, + DomainId(domainUid), + )(protocolVersionRepresentativeFor(ProtoVersion(1))) + } + + override def name: String = "DomainTopologyTransactionMessage" +} + +final case class TopologyTransactionsBroadcastX private ( + override val domainId: DomainId, + broadcasts: Seq[Broadcast], +)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + TopologyTransactionsBroadcastX.type + ] +) extends UnsignedProtocolMessage { + + @transient override protected lazy val companionObj: TopologyTransactionsBroadcastX.type = + TopologyTransactionsBroadcastX + + override protected[messages] def toProtoSomeEnvelopeContentV4 + : v4.EnvelopeContent.SomeEnvelopeContent = + v4.EnvelopeContent.SomeEnvelopeContent.TopologyTransactionsBroadcast(toProtoV2) + + def toProtoV2: v2.TopologyTransactionsBroadcastX = v2.TopologyTransactionsBroadcastX( + domainId.toProtoPrimitive, + broadcasts = broadcasts.map(_.toProtoV2), + ) + +} + +object TopologyTransactionsBroadcastX + extends HasProtocolVersionedCompanion[ + TopologyTransactionsBroadcastX + ] { + + def create( + domainId: DomainId, + broadcasts: Seq[Broadcast], + protocolVersion: ProtocolVersion, + ): TopologyTransactionsBroadcastX = + TopologyTransactionsBroadcastX(domainId = domainId, broadcasts = broadcasts)( + supportedProtoVersions.protocolVersionRepresentativeFor(protocolVersion) + ) + + override def name: String = "TopologyTransactionsBroadcastX" + + implicit val acceptedTopologyTransactionXMessageCast + : ProtocolMessageContentCast[TopologyTransactionsBroadcastX] = + ProtocolMessageContentCast.create[TopologyTransactionsBroadcastX]( + name + ) { + case att: TopologyTransactionsBroadcastX => Some(att) + case _ => None + } + + val supportedProtoVersions = SupportedProtoVersions( + ProtoVersion(2) -> VersionedProtoConverter(ProtocolVersion.v30)( + v2.TopologyTransactionsBroadcastX + )( + 
supportedProtoVersion(_)(fromProtoV2), + _.toProtoV2.toByteString, + ) + ) + + private[messages] def fromProtoV2( + message: v2.TopologyTransactionsBroadcastX + ): ParsingResult[TopologyTransactionsBroadcastX] = { + val v2.TopologyTransactionsBroadcastX(domain, broadcasts) = message + for { + domainId <- DomainId.fromProtoPrimitive(domain, "domain") + broadcasts <- broadcasts.traverse(broadcastFromProtoV2) + } yield TopologyTransactionsBroadcastX(domainId, broadcasts.toList)( + protocolVersionRepresentativeFor(ProtoVersion(2)) + ) + } + + private def broadcastFromProtoV2( + message: v2.TopologyTransactionsBroadcastX.Broadcast + ): ParsingResult[Broadcast] = { + val v2.TopologyTransactionsBroadcastX.Broadcast(broadcastId, transactions) = message + for { + broadcastId <- String255.fromProtoPrimitive(broadcastId, "broadcast_id") + transactions <- transactions.traverse(SignedTopologyTransactionX.fromProtoV2) + } yield Broadcast(broadcastId, transactions.toList) + } + + final case class Broadcast( + broadcastId: TopologyRequestId, + transactions: List[SignedTopologyTransactionX[TopologyChangeOpX, TopologyMappingX]], + ) { + def toProtoV2: v2.TopologyTransactionsBroadcastX.Broadcast = + v2.TopologyTransactionsBroadcastX.Broadcast( + broadcastId = broadcastId.toProtoPrimitive, + transactions = transactions.map(_.toProtoV2), + ) + } + + /** The state of the submission of a topology transaction broadcast. In combination with the sequencer client + * send tracker capability, State reflects that either the sequencer Accepted the submission or that the submission + * was Rejected due to an error or a timeout. See DomainTopologyServiceX. 
+ */ + sealed trait State extends Product with Serializable + + object State { + case object Failed extends State + + case object Accepted extends State + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/EncryptedViewMessage.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/EncryptedViewMessage.scala new file mode 100644 index 0000000000..88c3519a9b --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/EncryptedViewMessage.scala @@ -0,0 +1,575 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol.messages + +import cats.Functor +import cats.data.EitherT +import cats.syntax.either.* +import cats.syntax.traverse.* +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.ProtoDeserializationError.CryptoDeserializationError +import com.digitalasset.canton.crypto.SyncCryptoError.SyncCryptoDecryptionError +import com.digitalasset.canton.crypto.* +import com.digitalasset.canton.crypto.store.CryptoPrivateStoreError +import com.digitalasset.canton.crypto.store.CryptoPrivateStoreError.FailedToReadKey +import com.digitalasset.canton.data.ViewType +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.protocol.* +import com.digitalasset.canton.protocol.messages.EncryptedViewMessage.RecipientsInfo +import com.digitalasset.canton.protocol.messages.EncryptedViewMessageError.{ + SessionKeyCreationError, + SyncCryptoDecryptError, + WrongRandomnessLength, +} +import com.digitalasset.canton.protocol.messages.ProtocolMessage.ProtocolMessageContentCast +import com.digitalasset.canton.serialization.DeserializationError +import com.digitalasset.canton.serialization.ProtoConverter.{ParsingResult, parseRequiredNonEmpty} +import 
com.digitalasset.canton.store.SessionKeyStore +import com.digitalasset.canton.topology.{DomainId, ParticipantId, PartyId, UniqueIdentifier} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.* +import com.digitalasset.canton.version.* +import com.google.protobuf.ByteString + +import scala.concurrent.{ExecutionContext, Future} + +/** An encrypted [[com.digitalasset.canton.data.ViewTree]] together with its [[com.digitalasset.canton.data.ViewType]]. + * The correspondence is encoded via a path-dependent type. + * The type parameter `VT` exposes a upper bound on the type of view types that may be contained. + * + * The view tree is compressed before encryption. + */ +// This is not a case class due to the type dependency between `viewType` and `viewTree`. +// We therefore implement the case class boilerplate stuff to the extent needed. +sealed trait EncryptedView[+VT <: ViewType] extends Product with Serializable { + val viewType: VT + val viewTree: Encrypted[EncryptedView.CompressedView[viewType.View]] + + override def productArity: Int = 1 + override def productElement(n: Int): Any = n match { + case 0 => viewTree + case _ => throw new IndexOutOfBoundsException(s"Index out of range: $n") + } + @SuppressWarnings(Array("org.wartremover.warts.IsInstanceOf")) + override def canEqual(that: Any): Boolean = that.isInstanceOf[EncryptedView[_]] + @SuppressWarnings( + Array( + "org.wartremover.warts.AsInstanceOf", + "org.wartremover.warts.IsInstanceOf", + "org.wartremover.warts.Null", + ) + ) + override def equals(that: Any): Boolean = { + if (this eq that.asInstanceOf[Object]) true + else if (!that.isInstanceOf[EncryptedView[_]]) false + else { + val other = that.asInstanceOf[EncryptedView[ViewType]] + val thisViewTree = this.viewTree + if (thisViewTree eq null) other.viewTree eq null else thisViewTree == other.viewTree + } + } + override def hashCode(): Int = scala.runtime.ScalaRunTime._hashCode(this) + + /** Cast the type parameter to 
the given argument's [[com.digitalasset.canton.data.ViewType]] + * provided that the argument is the same as [[viewType]] + * @return [[scala.None$]] if `desiredViewType` does not equal [[viewType]]. + */ + @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) + def select(desiredViewType: ViewType): Option[EncryptedView[desiredViewType.type]] = + // Unfortunately, there doesn't seem to be a way to convince Scala's type checker that the two types must be equal. + if (desiredViewType == viewType) Some(this.asInstanceOf[EncryptedView[desiredViewType.type]]) + else None + + /** Indicative size for pretty printing */ + def sizeHint: Int + +} +object EncryptedView { + def apply[VT <: ViewType]( + aViewType: VT + )(aViewTree: Encrypted[CompressedView[aViewType.View]]): EncryptedView[VT] = + new EncryptedView[VT] { + override val viewType: aViewType.type = aViewType + override val viewTree = aViewTree + override lazy val sizeHint: Int = aViewTree.ciphertext.size + } + + def compressed[VT <: ViewType]( + encryptionOps: EncryptionOps, + viewKey: SymmetricKey, + aViewType: VT, + version: ProtocolVersion, + )(aViewTree: aViewType.View): Either[EncryptionError, EncryptedView[VT]] = + encryptionOps + .encryptWith(CompressedView(aViewTree), viewKey, version) + .map(apply(aViewType)) + + def decrypt[VT <: ViewType]( + encryptionOps: EncryptionOps, + viewKey: SymmetricKey, + encrypted: EncryptedView[VT], + )( + deserialize: ByteString => Either[DeserializationError, encrypted.viewType.View] + ): Either[DecryptionError, encrypted.viewType.View] = + encryptionOps + .decryptWith(encrypted.viewTree, viewKey)( + CompressedView.fromByteString[encrypted.viewType.View](deserialize)(_) + ) + .map(_.value) + + /** Wrapper class to compress the view before encrypting it. 
+ * + * This class's methods are essentially private to [[EncryptedView]] + * because compression is in theory non-deterministic (the gzip format can store a timestamp that is ignored by decryption) + * and we want to avoid that this is applied to [[com.digitalasset.canton.serialization.HasCryptographicEvidence]] + * instances. + */ + final case class CompressedView[+V <: HasVersionedToByteString] private (value: V) + extends HasVersionedToByteString { + override def toByteString(version: ProtocolVersion): ByteString = + ByteStringUtil.compressGzip(value.toByteString(version)) + } + + object CompressedView { + private[EncryptedView] def apply[V <: HasVersionedToByteString](value: V): CompressedView[V] = + new CompressedView(value) + + private[EncryptedView] def fromByteString[V <: HasVersionedToByteString]( + deserialize: ByteString => Either[DeserializationError, V] + )(bytes: ByteString): Either[DeserializationError, CompressedView[V]] = + // TODO(i10428) Make sure that this view does not explode into an arbitrarily large object + ByteStringUtil + .decompressGzip(bytes, maxBytesLimit = None) + .flatMap(deserialize) + .map(CompressedView(_)) + } + +} + +/** An encrypted view message. + * + * See [[https://engineering.da-int.net/docs/platform-architecture-handbook/arch/canton/tx-data-structures.html#transaction-hashes-and-views]] + * The view message encrypted with symmetric key that is derived from the view's randomness. + * + * @param viewHash Transaction view hash in plain text - included such that the recipient can prove to a 3rd party + * that it has correctly decrypted the `viewTree` + * @param randomness the view's randomness symmetrically encrypted with a session key. + * @param sessionKey a sequence of encrypted random values to each recipient of the view. + * These values are encrypted and are used to derive the symmetric session key. + * Instead of sending a , which could cause formatting issues + * (e.g. 
different participants with different providers and, therefore, different key formats), + * we send an encrypted . + */ +final case class EncryptedViewMessage[+VT <: ViewType]( + submitterParticipantSignature: Option[Signature], + viewHash: ViewHash, + randomness: Encrypted[SecureRandomness], + sessionKey: NonEmpty[Seq[AsymmetricEncrypted[SecureRandomness]]], + encryptedView: EncryptedView[VT], + override val domainId: DomainId, + viewEncryptionScheme: SymmetricKeyScheme, +)( + val recipientsInfo: Option[RecipientsInfo], + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + EncryptedViewMessage.type + ], +) extends UnsignedProtocolMessage { + + @transient override protected lazy val companionObj: EncryptedViewMessage.type = + EncryptedViewMessage + + def viewType: VT = encryptedView.viewType + + def copy[A <: ViewType]( + submitterParticipantSignature: Option[Signature] = this.submitterParticipantSignature, + viewHash: ViewHash = this.viewHash, + randomness: Encrypted[SecureRandomness] = this.randomness, + sessionKeyRandomness: NonEmpty[Seq[AsymmetricEncrypted[SecureRandomness]]] = this.sessionKey, + encryptedView: EncryptedView[A] = this.encryptedView, + domainId: DomainId = this.domainId, + viewEncryptionScheme: SymmetricKeyScheme = this.viewEncryptionScheme, + ): EncryptedViewMessage[A] = new EncryptedViewMessage( + submitterParticipantSignature, + viewHash, + randomness, + sessionKeyRandomness, + encryptedView, + domainId, + viewEncryptionScheme, + )(recipientsInfo, representativeProtocolVersion) + + private def toProtoV2: v2.EncryptedViewMessage = v2.EncryptedViewMessage( + viewTree = encryptedView.viewTree.ciphertext, + encryptionScheme = viewEncryptionScheme.toProtoEnum, + submitterParticipantSignature = submitterParticipantSignature.map(_.toProtoV0), + viewHash = viewHash.toProtoPrimitive, + randomness = randomness.ciphertext, + sessionKeyRandomness = sessionKey.map(EncryptedViewMessage.serializeSessionKeyEntry), + domainId = 
domainId.toProtoPrimitive, + viewType = viewType.toProtoEnum, + ) + + override def toProtoSomeEnvelopeContentV4: v4.EnvelopeContent.SomeEnvelopeContent = + v4.EnvelopeContent.SomeEnvelopeContent.EncryptedViewMessage(toProtoV2) + + protected def updateView[VT2 <: ViewType]( + newView: EncryptedView[VT2] + ): EncryptedViewMessage[VT2] = copy(encryptedView = newView) + + def toByteString: ByteString = toProtoV2.toByteString + + @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) + def traverse[F[_], VT2 <: ViewType]( + f: EncryptedView[VT] => F[EncryptedView[VT2]] + )(implicit F: Functor[F]): F[EncryptedViewMessage[VT2]] = { + F.map(f(encryptedView)) { newEncryptedView => + if (newEncryptedView eq encryptedView) this.asInstanceOf[EncryptedViewMessage[VT2]] + else updateView(newEncryptedView) + } + } + + override def pretty: Pretty[EncryptedViewMessage.this.type] = prettyOfClass( + param("view hash", _.viewHash), + param("view type", _.viewType), + param("size", _.encryptedView.sizeHint), + ) +} + +object EncryptedViewMessage extends HasProtocolVersionedCompanion[EncryptedViewMessage[ViewType]] { + + val supportedProtoVersions = SupportedProtoVersions( + ProtoVersion(2) -> VersionedProtoConverter(ProtocolVersion.v30)(v2.EncryptedViewMessage)( + supportedProtoVersion(_)(EncryptedViewMessage.fromProto), + _.toByteString, + ) + ) + + def apply[VT <: ViewType]( + submitterParticipantSignature: Option[Signature], + viewHash: ViewHash, + randomness: Encrypted[SecureRandomness], + sessionKey: NonEmpty[Seq[AsymmetricEncrypted[SecureRandomness]]], + encryptedView: EncryptedView[VT], + domainId: DomainId, + viewEncryptionScheme: SymmetricKeyScheme, + protocolVersion: ProtocolVersion, + )( + recipientsInfo: Option[RecipientsInfo] + ): EncryptedViewMessage[VT] = EncryptedViewMessage( + submitterParticipantSignature, + viewHash, + randomness, + sessionKey, + encryptedView, + domainId, + viewEncryptionScheme, + )(recipientsInfo, 
protocolVersionRepresentativeFor(protocolVersion)) + + private def serializeSessionKeyEntry( + encryptedSessionKey: AsymmetricEncrypted[SecureRandomness] + ): v2.SessionKeyLookup = { + v2.SessionKeyLookup( + sessionKeyRandomness = encryptedSessionKey.ciphertext, + fingerprint = encryptedSessionKey.encryptedFor.toProtoPrimitive, + ) + } + + private def deserializeSessionKeyEntry( + sessionKeyLookup: v2.SessionKeyLookup + ): ParsingResult[AsymmetricEncrypted[SecureRandomness]] = + for { + fingerprint <- Fingerprint.fromProtoPrimitive(sessionKeyLookup.fingerprint) + sessionKeyRandomness = sessionKeyLookup.sessionKeyRandomness + } yield AsymmetricEncrypted(sessionKeyRandomness, fingerprint) + + def fromProto( + encryptedViewMessageP: v2.EncryptedViewMessage + ): ParsingResult[EncryptedViewMessage[ViewType]] = { + val v2.EncryptedViewMessage( + viewTreeP, + encryptionSchemeP, + signatureP, + viewHashP, + randomnessP, + sessionKeyMapP, + domainIdP, + viewTypeP, + ) = + encryptedViewMessageP + for { + viewType <- ViewType.fromProtoEnum(viewTypeP) + viewEncryptionScheme <- SymmetricKeyScheme.fromProtoEnum( + "encryptionScheme", + encryptionSchemeP, + ) + signature <- signatureP.traverse(Signature.fromProtoV0) + viewTree <- Encrypted + .fromByteString[EncryptedView.CompressedView[viewType.View]](viewTreeP) + .leftMap(CryptoDeserializationError) + encryptedView = EncryptedView(viewType)(viewTree) + viewHash <- ViewHash.fromProtoPrimitive(viewHashP) + randomness <- Encrypted + .fromByteString[SecureRandomness](randomnessP) + .leftMap(CryptoDeserializationError) + sessionKeyRandomnessNE <- parseRequiredNonEmpty( + deserializeSessionKeyEntry, + "session key", + sessionKeyMapP, + ) + domainUid <- UniqueIdentifier.fromProtoPrimitive(domainIdP, "domainId") + } yield new EncryptedViewMessage( + signature, + viewHash, + randomness, + sessionKeyRandomnessNE, + encryptedView, + DomainId(domainUid), + viewEncryptionScheme, + )(None, protocolVersionRepresentativeFor(ProtoVersion(2))) + 
} + + def decryptRandomness[VT <: ViewType]( + snapshot: DomainSnapshotSyncCryptoApi, + sessionKeyStore: SessionKeyStore, + encrypted: EncryptedViewMessage[VT], + participantId: ParticipantId, + )(implicit + ec: ExecutionContext, + tc: TraceContext, + ): EitherT[Future, EncryptedViewMessageError, SecureRandomness] = { + val pureCrypto = snapshot.pureCrypto + + val randomnessLength = EncryptedViewMessage.computeRandomnessLength(pureCrypto) + + def decryptViewRandomness( + sessionKeyRandomness: SecureRandomness + ): EitherT[Future, EncryptedViewMessageError, SecureRandomness] = + for { + // derive symmetric key from randomness + sessionKey <- pureCrypto + .createSymmetricKey(sessionKeyRandomness, encrypted.viewEncryptionScheme) + .leftMap[EncryptedViewMessageError](SessionKeyCreationError) + .toEitherT[Future] + randomness <- pureCrypto + .decryptWith(encrypted.randomness, sessionKey)( + SecureRandomness.fromByteString(randomnessLength) + ) + .leftMap[EncryptedViewMessageError]( + EncryptedViewMessageError.SymmetricDecryptError + ) + .toEitherT[Future] + } yield randomness + + encrypted.sessionKey + .collectFirst { + case AsymmetricEncrypted(ciphertext, encryptedFor) + // if we're using no encryption, it means we're using group addressing + // which currently does not support encryption of the randomness + if encryptedFor == AsymmetricEncrypted.noEncryptionFingerprint => + SecureRandomness + .fromByteString(randomnessLength)(encrypted.randomness.ciphertext) + .leftMap[EncryptedViewMessageError](_ => + WrongRandomnessLength(ciphertext.size(), randomnessLength) + ) + .toEitherT[Future] + } + .getOrElse { + for { + /* We first need to check whether the target private encryption key exists and is active in the store; otherwise, + * we cannot decrypt and should abort. 
This situation can occur + * if an encryption key has been added to this participant's topology by another entity with the + * correct rights to do so, but this participant does not have the corresponding private key in the store. + */ + encryptionKeys <- EitherT + .right(snapshot.ipsSnapshot.encryptionKeys(participantId)) + .map(_.map(_.id).toSet) + encryptedSessionKeyForParticipant <- encrypted.sessionKey + .find(e => encryptionKeys.contains(e.encryptedFor)) + .toRight( + EncryptedViewMessageError.MissingParticipantKey(participantId) + ) + .toEitherT[Future] + _ <- snapshot.crypto.cryptoPrivateStore + .existsDecryptionKey(encryptedSessionKeyForParticipant.encryptedFor) + .leftMap(err => EncryptedViewMessageError.PrivateKeyStoreVerificationError(err)) + .subflatMap { + Either.cond( + _, + (), + EncryptedViewMessageError.PrivateKeyStoreVerificationError( + FailedToReadKey( + encryptedSessionKeyForParticipant.encryptedFor, + "matching private key does not exist", + ) + ), + ) + } + + // we get the randomness for the session key from the message or by searching the cache, + // which means that a previous view with the same recipients has been received before. + skRandom <- + // we try to search for the cached session key randomness. If it does not exist + // (or is disabled) we decrypt and store it + // the result in the cache. There is no need to sync on this read-write operation because + // there is not problem if the value gets re-written. 
+ sessionKeyStore + .getSessionKeyRandomness( + snapshot.crypto.privateCrypto, + encrypted.viewEncryptionScheme.keySizeInBytes, + encryptedSessionKeyForParticipant, + ) + .leftMap[EncryptedViewMessageError](err => + SyncCryptoDecryptError( + SyncCryptoDecryptionError(err) + ) + ) + viewRandomness <- decryptViewRandomness(skRandom) + } yield viewRandomness + } + } + + final case class RecipientsInfo( + informeeParticipants: Set[ParticipantId], + partiesWithGroupAddressing: Set[PartyId], + participantsAddressedByGroupAddress: Set[ParticipantId], + ) + + private def eitherT[VT <: ViewType, B](value: Either[EncryptedViewMessageError, B])(implicit + ec: ExecutionContext + ): EitherT[Future, EncryptedViewMessageError, B] = + EitherT.fromEither[Future](value) + + def computeRandomnessLength(pureCrypto: CryptoPureApi): Int = + pureCrypto.defaultHashAlgorithm.length.toInt + + // This method is not defined as a member of EncryptedViewMessage because the covariant parameter VT conflicts + // with the parameter deserialize. 
+ def decryptWithRandomness[VT <: ViewType]( + snapshot: DomainSnapshotSyncCryptoApi, + encrypted: EncryptedViewMessage[VT], + viewRandomness: SecureRandomness, + )(deserialize: ByteString => Either[DeserializationError, encrypted.encryptedView.viewType.View])( + implicit ec: ExecutionContext + ): EitherT[Future, EncryptedViewMessageError, VT#View] = { + + val pureCrypto = snapshot.pureCrypto + val viewKeyLength = encrypted.viewEncryptionScheme.keySizeInBytes + val randomnessLength = computeRandomnessLength(snapshot.pureCrypto) + + for { + _ <- EitherT.cond[Future]( + viewRandomness.unwrap.size == randomnessLength, + (), + EncryptedViewMessageError.WrongRandomnessLength( + viewRandomness.unwrap.size, + randomnessLength, + ), + ) + viewKeyRandomness <- + eitherT( + pureCrypto + .computeHkdf(viewRandomness.unwrap, viewKeyLength, HkdfInfo.ViewKey) + .leftMap(EncryptedViewMessageError.HkdfExpansionError) + ) + viewKey <- eitherT( + pureCrypto + .createSymmetricKey(viewKeyRandomness) + .leftMap(err => + EncryptedViewMessageError + .SymmetricDecryptError(DecryptionError.InvalidSymmetricKey(err.toString)) + ) + ) + decrypted <- eitherT( + EncryptedView + .decrypt(pureCrypto, viewKey, encrypted.encryptedView)(deserialize) + .leftMap(EncryptedViewMessageError.SymmetricDecryptError) + ) + _ <- eitherT( + EitherUtil.condUnitE( + decrypted.domainId == encrypted.domainId, + EncryptedViewMessageError.WrongDomainIdInEncryptedViewMessage( + encrypted.domainId, + decrypted.domainId, + ), + ) + ) + } yield decrypted + } + + def decryptFor[VT <: ViewType]( + snapshot: DomainSnapshotSyncCryptoApi, + sessionKeyStore: SessionKeyStore, + encrypted: EncryptedViewMessage[VT], + participantId: ParticipantId, + optViewRandomness: Option[SecureRandomness] = None, + )(deserialize: ByteString => Either[DeserializationError, encrypted.encryptedView.viewType.View])( + implicit + ec: ExecutionContext, + tc: TraceContext, + ): EitherT[Future, EncryptedViewMessageError, VT#View] = { + + val 
decryptedRandomness = + decryptRandomness(snapshot, sessionKeyStore, encrypted, participantId) + + for { + viewRandomness <- optViewRandomness.fold( + decryptedRandomness + )(r => EitherT.pure(r)) + decrypted <- decryptWithRandomness(snapshot, encrypted, viewRandomness)( + deserialize + ) + } yield decrypted + } + + implicit val encryptedViewMessageCast + : ProtocolMessageContentCast[EncryptedViewMessage[ViewType]] = + ProtocolMessageContentCast.create[EncryptedViewMessage[ViewType]]("EncryptedViewMessage") { + case evm: EncryptedViewMessage[_] => Some(evm) + case _ => None + } + + override def name: String = "EncryptedViewMessage" +} + +sealed trait EncryptedViewMessageError extends Product with Serializable with PrettyPrinting { + + override def pretty: Pretty[EncryptedViewMessageError.this.type] = adHocPrettyInstance +} + +object EncryptedViewMessageError { + + final case class SessionKeyCreationError( + keyCreationError: EncryptionKeyCreationError + ) extends EncryptedViewMessageError + + final case class MissingParticipantKey( + participantId: ParticipantId + ) extends EncryptedViewMessageError + + final case class SyncCryptoDecryptError( + syncCryptoError: SyncCryptoError + ) extends EncryptedViewMessageError + + final case class SymmetricDecryptError( + decryptError: DecryptionError + ) extends EncryptedViewMessageError + + final case class WrongDomainIdInEncryptedViewMessage( + declaredDomainId: DomainId, + containedDomainId: DomainId, + ) extends EncryptedViewMessageError + + final case class HkdfExpansionError( + cause: HkdfError + ) extends EncryptedViewMessageError + + final case class WrongRandomnessLength( + length: Int, + expectedLength: Int, + ) extends EncryptedViewMessageError + + final case class PrivateKeyStoreVerificationError( + privatekeyStoreError: CryptoPrivateStoreError + ) extends EncryptedViewMessageError +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/EnvelopeContent.scala 
b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/EnvelopeContent.scala new file mode 100644 index 0000000000..d67aa2fa3b --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/EnvelopeContent.scala @@ -0,0 +1,104 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol.messages + +import cats.syntax.either.* +import com.digitalasset.canton.ProtoDeserializationError +import com.digitalasset.canton.ProtoDeserializationError.OtherError +import com.digitalasset.canton.crypto.HashOps +import com.digitalasset.canton.protocol.messages.ProtocolMessage.ProtocolMessageContentCast +import com.digitalasset.canton.protocol.v4 +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.version.* +import com.google.protobuf.ByteString + +final case class EnvelopeContent(message: UnsignedProtocolMessage)( + val representativeProtocolVersion: RepresentativeProtocolVersion[EnvelopeContent.type] +) extends HasProtocolVersionedWrapper[EnvelopeContent] { + @transient override protected lazy val companionObj: EnvelopeContent.type = EnvelopeContent + + def toByteStringUnversioned: ByteString = + v4.EnvelopeContent(message.toProtoSomeEnvelopeContentV4).toByteString +} + +object EnvelopeContent extends HasProtocolVersionedWithContextCompanion[EnvelopeContent, HashOps] { + + val supportedProtoVersions: SupportedProtoVersions = SupportedProtoVersions( + ProtoVersion(4) -> VersionedProtoConverter( + ProtocolVersion.v30 + )(v4.EnvelopeContent)( + supportedProtoVersion(_)(fromProtoV4), + _.toByteStringUnversioned, + ) + ) + + def create( + message: ProtocolMessage, + protocolVersion: ProtocolVersion, + ): Either[String, EnvelopeContent] = { + val representativeProtocolVersion = protocolVersionRepresentativeFor(protocolVersion) + message 
match { + case messageV4: UnsignedProtocolMessage => + Right(EnvelopeContent(messageV4)(representativeProtocolVersion)) + case _ => + Left(s"Cannot use message $message in protocol version $protocolVersion") + } + } + + def tryCreate( + message: ProtocolMessage, + protocolVersion: ProtocolVersion, + ): EnvelopeContent = + create(message, protocolVersion).valueOr(err => throw new IllegalArgumentException(err)) + + private def fromProtoV4( + hashOps: HashOps, + contentP: v4.EnvelopeContent, + ): ParsingResult[EnvelopeContent] = { + import v4.EnvelopeContent.SomeEnvelopeContent as Content + for { + content <- (contentP.someEnvelopeContent match { + case Content.InformeeMessage(messageP) => + InformeeMessage.fromProtoV1(hashOps)(messageP) + case Content.DomainTopologyTransactionMessage(messageP) => + DomainTopologyTransactionMessage.fromProtoV1(messageP) + case Content.EncryptedViewMessage(messageP) => + EncryptedViewMessage.fromProto(messageP) + case Content.TransferOutMediatorMessage(messageP) => + TransferOutMediatorMessage.fromProtoV1(hashOps)(messageP) + case Content.TransferInMediatorMessage(messageP) => + TransferInMediatorMessage.fromProtoV1(hashOps)(messageP) + case Content.RootHashMessage(messageP) => + RootHashMessage.fromProtoV0(SerializedRootHashMessagePayload.fromByteString)(messageP) + case Content.RegisterTopologyTransactionRequest(messageP) => + RegisterTopologyTransactionRequest.fromProtoV0(messageP) + case Content.RegisterTopologyTransactionResponse(messageP) => + RegisterTopologyTransactionResponse.fromProtoV1(messageP) + case Content.TopologyTransactionsBroadcast(messageP) => + TopologyTransactionsBroadcastX.fromProtoV2(messageP) + case Content.Empty => Left(OtherError("Cannot deserialize an empty message content")) + }): ParsingResult[UnsignedProtocolMessage] + } yield EnvelopeContent(content)(protocolVersionRepresentativeFor(ProtoVersion(4))) + } + + override def name: String = "EnvelopeContent" + + def messageFromByteArray[M <: 
UnsignedProtocolMessage]( + protocolVersion: ProtocolVersion, + hashOps: HashOps, + )( + bytes: Array[Byte] + )(implicit cast: ProtocolMessageContentCast[M]): ParsingResult[M] = { + for { + envelopeContent <- fromByteString(protocolVersion)(hashOps)(ByteString.copyFrom(bytes)) + message <- cast + .toKind(envelopeContent.message) + .toRight( + ProtoDeserializationError.OtherError( + s"Cannot deserialize ${envelopeContent.message} as a ${cast.targetKind}" + ) + ) + } yield message + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/HasDomainId.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/HasDomainId.scala new file mode 100644 index 0000000000..d7e41f74d4 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/HasDomainId.scala @@ -0,0 +1,10 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol.messages + +import com.digitalasset.canton.topology.DomainId + +trait HasDomainId { + def domainId: DomainId +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/HasRequestId.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/HasRequestId.scala new file mode 100644 index 0000000000..8ed807e899 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/HasRequestId.scala @@ -0,0 +1,10 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol.messages + +import com.digitalasset.canton.protocol.RequestId + +trait HasRequestId { + def requestId: RequestId +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/InformeeMessage.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/InformeeMessage.scala new file mode 100644 index 0000000000..c89c8632b1 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/InformeeMessage.scala @@ -0,0 +1,138 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol.messages + +import com.digitalasset.canton.LfPartyId +import com.digitalasset.canton.config.RequireTypes.NonNegativeInt +import com.digitalasset.canton.crypto.HashOps +import com.digitalasset.canton.data.{FullInformeeTree, Informee, ViewPosition, ViewType} +import com.digitalasset.canton.logging.pretty.Pretty +import com.digitalasset.canton.protocol.messages.ProtocolMessage.ProtocolMessageContentCast +import com.digitalasset.canton.protocol.{RequestId, RootHash, ViewHash, v1, v4} +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.topology.{DomainId, MediatorRef} +import com.digitalasset.canton.version.{ + HasProtocolVersionedWithContextCompanion, + ProtoVersion, + ProtocolVersion, + RepresentativeProtocolVersion, +} + +import java.util.UUID + +/** The informee message to be sent to the mediator. + */ +// This class is a reference example of serialization best practices. +// It is a simple example for getting started with serialization. +// Please consult the team if you intend to change the design of serialization. 
+@SuppressWarnings(Array("org.wartremover.warts.FinalCaseClass")) // This class is mocked in tests +case class InformeeMessage(fullInformeeTree: FullInformeeTree)( + protocolVersion: ProtocolVersion +) extends MediatorRequest + // By default, we use ProtoBuf for serialization. + // Serializable classes that have a corresponding Protobuf message should inherit from this trait to inherit common code and naming conventions. + // If the corresponding Protobuf message of a class has multiple versions (e.g. `v0.InformeeMessage` and `v1.InformeeMessage`), + with UnsignedProtocolMessage { + + override val representativeProtocolVersion: RepresentativeProtocolVersion[InformeeMessage.type] = + InformeeMessage.protocolVersionRepresentativeFor(protocolVersion) + + def copy(fullInformeeTree: FullInformeeTree = this.fullInformeeTree): InformeeMessage = + InformeeMessage(fullInformeeTree)(protocolVersion) + + override def requestUuid: UUID = fullInformeeTree.transactionUuid + + override def domainId: DomainId = fullInformeeTree.domainId + + override def mediator: MediatorRef = fullInformeeTree.mediator + + override def informeesAndThresholdByViewHash: Map[ViewHash, (Set[Informee], NonNegativeInt)] = + fullInformeeTree.informeesAndThresholdByViewHash + + override def informeesAndThresholdByViewPosition + : Map[ViewPosition, (Set[Informee], NonNegativeInt)] = + fullInformeeTree.informeesAndThresholdByViewPosition + + override def createMediatorResult( + requestId: RequestId, + verdict: Verdict, + recipientParties: Set[LfPartyId], + ): TransactionResultMessage = + TransactionResultMessage( + requestId, + verdict, + fullInformeeTree.tree.rootHash, + domainId, + protocolVersion, + ) + + // Implementing a `toProto` method allows us to compose serializable classes. + // You should define the toProtoV1 method on the serializable class, because then it is easiest to find and use. + // (Conversely, you should not define a separate proto converter class.)
+ def toProtoV1: v1.InformeeMessage = + v1.InformeeMessage( + fullInformeeTree = Some(fullInformeeTree.toProtoV1), + protocolVersion = protocolVersion.toProtoPrimitive, + ) + + override def toProtoSomeEnvelopeContentV4: v4.EnvelopeContent.SomeEnvelopeContent = + v4.EnvelopeContent.SomeEnvelopeContent.InformeeMessage(toProtoV1) + + override def minimumThreshold(informees: Set[Informee]): NonNegativeInt = + fullInformeeTree.confirmationPolicy.minimumThreshold(informees) + + override def rootHash: Option[RootHash] = Some(fullInformeeTree.transactionId.toRootHash) + + override def viewType: ViewType = ViewType.TransactionViewType + + override def pretty: Pretty[InformeeMessage] = prettyOfClass(unnamedParam(_.fullInformeeTree)) + + @transient override protected lazy val companionObj: InformeeMessage.type = InformeeMessage +} + +object InformeeMessage extends HasProtocolVersionedWithContextCompanion[InformeeMessage, HashOps] { + + val supportedProtoVersions = SupportedProtoVersions( + ProtoVersion(1) -> VersionedProtoConverter(ProtocolVersion.v30)(v1.InformeeMessage)( + supportedProtoVersion(_)((hashOps, proto) => fromProtoV1(hashOps)(proto)), + _.toProtoV1.toByteString, + ) + ) + + // The inverse of "toProto". + // + // On error, it returns `Left(...)` as callers cannot predict whether the conversion would succeed. + // So the caller is forced to handle failing conversion. + // Conversely, the method absolutely must not throw an exception, because this will likely kill the calling thread. + // So it would be a DOS vulnerability. + // + // There is no agreed convention on which type to use for errors. In this class it is "ProtoDeserializationError", + // but other classes use something else (e.g. "String"). + // In the end, it is most important that the errors are informative and this can be achieved in different ways. 
+ private[messages] def fromProtoV1( + hashOps: HashOps + )(informeeMessageP: v1.InformeeMessage): ParsingResult[InformeeMessage] = { + // Use pattern matching to access the fields of v1.InformeeMessage, + // because this will break if a field is forgotten. + val v1.InformeeMessage(maybeFullInformeeTreeP, protocolVersionP) = informeeMessageP + for { + // Keep in mind that all fields of a proto class are optional. So the existence must be checked explicitly. + fullInformeeTreeP <- ProtoConverter.required( + "InformeeMessage.informeeTree", + maybeFullInformeeTreeP, + ) + fullInformeeTree <- FullInformeeTree.fromProtoV1(hashOps, fullInformeeTreeP) + protocolVersion <- ProtocolVersion.fromProtoPrimitive(protocolVersionP) + } yield new InformeeMessage(fullInformeeTree)(protocolVersion) + } + + implicit val informeeMessageCast: ProtocolMessageContentCast[InformeeMessage] = + ProtocolMessageContentCast.create[InformeeMessage]("InformeeMessage") { + case im: InformeeMessage => Some(im) + case _ => None + } + + override def name: String = "InformeeMessage" +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/LocalVerdict.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/LocalVerdict.scala new file mode 100644 index 0000000000..d2dee42ffe --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/LocalVerdict.scala @@ -0,0 +1,746 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol.messages + +import com.daml.error.* +import com.digitalasset.canton.ProtoDeserializationError.OtherError +import com.digitalasset.canton.error.CantonErrorGroups.ParticipantErrorGroup.TransactionErrorGroup.LocalRejectionGroup +import com.digitalasset.canton.error.* +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.protocol.messages.LocalReject.MalformedRejects.CreatesExistingContracts +import com.digitalasset.canton.protocol.messages.LocalVerdict.protocolVersionRepresentativeFor +import com.digitalasset.canton.protocol.{messages, v0, v1} +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.version.* +import com.google.protobuf.empty +import org.slf4j.event.Level + +/** Possible verdicts on a transaction view from the participant's perspective. + * The verdict can be `LocalApprove`, `LocalReject` or `Malformed`. + * The verdicts `LocalReject` and `Malformed` include a `reason` pointing out which checks in Phase 3 have failed. 
+ */ +sealed trait LocalVerdict + extends Product + with Serializable + with PrettyPrinting + with HasProtocolVersionedWrapper[LocalVerdict] { + private[messages] def toProtoV1: v1.LocalVerdict + + @transient override protected lazy val companionObj: LocalVerdict.type = LocalVerdict + + override def representativeProtocolVersion: RepresentativeProtocolVersion[LocalVerdict.type] +} + +object LocalVerdict extends HasProtocolVersionedCompanion[LocalVerdict] { + + override def name: String = getClass.getSimpleName + + override def supportedProtoVersions: messages.LocalVerdict.SupportedProtoVersions = + SupportedProtoVersions( + ProtoVersion(1) -> VersionedProtoConverter(ProtocolVersion.v30)(v1.LocalVerdict)( + supportedProtoVersion(_)(fromProtoV1), + _.toProtoV1.toByteString, + ) + ) + + private[messages] def fromProtoV1(localVerdictP: v1.LocalVerdict): ParsingResult[LocalVerdict] = { + import v1.LocalVerdict.{SomeLocalVerdict as Lv} + + val protocolVersion = protocolVersionRepresentativeFor(ProtoVersion(1)) + val v1.LocalVerdict(someLocalVerdictP) = localVerdictP + + someLocalVerdictP match { + case Lv.LocalApprove(empty.Empty(_)) => Right(LocalApprove()(protocolVersion)) + case Lv.LocalReject(localRejectP) => LocalReject.fromProtoV1(localRejectP) + case Lv.Empty => + Left(OtherError("Unable to deserialize LocalVerdict, as the content is empty")) + } + } +} + +final case class LocalApprove()( + override val representativeProtocolVersion: RepresentativeProtocolVersion[LocalVerdict.type] +) extends LocalVerdict { + private[messages] def toProtoV1: v1.LocalVerdict = + v1.LocalVerdict(v1.LocalVerdict.SomeLocalVerdict.LocalApprove(empty.Empty())) + + override def pretty: Pretty[this.type] = prettyOfClass() +} + +object LocalApprove { + def apply(protocolVersion: ProtocolVersion): LocalApprove = + LocalApprove()(LocalVerdict.protocolVersionRepresentativeFor(protocolVersion)) +} + +/** Base type for error codes related to local reject. 
+ */ +trait BaseLocalRejectErrorCode { + + /** The code of a LocalReject in proto format v0. + * This is used to serialize rejections to v0.LocalReject. + */ + def v0CodeP: v0.LocalReject.Code +} + +/** Base type for ErrorCodes related to LocalReject, if the rejection does not (necessarily) occur due to malicious behavior. + */ +abstract class LocalRejectErrorCode( + id: String, + category: ErrorCategory, + override val v0CodeP: v0.LocalReject.Code, +)(implicit parent: ErrorClass) + extends ErrorCode(id, category) + with BaseLocalRejectErrorCode { + override implicit val code: LocalRejectErrorCode = this +} + +/** Base type for ErrorCodes related to LocalReject, if the rejection is due to malicious behavior. + */ +abstract class MalformedErrorCode(id: String, override val v0CodeP: v0.LocalReject.Code)(implicit + parent: ErrorClass +) extends AlarmErrorCode(id) + with BaseLocalRejectErrorCode { + implicit override val code: MalformedErrorCode = this +} + +sealed trait LocalReject extends LocalVerdict with TransactionError with TransactionRejection { + + /** The first part of the cause. Typically the same for all instances of the particular type. + */ + // The leading underscore will exclude the field from the error context, so that it doesn't get logged twice. + def _causePrefix: String + + /** The second part of the cause. Typically a class parameter. + */ + def _details: String = "" + + override def cause: String = _causePrefix + _details + + // Make sure the ErrorCode has a v0CodeP. + override def code: ErrorCode with BaseLocalRejectErrorCode + + /** Make sure to define this, if _resources is non-empty. + */ + def _resourcesType: Option[ErrorResource] = None + + /** The affected resources. + * Will be logged as part of the context information. + * If this error is converted to an rpc Status, this field is included as com.google.rpc.ResourceInfo. 
+ */ + def _resources: Seq[String] = Seq() + + override def resources: Seq[(ErrorResource, String)] = + _resourcesType.fold(Seq.empty[(ErrorResource, String)])(rt => _resources.map(rs => (rt, rs))) + + protected[messages] def toProtoV1: v1.LocalVerdict = + v1.LocalVerdict(v1.LocalVerdict.SomeLocalVerdict.LocalReject(toLocalRejectProtoV1)) + + protected[messages] def toLocalRejectProtoV1: v1.LocalReject = + v1.LocalReject( + causePrefix = _causePrefix, + details = _details, + resource = _resources, + errorCode = code.id, + errorCategory = code.category.asInt, + ) + + override def pretty: Pretty[LocalReject] = + prettyOfClass( + param("code", _.code.id.unquoted), + param("causePrefix", _._causePrefix.doubleQuoted), + param("details", _._details.doubleQuoted, _._details.nonEmpty), + param("resources", _._resources.map(_.singleQuoted)), + paramIfDefined("throwable", _.throwableO), + ) +} + +/** Base class for LocalReject errors, if the rejection does not (necessarily) occur due to malicious behavior. + */ +sealed abstract class LocalRejectImpl( + override val _causePrefix: String, + override val _details: String = "", + override val throwableO: Option[Throwable] = None, + override val _resourcesType: Option[ErrorResource] = None, + override val _resources: Seq[String] = Seq.empty, +)(implicit override val code: LocalRejectErrorCode) + extends LocalReject + +/** Base class for LocalReject errors, if the rejection occurs due to malicious behavior. 
+ */ +sealed abstract class Malformed( + override val _causePrefix: String, + override val _details: String = "", + override val throwableO: Option[Throwable] = None, + override val _resourcesType: Option[ErrorResource] = None, + override val _resources: Seq[String] = Seq.empty, +)(implicit + override val code: MalformedErrorCode +) extends BaseAlarm + with LocalReject + +object LocalReject extends LocalRejectionGroup { + + // list of local errors, used to map them during transport + // if you add a new error below, you must add it to this list here as well + private[messages] def fromProtoV1(localRejectP: v1.LocalReject): ParsingResult[LocalReject] = { + import ConsistencyRejections.* + val v1.LocalReject(causePrefix, details, resource, errorCodeP, errorCategoryP) = localRejectP + val protocolVersion = protocolVersionRepresentativeFor(ProtoVersion(1)) + errorCodeP match { + case LockedContracts.id => Right(LockedContracts.Reject(resource)(protocolVersion)) + case LockedKeys.id => Right(LockedKeys.Reject(resource)(protocolVersion)) + case InactiveContracts.id => Right(InactiveContracts.Reject(resource)(protocolVersion)) + case DuplicateKey.id => Right(DuplicateKey.Reject(resource)(protocolVersion)) + case CreatesExistingContracts.id => + Right(CreatesExistingContracts.Reject(resource)(protocolVersion)) + case TimeRejects.LedgerTime.id => + Right(TimeRejects.LedgerTime.Reject(details)(protocolVersion)) + case TimeRejects.SubmissionTime.id => + Right(TimeRejects.SubmissionTime.Reject(details)(protocolVersion)) + case TimeRejects.LocalTimeout.id => Right(TimeRejects.LocalTimeout.Reject()(protocolVersion)) + case MalformedRejects.MalformedRequest.id => + Right(MalformedRejects.MalformedRequest.Reject(details)(protocolVersion)) + case MalformedRejects.Payloads.id => + Right(MalformedRejects.Payloads.Reject(details)(protocolVersion)) + case MalformedRejects.ModelConformance.id => + Right(MalformedRejects.ModelConformance.Reject(details)(protocolVersion)) + case 
MalformedRejects.BadRootHashMessages.id => + Right(MalformedRejects.BadRootHashMessages.Reject(details)(protocolVersion)) + case TransferOutRejects.ActivenessCheckFailed.id => + Right(TransferOutRejects.ActivenessCheckFailed.Reject(details)(protocolVersion)) + case TransferInRejects.AlreadyCompleted.id => + Right(TransferInRejects.AlreadyCompleted.Reject(details)(protocolVersion)) + case TransferInRejects.ContractAlreadyActive.id => + Right(TransferInRejects.ContractAlreadyActive.Reject(details)(protocolVersion)) + case TransferInRejects.ContractAlreadyArchived.id => + Right(TransferInRejects.ContractAlreadyArchived.Reject(details)(protocolVersion)) + case TransferInRejects.ContractIsLocked.id => + Right(TransferInRejects.ContractIsLocked.Reject(details)(protocolVersion)) + case InconsistentKey.id => Right(InconsistentKey.Reject(resource)(protocolVersion)) + case id => + val category = ErrorCategory + .fromInt(errorCategoryP) + .getOrElse(ErrorCategory.SystemInternalAssumptionViolated) + Right(GenericReject(causePrefix, details, resource, id, category)(protocolVersion)) + } + } + + object ConsistencyRejections extends ErrorGroup() { + @Explanation( + """The transaction is referring to locked contracts which are in the process of being + created, transferred, or archived by another transaction. 
If the other transaction fails, this transaction could be successfully retried.""" + ) + @Resolution("Retry the transaction") + object LockedContracts + extends LocalRejectErrorCode( + id = "LOCAL_VERDICT_LOCKED_CONTRACTS", + ErrorCategory.ContentionOnSharedResources, + v0.LocalReject.Code.LockedContracts, + ) { + + final case class Reject(override val _resources: Seq[String])( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + LocalVerdict.type + ] + ) extends LocalRejectImpl( + _causePrefix = s"Rejected transaction is referring to locked contracts ", + _resourcesType = Some(CantonErrorResource.ContractId), + ) + + object Reject { + def apply(resources: Seq[String], protocolVersion: ProtocolVersion): Reject = + Reject(resources)(LocalVerdict.protocolVersionRepresentativeFor(protocolVersion)) + } + } + + @Explanation( + """The transaction is referring to locked keys which are in the process of being + modified by another transaction.""" + ) + @Resolution("Retry the transaction") + object LockedKeys + extends LocalRejectErrorCode( + id = "LOCAL_VERDICT_LOCKED_KEYS", + ErrorCategory.ContentionOnSharedResources, + v0.LocalReject.Code.LockedKeys, + ) { + final case class Reject(override val _resources: Seq[String])( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + LocalVerdict.type + ] + ) extends LocalRejectImpl( + _causePrefix = "Rejected transaction is referring to locked keys ", + _resourcesType = Some(CantonErrorResource.ContractKey), + _details = _resources.mkString(", "), + ) + + object Reject { + def apply(resources: Seq[String], protocolVersion: ProtocolVersion): Reject = + Reject(resources)(LocalVerdict.protocolVersionRepresentativeFor(protocolVersion)) + } + } + + @Explanation( + """The transaction is referring to contracts that have either been previously + archived, transferred to another domain, or do not exist.""" + ) + @Resolution("Inspect your contract state and try a different transaction.") + 
object InactiveContracts + extends LocalRejectErrorCode( + id = "LOCAL_VERDICT_INACTIVE_CONTRACTS", + ErrorCategory.InvalidGivenCurrentSystemStateResourceMissing, + v0.LocalReject.Code.InactiveContracts, + ) { + final case class Reject(override val _resources: Seq[String])( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + LocalVerdict.type + ] + ) extends LocalRejectImpl( + _causePrefix = "Rejected transaction is referring to inactive contracts ", + _resourcesType = Some(CantonErrorResource.ContractId), + ) + + object Reject { + def apply(resources: Seq[String], protocolVersion: ProtocolVersion): Reject = + Reject(resources)(LocalVerdict.protocolVersionRepresentativeFor(protocolVersion)) + } + } + + @Explanation( + """If the participant provides unique contract key support, + this error will indicate that a transaction would create a unique key which already exists.""" + ) + @Resolution( + "It depends on your use case and application whether and when retrying makes sense or not." 
+ ) + object DuplicateKey + extends LocalRejectErrorCode( + id = "LOCAL_VERDICT_DUPLICATE_KEY", + ErrorCategory.InvalidGivenCurrentSystemStateResourceExists, + v0.LocalReject.Code.DuplicateKey, + ) { + final case class Reject(override val _resources: Seq[String])( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + LocalVerdict.type + ] + ) + // Error message contains the term: "Inconsistent" and "DuplicateKey" to avoid failing contract key ledger api conformance tests + extends LocalRejectImpl( + _causePrefix = + "Inconsistent rejected transaction would create a key that already exists (DuplicateKey) ", + _resourcesType = Some(CantonErrorResource.ContractKey), + ) + + object Reject { + def apply(resources: Seq[String], protocolVersion: ProtocolVersion): Reject = + Reject(resources)(LocalVerdict.protocolVersionRepresentativeFor(protocolVersion)) + } + } + + @Explanation( + """If the participant provides unique contract key support, + this error will indicate that a transaction expected a key to be unallocated, but a contract for the key already exists.""" + ) + @Resolution( + "It depends on your use case and application whether and when retrying makes sense or not." 
+ ) + object InconsistentKey + extends LocalRejectErrorCode( + id = "LOCAL_VERDICT_INCONSISTENT_KEY", + ErrorCategory.InvalidGivenCurrentSystemStateResourceExists, + v0.LocalReject.Code.InconsistentKey, + ) { + final case class Reject(override val _resources: Seq[String])( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + LocalVerdict.type + ] + ) extends LocalRejectImpl( + _causePrefix = + "Inconsistent rejected transaction expected unassigned key, which already exists ", + _resourcesType = Some(CantonErrorResource.ContractKey), + ) + + object Reject { + def apply(resources: Seq[String], protocolVersion: ProtocolVersion): Reject = + Reject(resources)(LocalVerdict.protocolVersionRepresentativeFor(protocolVersion)) + } + } + } + + object TimeRejects extends ErrorGroup() { + + @Explanation( + """This error is thrown if the ledger time and the record time differ more than permitted. + This can happen in an overloaded system due to high latencies or for transactions with long interpretation times.""" + ) + @Resolution( + "For long-running transactions, specify a ledger time with the command submission or adjust the dynamic domain parameter ledgerTimeRecordTimeTolerance (and possibly the participant and mediator reaction timeout). For short-running transactions, simply retry." 
+ ) + object LedgerTime + extends LocalRejectErrorCode( + id = "LOCAL_VERDICT_LEDGER_TIME_OUT_OF_BOUND", + ErrorCategory.ContentionOnSharedResources, + v0.LocalReject.Code.LedgerTime, + ) { + final case class Reject(override val _details: String)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + LocalVerdict.type + ] + ) extends LocalRejectImpl( + _causePrefix = + "Rejected transaction as delta of the ledger time and the record time exceed the time tolerance " + ) + + object Reject { + def apply(details: String, protocolVersion: ProtocolVersion): Reject = + Reject(details)(LocalVerdict.protocolVersionRepresentativeFor(protocolVersion)) + } + } + + @Explanation( + """This error is thrown if the submission time and the record time differ more than permitted. + This can happen in an overloaded system due to high latencies or for transactions with long interpretation times.""" + ) + @Resolution( + "For long-running transactions, adjust the ledger time bounds used with the command submission. For short-running transactions, simply retry." + ) + object SubmissionTime + extends LocalRejectErrorCode( + id = "LOCAL_VERDICT_SUBMISSION_TIME_OUT_OF_BOUND", + ErrorCategory.ContentionOnSharedResources, + v0.LocalReject.Code.SubmissionTime, + ) { + final case class Reject(override val _details: String)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + LocalVerdict.type + ] + ) extends LocalRejectImpl( + _causePrefix = + "Rejected transaction as delta of the submission time and the record time exceed the time tolerance " + ) + + object Reject { + def apply(details: String, protocolVersion: ProtocolVersion): Reject = + Reject(details)(LocalVerdict.protocolVersionRepresentativeFor(protocolVersion)) + } + } + + @Explanation( + """This rejection is sent if the participant locally determined a timeout.""" + ) + @Resolution("""In the first instance, resubmit your transaction. 
+ | If the rejection still appears spuriously, consider increasing the `participantResponseTimeout` or + | `mediatorReactionTimeout` values in the `DynamicDomainParameters`. + | If the rejection appears unrelated to timeout settings, validate that the sequencer and mediator + | function correctly. + |""") + object LocalTimeout + extends LocalRejectErrorCode( + id = "LOCAL_VERDICT_TIMEOUT", + ErrorCategory.ContentionOnSharedResources, + v0.LocalReject.Code.LocalTimeout, + ) { + override def logLevel: Level = Level.WARN + final case class Reject()( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + LocalVerdict.type + ] + ) extends LocalRejectImpl( + _causePrefix = "Rejected transaction due to a participant determined timeout " + ) + + object Reject { + def apply(protocolVersion: ProtocolVersion): Reject = + Reject()(LocalVerdict.protocolVersionRepresentativeFor(protocolVersion)) + } + } + + } + + object MalformedRejects extends ErrorGroup() { + + @Explanation( + """This rejection is made by a participant if a request is malformed.""" + ) + @Resolution("Please contact support.") + object MalformedRequest + extends MalformedErrorCode( + id = "LOCAL_VERDICT_MALFORMED_REQUEST", + v0.LocalReject.Code.MalformedPayloads, + ) { + final case class Reject(override val _details: String)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + LocalVerdict.type + ] + ) extends Malformed(_causePrefix = "") + + object Reject { + def apply(details: String, protocolVersion: ProtocolVersion): Reject = + Reject(details)(LocalVerdict.protocolVersionRepresentativeFor(protocolVersion)) + } + } + + @Explanation( + """This rejection is made by a participant if a view of the transaction is malformed.""" + ) + @Resolution("This indicates either malicious or faulty behaviour.") + object Payloads + extends MalformedErrorCode( + id = "LOCAL_VERDICT_MALFORMED_PAYLOAD", + v0.LocalReject.Code.MalformedPayloads, + ) { + final case class 
Reject(override val _details: String)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + LocalVerdict.type + ] + ) extends Malformed( + _causePrefix = "Rejected transaction due to malformed payload within views " + ) + + object Reject { + def apply(details: String, protocolVersion: ProtocolVersion): Reject = + Reject(details)(LocalVerdict.protocolVersionRepresentativeFor(protocolVersion)) + } + } + + @Explanation( + """This rejection is made by a participant if a transaction fails a model conformance check.""" + ) + @Resolution("This indicates either malicious or faulty behaviour.") + object ModelConformance + extends MalformedErrorCode( + id = "LOCAL_VERDICT_FAILED_MODEL_CONFORMANCE_CHECK", + v0.LocalReject.Code.MalformedModel, + ) { + final case class Reject(override val _details: String)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + LocalVerdict.type + ] + ) extends Malformed( + _causePrefix = "Rejected transaction due to a failed model conformance check: " + ) + + object Reject { + def apply(details: String, protocolVersion: ProtocolVersion): Reject = + Reject(details)(LocalVerdict.protocolVersionRepresentativeFor(protocolVersion)) + } + } + + @Explanation( + """This rejection is made by a participant if a transaction does not contain valid root hash messages.""" + ) + @Resolution( + "This indicates a race condition due to a in-flight topology change, or malicious or faulty behaviour." + ) + object BadRootHashMessages + extends MalformedErrorCode( + id = "LOCAL_VERDICT_BAD_ROOT_HASH_MESSAGES", + v0.LocalReject.Code.BadRootHashMessage, + ) { + final case class Reject(override val _details: String)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + LocalVerdict.type + ] + ) extends Malformed( + _causePrefix = "Rejected transaction due to bad root hash error messages. 
" + ) + + object Reject { + def apply(details: String, protocolVersion: ProtocolVersion): Reject = + Reject(details)(LocalVerdict.protocolVersionRepresentativeFor(protocolVersion)) + } + } + + @Explanation( + """This error indicates that the transaction would create already existing contracts.""" + ) + @Resolution("This error indicates either faulty or malicious behaviour.") + object CreatesExistingContracts + extends MalformedErrorCode( + id = "LOCAL_VERDICT_CREATES_EXISTING_CONTRACTS", + v0.LocalReject.Code.CreatesExistingContract, + ) { + final case class Reject(override val _resources: Seq[String])( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + LocalVerdict.type + ] + ) extends Malformed( + _causePrefix = "Rejected transaction would create contract(s) that already exist ", + _resourcesType = Some(CantonErrorResource.ContractId), + ) + + object Reject { + def apply(resources: Seq[String], protocolVersion: ProtocolVersion): Reject = + Reject(resources)(LocalVerdict.protocolVersionRepresentativeFor(protocolVersion)) + } + } + } + + object TransferOutRejects extends ErrorGroup() { + + @Explanation( + """Activeness check failed for transfer out submission. This rejection occurs if the contract to be + |transferred has already been transferred or is currently locked (due to a competing transaction) + |on domain.""" + ) + @Resolution( + "Depending on your use-case and your expectation, retry the transaction." 
+ ) + object ActivenessCheckFailed + extends LocalRejectErrorCode( + id = "TRANSFER_OUT_ACTIVENESS_CHECK_FAILED", + ErrorCategory.InvalidGivenCurrentSystemStateResourceMissing, + v0.LocalReject.Code.TransferOutActivenessCheck, + ) { + + final case class Reject(override val _details: String)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + LocalVerdict.type + ] + ) extends LocalRejectImpl(_causePrefix = "Activeness check failed.") + + object Reject { + def apply(details: String, protocolVersion: ProtocolVersion): Reject = + Reject(details)(LocalVerdict.protocolVersionRepresentativeFor(protocolVersion)) + } + } + + } + + object TransferInRejects extends ErrorGroup() { + @Explanation( + """This rejection is emitted by a participant if a transfer would be invoked on an already archived contract.""" + ) + object ContractAlreadyArchived + extends LocalRejectErrorCode( + id = "TRANSFER_IN_CONTRACT_ALREADY_ARCHIVED", + ErrorCategory.InvalidGivenCurrentSystemStateResourceMissing, + v0.LocalReject.Code.TransferInAlreadyArchived, + ) { + + final case class Reject(override val _details: String)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + LocalVerdict.type + ] + ) extends LocalRejectImpl( + _causePrefix = "Rejected transfer as transferred contract is already archived. 
" + ) + + object Reject { + def apply(details: String, protocolVersion: ProtocolVersion): Reject = + Reject(details)(LocalVerdict.protocolVersionRepresentativeFor(protocolVersion)) + } + } + + @Explanation( + """This rejection is emitted by a participant if a transfer-in has already been made by another entity.""" + ) + object ContractAlreadyActive + extends LocalRejectErrorCode( + id = "TRANSFER_IN_CONTRACT_ALREADY_ACTIVE", + ErrorCategory.InvalidGivenCurrentSystemStateResourceExists, + v0.LocalReject.Code.TransferInAlreadyActive, + ) { + + final case class Reject(override val _details: String)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + LocalVerdict.type + ] + ) extends LocalRejectImpl( + _causePrefix = + "Rejected transfer as the contract is already active on the target domain. " + ) + + object Reject { + def apply(details: String, protocolVersion: ProtocolVersion): Reject = + Reject(details)(LocalVerdict.protocolVersionRepresentativeFor(protocolVersion)) + } + } + + @Explanation( + """This rejection is emitted by a participant if a transfer-in is referring to an already locked contract.""" + ) + object ContractIsLocked + extends LocalRejectErrorCode( + id = "TRANSFER_IN_CONTRACT_IS_LOCKED", + ErrorCategory.ContentionOnSharedResources, + v0.LocalReject.Code.TransferInLocked, + ) { + + final case class Reject(override val _details: String)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + LocalVerdict.type + ] + ) extends LocalRejectImpl( + _causePrefix = "Rejected transfer as the transferred contract is locked." 
+ ) + + object Reject { + def apply(details: String, protocolVersion: ProtocolVersion): Reject = + Reject(details)(LocalVerdict.protocolVersionRepresentativeFor(protocolVersion)) + } + } + + @Explanation( + """This rejection is emitted by a participant if a transfer-in has already been completed.""" + ) + object AlreadyCompleted + extends LocalRejectErrorCode( + id = "TRANSFER_IN_ALREADY_COMPLETED", + ErrorCategory.InvalidGivenCurrentSystemStateResourceExists, + v0.LocalReject.Code.TransferInAlreadyCompleted, + ) { + + final case class Reject(override val _details: String)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + LocalVerdict.type + ] + ) extends LocalRejectImpl( + _causePrefix = "Rejected transfer as the transfer has already completed " + ) + + object Reject { + def apply(details: String, protocolVersion: ProtocolVersion): Reject = + Reject(details)(LocalVerdict.protocolVersionRepresentativeFor(protocolVersion)) + } + } + } + + /** Fallback for deserializing local rejects that are not known to the current Canton version. + * Should not be serialized. + */ + final case class GenericReject( + override val _causePrefix: String, + override val _details: String, + override val _resources: Seq[String], + id: String, + category: ErrorCategory, + )( + override val representativeProtocolVersion: RepresentativeProtocolVersion[LocalVerdict.type] + ) extends LocalRejectImpl( + _causePrefix = _causePrefix, + // Append _resources to details, because we don't know _resourcesType and the _resources field is ignored if _resourcesType is None. + _details = _details + _resources.mkString(", "), + )( + new LocalRejectErrorCode( + id, + category, + v0.LocalReject.Code.LocalTimeout, // Using a dummy value, as this will not be used.
+ ) {} + ) + + object GenericReject { + def apply( + causePrefix: String, + details: String, + resources: Seq[String], + id: String, + category: ErrorCategory, + protocolVersion: ProtocolVersion, + ): GenericReject = + GenericReject(causePrefix, details, resources, id, category)( + LocalVerdict.protocolVersionRepresentativeFor(protocolVersion) + ) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/MalformedMediatorRequestResult.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/MalformedMediatorRequestResult.scala new file mode 100644 index 0000000000..46e42745e6 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/MalformedMediatorRequestResult.scala @@ -0,0 +1,146 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol.messages + +import cats.syntax.either.* +import com.digitalasset.canton.crypto.HashPurpose +import com.digitalasset.canton.data.ViewType +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.protocol.messages.SignedProtocolMessageContent.SignedMessageContentCast +import com.digitalasset.canton.protocol.messages.Verdict.MediatorReject +import com.digitalasset.canton.protocol.{RequestId, v0, v2} +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.topology.DomainId +import com.digitalasset.canton.version.{ + HasMemoizedProtocolVersionedWrapperCompanion, + HasProtocolVersionedWrapper, + ProtoVersion, + ProtocolVersion, + RepresentativeProtocolVersion, +} +import com.google.protobuf.ByteString + +/** Sent by the mediator to indicate that a mediator request was malformed. 
+ * The request counts as being rejected and the request UUID will not be deduplicated. + * + * @param requestId The ID of the malformed request + * @param domainId The domain ID of the mediator + * @param verdict The rejection reason as a verdict + */ +@SuppressWarnings(Array("org.wartremover.warts.FinalCaseClass")) // This class is mocked in tests +case class MalformedMediatorRequestResult private ( + override val requestId: RequestId, + override val domainId: DomainId, + override val viewType: ViewType, + override val verdict: Verdict.MediatorReject, +)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + MalformedMediatorRequestResult.type + ], + override val deserializedFrom: Option[ByteString], +) extends MediatorResult + with SignedProtocolMessageContent + with HasProtocolVersionedWrapper[MalformedMediatorRequestResult] + with PrettyPrinting { + + override def hashPurpose: HashPurpose = HashPurpose.MalformedMediatorRequestResult + + override protected[messages] def toProtoTypedSomeSignedProtocolMessage + : v0.TypedSignedProtocolMessageContent.SomeSignedProtocolMessage = + v0.TypedSignedProtocolMessageContent.SomeSignedProtocolMessage.MalformedMediatorRequestResult( + getCryptographicEvidence + ) + + @transient override protected lazy val companionObj: MalformedMediatorRequestResult.type = + MalformedMediatorRequestResult + + protected def toProtoV2: v2.MalformedMediatorRequestResult = + v2.MalformedMediatorRequestResult( + requestId = Some(requestId.toProtoPrimitive), + domainId = domainId.toProtoPrimitive, + viewType = viewType.toProtoEnum, + rejection = Some(verdict.toProtoMediatorRejectV2), + ) + + override protected[this] def toByteStringUnmemoized: ByteString = + super[HasProtocolVersionedWrapper].toByteString + + override def pretty: Pretty[MalformedMediatorRequestResult] = prettyOfClass( + param("request id", _.requestId), + param("reject", _.verdict), + param("view type", _.viewType), + param("domain id", _.domainId), + ) 
+} + +object MalformedMediatorRequestResult + extends HasMemoizedProtocolVersionedWrapperCompanion[MalformedMediatorRequestResult] { + override val name: String = "MalformedMediatorRequestResult" + + val supportedProtoVersions = SupportedProtoVersions( + ProtoVersion(2) -> VersionedProtoConverter(ProtocolVersion.v30)( + v2.MalformedMediatorRequestResult + )( + supportedProtoVersionMemoized(_)(fromProtoV2), + _.toProtoV2.toByteString, + ) + ) + + def tryCreate( + requestId: RequestId, + domainId: DomainId, + viewType: ViewType, + verdict: Verdict.MediatorReject, + protocolVersion: ProtocolVersion, + ): MalformedMediatorRequestResult = + MalformedMediatorRequestResult(requestId, domainId, viewType, verdict)( + protocolVersionRepresentativeFor(protocolVersion), + None, + ) + + def create( + requestId: RequestId, + domainId: DomainId, + viewType: ViewType, + verdict: Verdict.MediatorReject, + protocolVersion: ProtocolVersion, + ): Either[String, MalformedMediatorRequestResult] = + Either + .catchOnly[IllegalArgumentException]( + MalformedMediatorRequestResult + .tryCreate(requestId, domainId, viewType, verdict, protocolVersion) + ) + .leftMap(_.getMessage) + + private def fromProtoV2(malformedMediatorRequestResultP: v2.MalformedMediatorRequestResult)( + bytes: ByteString + ): ParsingResult[MalformedMediatorRequestResult] = { + + val v2.MalformedMediatorRequestResult(requestIdPO, domainIdP, viewTypeP, rejectionPO) = + malformedMediatorRequestResultP + for { + requestId <- ProtoConverter + .required("request_id", requestIdPO) + .flatMap(RequestId.fromProtoPrimitive) + domainId <- DomainId.fromProtoPrimitive(domainIdP, "domain_id") + viewType <- ViewType.fromProtoEnum(viewTypeP) + reject <- ProtoConverter.parseRequired( + MediatorReject.fromProtoV2, + "rejection", + rejectionPO, + ) + } yield MalformedMediatorRequestResult(requestId, domainId, viewType, reject)( + protocolVersionRepresentativeFor(ProtoVersion(3)), + Some(bytes), + ) + } + + implicit val 
malformedMediatorRequestResultCast + : SignedMessageContentCast[MalformedMediatorRequestResult] = SignedMessageContentCast + .create[MalformedMediatorRequestResult]("MalformedMediatorRequestResult") { + case m: MalformedMediatorRequestResult => Some(m) + case _ => None + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/MediatorRequest.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/MediatorRequest.scala new file mode 100644 index 0000000000..0109688e18 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/MediatorRequest.scala @@ -0,0 +1,55 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol.messages + +import com.digitalasset.canton.LfPartyId +import com.digitalasset.canton.config.RequireTypes.NonNegativeInt +import com.digitalasset.canton.data.{Informee, ViewPosition, ViewType} +import com.digitalasset.canton.protocol.messages.ProtocolMessage.ProtocolMessageContentCast +import com.digitalasset.canton.protocol.{RequestId, RootHash, ViewHash} +import com.digitalasset.canton.topology.MediatorRef + +import java.util.UUID + +trait MediatorRequest extends UnsignedProtocolMessage { + def requestUuid: UUID + + def mediator: MediatorRef + + def informeesAndThresholdByViewHash: Map[ViewHash, (Set[Informee], NonNegativeInt)] + + def informeesAndThresholdByViewPosition: Map[ViewPosition, (Set[Informee], NonNegativeInt)] + + def allInformees: Set[LfPartyId] = + informeesAndThresholdByViewPosition + .flatMap { case (_, (informees, _)) => + informees + } + .map(_.party) + .toSet + + def createMediatorResult( + requestId: RequestId, + verdict: Verdict, + recipientParties: Set[LfPartyId], + ): MediatorResult with SignedProtocolMessageContent + + def minimumThreshold(informees: Set[Informee]): NonNegativeInt + 
+ /** Returns the hash that all [[com.digitalasset.canton.protocol.messages.RootHashMessage]]s of the request batch should contain. + * [[scala.None$]] indicates that no [[com.digitalasset.canton.protocol.messages.RootHashMessage]] should be in the batch. + */ + def rootHash: Option[RootHash] + + def viewType: ViewType +} + +object MediatorRequest { + implicit val mediatorRequestProtocolMessageContentCast + : ProtocolMessageContentCast[MediatorRequest] = + ProtocolMessageContentCast.create[MediatorRequest]("MediatorRequest") { + case m: MediatorRequest => Some(m) + case _ => None + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/MediatorResponse.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/MediatorResponse.scala new file mode 100644 index 0000000000..191b85f4bf --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/MediatorResponse.scala @@ -0,0 +1,306 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol.messages + +import cats.syntax.either.* +import cats.syntax.traverse.* +import com.digitalasset.canton.LfPartyId +import com.digitalasset.canton.ProtoDeserializationError.InvariantViolation +import com.digitalasset.canton.crypto.HashPurpose +import com.digitalasset.canton.data.{CantonTimestamp, ViewPosition} +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.protocol.* +import com.digitalasset.canton.protocol.messages.MediatorResponse.InvalidMediatorResponse +import com.digitalasset.canton.protocol.messages.SignedProtocolMessageContent.SignedMessageContentCast +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.topology.{DomainId, ParticipantId} +import com.digitalasset.canton.version.* +import com.google.common.annotations.VisibleForTesting +import com.google.protobuf.ByteString +import monocle.Lens +import monocle.macros.GenLens + +import scala.math.Ordering.Implicits.infixOrderingOps + +/** Payload of a response sent to the mediator in reaction to a request. + * + * @param requestId The unique identifier of the request. + * @param sender The identity of the sender. + * @param viewPositionO the view position of the underlying view. + * May be empty if the [[localVerdict]] is [[com.digitalasset.canton.protocol.messages.LocalReject.Malformed]]. + * Must be empty if the protoVersion is strictly lower than 2. + * @param localVerdict The participant's verdict on the request's view. + * @param rootHash The root hash of the request if the local verdict is [[com.digitalasset.canton.protocol.messages.LocalApprove]] + * or [[com.digitalasset.canton.protocol.messages.LocalReject]]. [[scala.None$]] otherwise. 
+ * @param confirmingParties The non-empty set of confirming parties of the view hosted by the sender if the local verdict is [[com.digitalasset.canton.protocol.messages.LocalApprove]] + * or [[com.digitalasset.canton.protocol.messages.LocalReject]]. Empty otherwise. + * @param domainId The domain ID over which the request is sent. + */ + +/* +This class is a reference example of serialization best practices, demonstrating: + * handling of object invariants (i.e., the construction of an instance may fail with an exception) + +Please consult the team if you intend to change the design of serialization. + +Because + * `fromProtoV0` is private, + * the class is `sealed abstract`, +then clients cannot create instances with an incorrect `deserializedFrom` field. + +Optional parameters are strongly discouraged, as each parameter needs to be consciously set in a production context. + */ +@SuppressWarnings(Array("org.wartremover.warts.FinalCaseClass")) // This class is mocked in tests +case class MediatorResponse private ( + requestId: RequestId, + sender: ParticipantId, + viewPositionO: Option[ViewPosition], + localVerdict: LocalVerdict, + rootHash: Option[RootHash], + confirmingParties: Set[LfPartyId], + override val domainId: DomainId, +)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + MediatorResponse.type + ], + override val deserializedFrom: Option[ByteString], +) extends SignedProtocolMessageContent + with HasProtocolVersionedWrapper[MediatorResponse] + with HasDomainId + with PrettyPrinting { + + // Private copy method used by the lenses in the companion object + private def copy( + requestId: RequestId = requestId, + sender: ParticipantId = sender, + viewPositionO: Option[ViewPosition] = viewPositionO, + localVerdict: LocalVerdict = localVerdict, + rootHash: Option[RootHash] = rootHash, + confirmingParties: Set[LfPartyId] = confirmingParties, + domainId: DomainId = domainId, + ): MediatorResponse = MediatorResponse( + requestId, + 
sender, + viewPositionO, + localVerdict, + rootHash, + confirmingParties, + domainId, + )(representativeProtocolVersion, None) + + // If an object invariant is violated, throw an exception specific to the class. + // Thus, the exception can be caught during deserialization and translated to a human readable error message. + localVerdict match { + case _: Malformed => + if (confirmingParties.nonEmpty) + throw InvalidMediatorResponse("Confirming parties must be empty for verdict Malformed.") + case _: LocalApprove | _: LocalReject => + if (confirmingParties.isEmpty) + throw InvalidMediatorResponse( + show"Confirming parties must not be empty for verdict $localVerdict" + ) + if (rootHash.isEmpty) + throw InvalidMediatorResponse(show"Root hash must not be empty for verdict $localVerdict") + if (protoVersion >= ProtoVersion(2) && viewPositionO.isEmpty) + throw InvalidMediatorResponse( + show"View position must not be empty for verdict $localVerdict" + ) + } + + if (protoVersion < ProtoVersion(2) && viewPositionO.nonEmpty) + throw InvalidMediatorResponse( + s"View position must be empty for protoVersion $protoVersion." 
+ ) + + override def signingTimestamp: CantonTimestamp = requestId.unwrap + + protected override def toByteStringUnmemoized: ByteString = + super[HasProtocolVersionedWrapper].toByteString + + @transient override protected lazy val companionObj: MediatorResponse.type = MediatorResponse + + protected def toProtoV2: v2.MediatorResponse = + v2.MediatorResponse( + requestId = Some(requestId.toProtoPrimitive), + sender = sender.toProtoPrimitive, + viewPosition = viewPositionO.map(_.toProtoV2), + localVerdict = Some(localVerdict.toProtoV1), + rootHash = rootHash.fold(ByteString.EMPTY)(_.toProtoPrimitive), + confirmingParties = confirmingParties.toList, + domainId = domainId.toProtoPrimitive, + ) + + override protected[messages] def toProtoTypedSomeSignedProtocolMessage + : v0.TypedSignedProtocolMessageContent.SomeSignedProtocolMessage = + v0.TypedSignedProtocolMessageContent.SomeSignedProtocolMessage.MediatorResponse( + getCryptographicEvidence + ) + + override def hashPurpose: HashPurpose = HashPurpose.MediatorResponseSignature + + override def pretty: Pretty[this.type] = + prettyOfClass( + param("sender", _.sender), + param("localVerdict", _.localVerdict), + param("confirmingParties", _.confirmingParties), + param("domainId", _.domainId), + param("requestId", _.requestId), + paramIfDefined("viewPosition", _.viewPositionO), + paramIfDefined("rootHash", _.rootHash), + param("representativeProtocolVersion", _.representativeProtocolVersion), + ) +} + +object MediatorResponse extends HasMemoizedProtocolVersionedWrapperCompanion[MediatorResponse] { + override val name: String = "MediatorResponse" + + val supportedProtoVersions = SupportedProtoVersions( + ProtoVersion(2) -> VersionedProtoConverter(ProtocolVersion.v30)(v2.MediatorResponse)( + supportedProtoVersionMemoized(_)(fromProtoV2), + _.toProtoV2.toByteString, + ) + ) + + final case class InvalidMediatorResponse(msg: String) extends RuntimeException(msg) + + // Variant of "tryCreate" that returns Left(...) 
instead of throwing an exception. + // This is for callers who *do not know up front* whether the parameters meet the object invariants. + // + // Optional method, feel free to omit it. + def create( + requestId: RequestId, + sender: ParticipantId, + viewPositionO: Option[ViewPosition], + localVerdict: LocalVerdict, + rootHash: Option[RootHash], + confirmingParties: Set[LfPartyId], + domainId: DomainId, + protocolVersion: ProtocolVersion, + ): Either[InvalidMediatorResponse, MediatorResponse] = + Either.catchOnly[InvalidMediatorResponse]( + tryCreate( + requestId, + sender, + viewPositionO, + localVerdict, + rootHash, + confirmingParties, + domainId, + protocolVersion, + ) + ) + + // This method is tailored to the case that the caller already knows that the parameters meet the object invariants. + // Consequently, the method throws an exception on invalid parameters. + // + // The "tryCreate" method has the following advantage over the auto-generated "apply" method: + // - The deserializedFrom field cannot be set; so it cannot be set incorrectly. + // + // The method is called "tryCreate" instead of "apply" for two reasons: + // - to emphasize that this method may throw an exception + // - to not confuse the Idea compiler by overloading "apply". + // (This is not a problem with this particular class, but it has been a problem with other classes.) + // + // The "tryCreate" method is optional. + // Feel free to omit "tryCreate", if the auto-generated "apply" method is good enough. 
+ def tryCreate( + requestId: RequestId, + sender: ParticipantId, + viewPositionO: Option[ViewPosition], + localVerdict: LocalVerdict, + rootHash: Option[RootHash], + confirmingParties: Set[LfPartyId], + domainId: DomainId, + protocolVersion: ProtocolVersion, + ): MediatorResponse = + MediatorResponse( + requestId, + sender, + viewPositionO, + localVerdict, + rootHash, + confirmingParties, + domainId, + )(protocolVersionRepresentativeFor(protocolVersion), None) + + /** DO NOT USE IN PRODUCTION, as this does not necessarily check object invariants. */ + @VisibleForTesting + val requestIdUnsafe: Lens[MediatorResponse, RequestId] = GenLens[MediatorResponse](_.requestId) + + /** DO NOT USE IN PRODUCTION, as this does not necessarily check object invariants. */ + @VisibleForTesting + val senderUnsafe: Lens[MediatorResponse, ParticipantId] = GenLens[MediatorResponse](_.sender) + + /** DO NOT USE IN PRODUCTION, as this does not necessarily check object invariants. */ + @VisibleForTesting + val viewPositionOUnsafe: Lens[MediatorResponse, Option[ViewPosition]] = + GenLens[MediatorResponse](_.viewPositionO) + + /** DO NOT USE IN PRODUCTION, as this does not necessarily check object invariants. */ + @VisibleForTesting + val localVerdictUnsafe: Lens[MediatorResponse, LocalVerdict] = + GenLens[MediatorResponse](_.localVerdict) + + /** DO NOT USE IN PRODUCTION, as this does not necessarily check object invariants. */ + @VisibleForTesting + val rootHashUnsafe: Lens[MediatorResponse, Option[RootHash]] = + GenLens[MediatorResponse](_.rootHash) + + /** DO NOT USE IN PRODUCTION, as this does not necessarily check object invariants. 
*/ + @VisibleForTesting + val confirmingPartiesUnsafe: Lens[MediatorResponse, Set[LfPartyId]] = + GenLens[MediatorResponse](_.confirmingParties) + + private def fromProtoV2(mediatorResponseP: v2.MediatorResponse)( + bytes: ByteString + ): ParsingResult[MediatorResponse] = { + val v2.MediatorResponse( + requestIdPO, + senderP, + localVerdictPO, + rootHashP, + confirmingPartiesP, + domainIdP, + viewPositionPO, + ) = + mediatorResponseP + for { + requestId <- ProtoConverter + .required("MediatorResponse.request_id", requestIdPO) + .flatMap(RequestId.fromProtoPrimitive) + sender <- ParticipantId.fromProtoPrimitive(senderP, "MediatorResponse.sender") + localVerdict <- ProtoConverter + .required("MediatorResponse.local_verdict", localVerdictPO) + .flatMap(LocalVerdict.fromProtoV1) + rootHashO <- RootHash.fromProtoPrimitiveOption(rootHashP) + confirmingParties <- confirmingPartiesP.traverse(ProtoConverter.parseLfPartyId) + domainId <- DomainId.fromProtoPrimitive(domainIdP, "domain_id") + viewPositionO = viewPositionPO.map(ViewPosition.fromProtoV2) + response <- Either + .catchOnly[InvalidMediatorResponse]( + MediatorResponse( + requestId, + sender, + viewPositionO, + localVerdict, + rootHashO, + confirmingParties.toSet, + domainId, + )( + supportedProtoVersions.protocolVersionRepresentativeFor(ProtoVersion(2)), + Some(bytes), + ) + ) + .leftMap(err => InvariantViolation(err.toString)) + } yield response + } + + implicit val mediatorResponseSignedMessageContentCast + : SignedMessageContentCast[MediatorResponse] = + SignedMessageContentCast.create[MediatorResponse]("MediatorResponse") { + case response: MediatorResponse => Some(response) + case _ => None + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/MediatorResult.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/MediatorResult.scala new file mode 100644 index 0000000000..2c22f0c139 --- /dev/null +++ 
b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/MediatorResult.scala @@ -0,0 +1,34 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol.messages + +import com.digitalasset.canton.data.{CantonTimestamp, ViewType} +import com.digitalasset.canton.protocol.messages.SignedProtocolMessageContent.SignedMessageContentCast +import com.digitalasset.canton.serialization.ProtocolVersionedMemoizedEvidence + +trait MediatorResult + extends ProtocolVersionedMemoizedEvidence + with HasDomainId + with HasRequestId + with SignedProtocolMessageContent { + + def verdict: Verdict + + override def signingTimestamp: CantonTimestamp = requestId.unwrap + + def viewType: ViewType +} + +/** The mediator issues a regular mediator result for well-formed mediator requests. + * Malformed mediator requests lead to a [[MalformedMediatorRequestResult]]. + */ +trait RegularMediatorResult extends MediatorResult + +object RegularMediatorResult { + implicit val regularMediatorResultMessageCast: SignedMessageContentCast[RegularMediatorResult] = + SignedMessageContentCast.create[RegularMediatorResult]("RegularMediatorResult") { + case m: RegularMediatorResult => Some(m) + case _ => None + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/ProtocolMessage.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/ProtocolMessage.scala new file mode 100644 index 0000000000..f471ef3116 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/ProtocolMessage.scala @@ -0,0 +1,81 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol.messages + +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.protocol.v4 +import com.digitalasset.canton.sequencing.protocol.{Batch, OpenEnvelope} +import com.digitalasset.canton.topology.DomainId +import com.digitalasset.canton.version.{ + HasRepresentativeProtocolVersion, + RepresentativeProtocolVersion, +} + +/** Parent trait of messages that are sent through the sequencer + */ +trait ProtocolMessage + extends Product + with Serializable + with HasDomainId + with PrettyPrinting + with HasRepresentativeProtocolVersion { + + override def representativeProtocolVersion: RepresentativeProtocolVersion[companionObj.type] + + /** The ID of the domain over which this message is supposed to be sent. */ + def domainId: DomainId + + /** By default prints only the object name as a trade-off for shorter long lines and not leaking confidential data. + * Sub-classes may override the pretty instance to print more information. + */ + override def pretty: Pretty[this.type] = prettyOfObject[ProtocolMessage] +} + +/** Marker trait for [[ProtocolMessage]]s that are not a [[SignedProtocolMessage]] */ +trait UnsignedProtocolMessage extends ProtocolMessage { + protected[messages] def toProtoSomeEnvelopeContentV4: v4.EnvelopeContent.SomeEnvelopeContent +} + +object ProtocolMessage { + + /** Returns the envelopes from the batch that match the given domain ID. 
If any other messages exist, it gives them + * to the provided callback + */ + def filterDomainsEnvelopes[M <: ProtocolMessage]( + batch: Batch[OpenEnvelope[M]], + domainId: DomainId, + onWrongDomain: List[OpenEnvelope[M]] => Unit, + ): List[OpenEnvelope[M]] = { + val (withCorrectDomainId, withWrongDomainId) = + batch.envelopes.partition(_.protocolMessage.domainId == domainId) + if (withWrongDomainId.nonEmpty) + onWrongDomain(withWrongDomainId) + withCorrectDomainId + } + + trait ProtocolMessageContentCast[A <: ProtocolMessage] { + def toKind(message: ProtocolMessage): Option[A] + def targetKind: String + } + + object ProtocolMessageContentCast { + def create[A <: ProtocolMessage](name: String)( + cast: ProtocolMessage => Option[A] + ): ProtocolMessageContentCast[A] = new ProtocolMessageContentCast[A] { + override def toKind(message: ProtocolMessage): Option[A] = cast(message) + + override def targetKind: String = name + } + } + + def toKind[M <: ProtocolMessage](envelope: DefaultOpenEnvelope)(implicit + cast: ProtocolMessageContentCast[M] + ): Option[M] = + cast.toKind(envelope.protocolMessage) + + def select[M <: ProtocolMessage](envelope: DefaultOpenEnvelope)(implicit + cast: ProtocolMessageContentCast[M] + ): Option[OpenEnvelope[M]] = + envelope.traverse(cast.toKind) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/RegisterTopologyTransactionRequest.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/RegisterTopologyTransactionRequest.scala new file mode 100644 index 0000000000..0e9c8052d3 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/RegisterTopologyTransactionRequest.scala @@ -0,0 +1,121 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol.messages + +import cats.syntax.traverse.* +import com.digitalasset.canton.config.CantonRequireTypes.LengthLimitedString.TopologyRequestId +import com.digitalasset.canton.config.CantonRequireTypes.String255 +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.protocol.messages.ProtocolMessage.ProtocolMessageContentCast +import com.digitalasset.canton.protocol.{v0, v4} +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.topology.transaction.* +import com.digitalasset.canton.topology.{DomainId, Member, ParticipantId, UniqueIdentifier} +import com.digitalasset.canton.version.{ + HasProtocolVersionedCompanion, + ProtoVersion, + ProtocolVersion, + RepresentativeProtocolVersion, +} + +/** @param representativeProtocolVersion The representativeProtocolVersion must correspond to the protocol version of + * every transaction in the list (enforced by the factory method) + */ +final case class RegisterTopologyTransactionRequest private ( + requestedBy: Member, + participant: ParticipantId, + requestId: TopologyRequestId, + transactions: List[SignedTopologyTransaction[TopologyChangeOp]], + override val domainId: DomainId, +)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + RegisterTopologyTransactionRequest.type + ] +) extends UnsignedProtocolMessage + with PrettyPrinting { + + override def toProtoSomeEnvelopeContentV4: v4.EnvelopeContent.SomeEnvelopeContent = + v4.EnvelopeContent.SomeEnvelopeContent.RegisterTopologyTransactionRequest(toProtoV0) + + def toProtoV0: v0.RegisterTopologyTransactionRequest = + v0.RegisterTopologyTransactionRequest( + requestedBy = requestedBy.toProtoPrimitive, + participant = participant.uid.toProtoPrimitive, + requestId = requestId.toProtoPrimitive, + signedTopologyTransactions = transactions.map(_.getCryptographicEvidence), + 
domainId = domainId.unwrap.toProtoPrimitive, + ) + + @transient override protected lazy val companionObj: RegisterTopologyTransactionRequest.type = + RegisterTopologyTransactionRequest + + override def pretty: Pretty[RegisterTopologyTransactionRequest] = prettyOfClass( + param("requestBy", _.requestedBy), + param("participant", _.participant), + param("requestId", _.requestId.unwrap.doubleQuoted), + param("numTx", _.transactions.length), + ) + +} + +object RegisterTopologyTransactionRequest + extends HasProtocolVersionedCompanion[RegisterTopologyTransactionRequest] { + + val supportedProtoVersions = SupportedProtoVersions( + ProtoVersion(0) -> VersionedProtoConverter(ProtocolVersion.v30)( + v0.RegisterTopologyTransactionRequest + )( + supportedProtoVersion(_)(fromProtoV0), + _.toProtoV0.toByteString, + ) + ) + + def create( + requestedBy: Member, + participant: ParticipantId, + requestId: TopologyRequestId, + transactions: List[SignedTopologyTransaction[TopologyChangeOp]], + domainId: DomainId, + protocolVersion: ProtocolVersion, + ): Iterable[RegisterTopologyTransactionRequest] = Seq( + RegisterTopologyTransactionRequest( + requestedBy = requestedBy, + participant = participant, + requestId = requestId, + transactions = transactions, + domainId = domainId, + )(protocolVersionRepresentativeFor(protocolVersion)) + ) + + def fromProtoV0( + message: v0.RegisterTopologyTransactionRequest + ): ParsingResult[RegisterTopologyTransactionRequest] = { + for { + requestedBy <- Member.fromProtoPrimitive(message.requestedBy, "requestedBy") + participantUid <- UniqueIdentifier.fromProtoPrimitive(message.participant, "participant") + transactions <- message.signedTopologyTransactions.toList.traverse(elem => + SignedTopologyTransaction.fromByteString(elem) + ) + domainUid <- UniqueIdentifier.fromProtoPrimitive(message.domainId, "domainId") + requestId <- String255.fromProtoPrimitive(message.requestId, "requestId") + } yield RegisterTopologyTransactionRequest( + requestedBy, + 
ParticipantId(participantUid), + requestId, + transactions, + DomainId(domainUid), + )(protocolVersionRepresentativeFor(ProtoVersion(0))) + } + + override def name: String = "RegisterTopologyTransactionRequest" + + implicit val registerTopologyTransactionRequestCast + : ProtocolMessageContentCast[RegisterTopologyTransactionRequest] = + ProtocolMessageContentCast.create[RegisterTopologyTransactionRequest]( + "RegisterTopologyTransactionRequest" + ) { + case rttr: RegisterTopologyTransactionRequest => Some(rttr) + case _ => None + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/RegisterTopologyTransactionResponse.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/RegisterTopologyTransactionResponse.scala new file mode 100644 index 0000000000..eda2075e5a --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/RegisterTopologyTransactionResponse.scala @@ -0,0 +1,187 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol.messages + +import cats.syntax.traverse.* +import com.digitalasset.canton.ProtoDeserializationError +import com.digitalasset.canton.config.CantonRequireTypes.LengthLimitedString.TopologyRequestId +import com.digitalasset.canton.config.CantonRequireTypes.String255 +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.protocol.messages.ProtocolMessage.ProtocolMessageContentCast +import com.digitalasset.canton.protocol.v1.RegisterTopologyTransactionResponse.Result.State as ProtoStateV1 +import com.digitalasset.canton.protocol.{v1, v4} +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.topology.{DomainId, Member, ParticipantId, UniqueIdentifier} +import com.digitalasset.canton.version.{ + HasProtocolVersionedCompanion, + ProtoVersion, + ProtocolVersion, + RepresentativeProtocolVersion, +} + +final case class RegisterTopologyTransactionResponse( + requestedBy: Member, + participant: ParticipantId, + requestId: TopologyRequestId, + results: Seq[RegisterTopologyTransactionResponseResult], + override val domainId: DomainId, +)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + RegisterTopologyTransactionResponse.type + ] +) extends UnsignedProtocolMessage { + + override def toProtoSomeEnvelopeContentV4: v4.EnvelopeContent.SomeEnvelopeContent = + v4.EnvelopeContent.SomeEnvelopeContent.RegisterTopologyTransactionResponse(toProtoV1) + + def toProtoV1: v1.RegisterTopologyTransactionResponse = + v1.RegisterTopologyTransactionResponse( + requestedBy = requestedBy.toProtoPrimitive, + participant = participant.uid.toProtoPrimitive, + requestId = requestId.unwrap, + results = results.map(_.toProtoV1), + domainId = domainId.unwrap.toProtoPrimitive, + ) + + @transient override protected lazy val companionObj: RegisterTopologyTransactionResponse.type = + 
RegisterTopologyTransactionResponse +} + +object RegisterTopologyTransactionResponse + extends HasProtocolVersionedCompanion[RegisterTopologyTransactionResponse] { + val supportedProtoVersions = SupportedProtoVersions( + ProtoVersion(1) -> VersionedProtoConverter(ProtocolVersion.v30)( + v1.RegisterTopologyTransactionResponse + )( + supportedProtoVersion(_)(fromProtoV1), + _.toProtoV1.toByteString, + ) + ) + + def apply( + requestedBy: Member, + participant: ParticipantId, + requestId: TopologyRequestId, + results: Seq[RegisterTopologyTransactionResponseResult], + domainId: DomainId, + protocolVersion: ProtocolVersion, + ): RegisterTopologyTransactionResponse = + RegisterTopologyTransactionResponse(requestedBy, participant, requestId, results, domainId)( + protocolVersionRepresentativeFor(protocolVersion) + ) + + private[messages] def fromProtoV1( + message: v1.RegisterTopologyTransactionResponse + ): ParsingResult[RegisterTopologyTransactionResponse] = + for { + requestedBy <- Member.fromProtoPrimitive(message.requestedBy, "requestedBy") + participantUid <- UniqueIdentifier.fromProtoPrimitive(message.participant, "participant") + domainUid <- UniqueIdentifier.fromProtoPrimitive(message.domainId, "domainId") + requestId <- String255.fromProtoPrimitive(message.requestId, "requestId") + results <- message.results.traverse(RegisterTopologyTransactionResponseResult.fromProtoV1) + } yield RegisterTopologyTransactionResponse( + requestedBy, + ParticipantId(participantUid), + requestId, + results, + DomainId(domainUid), + )(protocolVersionRepresentativeFor(ProtoVersion(1))) + + override def name: String = "RegisterTopologyTransactionResponse" + + def create( + request: RegisterTopologyTransactionRequest, + results: List[RegisterTopologyTransactionResponseResult], + protocolVersion: ProtocolVersion, + ): RegisterTopologyTransactionResponse = + RegisterTopologyTransactionResponse( + request.requestedBy, + request.participant, + request.requestId, + results, + 
request.domainId, + )(RegisterTopologyTransactionResponse.protocolVersionRepresentativeFor(protocolVersion)) + + implicit val registerTopologyTransactionResponseCast: ProtocolMessageContentCast[ + RegisterTopologyTransactionResponse + ] = + ProtocolMessageContentCast + .create[RegisterTopologyTransactionResponse]( + "RegisterTopologyTransactionResponse" + ) { + case rttr: RegisterTopologyTransactionResponse => Some(rttr) + case _ => None + } +} + +final case class RegisterTopologyTransactionResponseResult( + state: RegisterTopologyTransactionResponseResult.State +) extends PrettyPrinting { + + def toProtoV1: v1.RegisterTopologyTransactionResponse.Result = { + import RegisterTopologyTransactionResponseResult.* + + def reply(state: v1.RegisterTopologyTransactionResponse.Result.State) = + v1.RegisterTopologyTransactionResponse.Result( + state = state, + errorMessage = "", + ) + + state match { + case State.Failed => reply(ProtoStateV1.FAILED) + case State.Rejected => reply(ProtoStateV1.REJECTED) + case State.Accepted => reply(ProtoStateV1.ACCEPTED) + case State.Duplicate => reply(ProtoStateV1.DUPLICATE) + case State.Obsolete => reply(ProtoStateV1.OBSOLETE) + } + } + + override def pretty: Pretty[RegisterTopologyTransactionResponseResult] = prettyOfClass( + param("state", _.state) + ) +} + +object RegisterTopologyTransactionResponseResult { + sealed trait State extends Product with Serializable with PrettyPrinting { + override def pretty: Pretty[this.type] = prettyOfObject[this.type] + } + + object State { + case object Failed extends State + + case object Rejected extends State + + case object Accepted extends State + + case object Duplicate extends State + + /** Unnecessary removes are marked as obsolete */ + case object Obsolete extends State + } + + def fromProtoV1( + result: v1.RegisterTopologyTransactionResponse.Result + ): ParsingResult[RegisterTopologyTransactionResponseResult] = { + result.state match { + case ProtoStateV1.MISSING_STATE => + Left( + 
ProtoDeserializationError.OtherError( + "Missing state for v1.RegisterTopologyTransactionResponse.State.Result" + ) + ) + case ProtoStateV1.FAILED => Right(RegisterTopologyTransactionResponseResult(State.Failed)) + case ProtoStateV1.REJECTED => Right(RegisterTopologyTransactionResponseResult(State.Rejected)) + case ProtoStateV1.ACCEPTED => Right(RegisterTopologyTransactionResponseResult(State.Accepted)) + case ProtoStateV1.DUPLICATE => + Right(RegisterTopologyTransactionResponseResult(State.Duplicate)) + case ProtoStateV1.OBSOLETE => Right(RegisterTopologyTransactionResponseResult(State.Obsolete)) + case ProtoStateV1.Unrecognized(unrecognizedValue) => + Left( + ProtoDeserializationError.OtherError( + s"Unrecognised state for v1.RegisterTopologyTransactionResponse.State.Result: $unrecognizedValue" + ) + ) + } + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/RootHashMessage.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/RootHashMessage.scala new file mode 100644 index 0000000000..42170b1c23 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/RootHashMessage.scala @@ -0,0 +1,204 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol.messages + +import cats.Functor +import com.digitalasset.canton.ProtoDeserializationError.ValueDeserializationError +import com.digitalasset.canton.data.ViewType +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.protocol.messages.ProtocolMessage.ProtocolMessageContentCast +import com.digitalasset.canton.protocol.messages.RootHashMessage.RootHashMessagePayloadCast +import com.digitalasset.canton.protocol.{RootHash, v0, v4} +import com.digitalasset.canton.serialization.HasCryptographicEvidence +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.topology.DomainId +import com.digitalasset.canton.version.{ + HasProtocolVersionedWithContextCompanion, + ProtoVersion, + ProtocolVersion, + RepresentativeProtocolVersion, +} +import com.google.protobuf.ByteString + +/** One root hash message is sent for each participant involved in a mediator request that requires root hash messages. + * The root hash message is delivered to the participant and the mediator. + * The mediator checks that it receives the right root hash messages + * and that they all contain the root hash that the mediator request message specifies. + * The mediator also checks that all payloads have the same serialization and, + * if it can parse the mediator request envelope, that the payload fits to the mediator request. 
+ */ +final case class RootHashMessage[+Payload <: RootHashMessagePayload]( + rootHash: RootHash, + override val domainId: DomainId, + viewType: ViewType, + payload: Payload, +)(override val representativeProtocolVersion: RepresentativeProtocolVersion[RootHashMessage.type]) + extends UnsignedProtocolMessage + with PrettyPrinting { + + override def toProtoSomeEnvelopeContentV4: v4.EnvelopeContent.SomeEnvelopeContent = + v4.EnvelopeContent.SomeEnvelopeContent.RootHashMessage(toProtoV0) + + def toProtoV0: v0.RootHashMessage = v0.RootHashMessage( + rootHash = rootHash.toProtoPrimitive, + domainId = domainId.toProtoPrimitive, + viewType = viewType.toProtoEnum, + payload = payload.getCryptographicEvidence, + ) + + override def pretty: Pretty[RootHashMessage.this.type] = + prettyOfClass( + param("root hash", _.rootHash), + param("payload size", _.payload.getCryptographicEvidence.size()), + ) + + @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) + def map[Payload2 <: RootHashMessagePayload](f: Payload => Payload2): RootHashMessage[Payload2] = { + val payload2 = f(payload) + if (payload eq payload2) this.asInstanceOf[RootHashMessage[Payload2]] + else this.copy(payload = payload2) + } + + @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) + def traverse[F[_], Payload2 <: RootHashMessagePayload]( + f: Payload => F[Payload2] + )(implicit F: Functor[F]): F[RootHashMessage[Payload2]] = + F.map(f(payload)) { payload2 => + if (payload eq payload2) this.asInstanceOf[RootHashMessage[Payload2]] + else this.copy(payload = payload2) + } + + def copy[Payload2 <: RootHashMessagePayload]( + rootHash: RootHash = rootHash, + payload: Payload2 = payload, + viewType: ViewType = viewType, + ): RootHashMessage[Payload2] = + RootHashMessage( + rootHash, + domainId, + viewType, + payload, + )(representativeProtocolVersion) + + @transient override protected lazy val companionObj: RootHashMessage.type = RootHashMessage +} + +object RootHashMessage + extends 
HasProtocolVersionedWithContextCompanion[RootHashMessage[ + RootHashMessagePayload + ], ByteString => ParsingResult[RootHashMessagePayload]] { + + val supportedProtoVersions = SupportedProtoVersions( + ProtoVersion(0) -> VersionedProtoConverter(ProtocolVersion.v30)(v0.RootHashMessage)( + supportedProtoVersion(_)((deserializer, proto) => fromProtoV0(deserializer)(proto)), + _.toProtoV0.toByteString, + ) + ) + + def apply[Payload <: RootHashMessagePayload]( + rootHash: RootHash, + domainId: DomainId, + protocolVersion: ProtocolVersion, + viewType: ViewType, + payload: Payload, + ): RootHashMessage[Payload] = RootHashMessage( + rootHash, + domainId, + viewType, + payload, + )(protocolVersionRepresentativeFor(protocolVersion)) + + def fromProtoV0[Payload <: RootHashMessagePayload]( + payloadDeserializer: ByteString => ParsingResult[Payload] + )( + rootHashMessageP: v0.RootHashMessage + ): ParsingResult[RootHashMessage[Payload]] = { + val v0.RootHashMessage(rootHashP, domainIdP, viewTypeP, payloadP) = rootHashMessageP + for { + rootHash <- RootHash.fromProtoPrimitive(rootHashP) + domainId <- DomainId.fromProtoPrimitive(domainIdP, "domain_id") + viewType <- ViewType.fromProtoEnum(viewTypeP) + payloadO <- payloadDeserializer(payloadP) + } yield RootHashMessage( + rootHash, + domainId, + viewType, + payloadO, + )(protocolVersionRepresentativeFor(ProtoVersion(0))) + } + + implicit def rootHashMessageProtocolMessageContentCast[Payload <: RootHashMessagePayload](implicit + cast: RootHashMessagePayloadCast[Payload] + ): ProtocolMessageContentCast[RootHashMessage[Payload]] = + ProtocolMessageContentCast.create[RootHashMessage[Payload]]("RootHashMessage") { + case rhm: RootHashMessage[_] => rhm.traverse(cast.toKind) + case _ => None + } + + trait RootHashMessagePayloadCast[+Payload <: RootHashMessagePayload] { + def toKind(payload: RootHashMessagePayload): Option[Payload] + } + + def toKind[Payload <: RootHashMessagePayload](payload: RootHashMessagePayload)(implicit + cast: 
RootHashMessagePayloadCast[Payload] + ): Option[Payload] = cast.toKind(payload) + + def select[Payload <: RootHashMessagePayload](message: RootHashMessage[RootHashMessagePayload])( + implicit cast: RootHashMessagePayloadCast[Payload] + ): Option[RootHashMessage[Payload]] = + message.traverse(toKind(_)) + + override def name: String = "RootHashMessage" +} + +/** Payloads of [[RootHashMessage]] */ +trait RootHashMessagePayload extends PrettyPrinting with HasCryptographicEvidence + +case object EmptyRootHashMessagePayload extends RootHashMessagePayload { + override def pretty: Pretty[EmptyRootHashMessagePayload.type] = prettyOfString(_ => "\"\"") + def fromByteString( + bytes: ByteString + ): ParsingResult[EmptyRootHashMessagePayload.type] = + Either.cond( + bytes.isEmpty, + EmptyRootHashMessagePayload, + ValueDeserializationError("payload", s"expected no payload, but found ${bytes.size} bytes"), + ) + + implicit val emptyRootHashMessagePayloadCast + : RootHashMessagePayloadCast[EmptyRootHashMessagePayload.type] = { + case payload: EmptyRootHashMessagePayload.type => Some(payload) + case _ => None + } + + override def getCryptographicEvidence: ByteString = ByteString.EMPTY +} + +final case class SerializedRootHashMessagePayload(bytes: ByteString) + extends RootHashMessagePayload { + + override def pretty: Pretty[SerializedRootHashMessagePayload] = prettyOfClass( + param("payload size", _.bytes.size) + ) + + override def getCryptographicEvidence: ByteString = bytes +} + +object SerializedRootHashMessagePayload { + def fromByteString( + bytes: ByteString + ): ParsingResult[SerializedRootHashMessagePayload] = + Right( + if (bytes.isEmpty) SerializedRootHashMessagePayload.empty + else SerializedRootHashMessagePayload(bytes) + ) + + val empty: SerializedRootHashMessagePayload = SerializedRootHashMessagePayload(ByteString.EMPTY) + + implicit val serializedRootHashMessagePayloadCast + : RootHashMessagePayloadCast[SerializedRootHashMessagePayload] = { + case serialized: 
SerializedRootHashMessagePayload => Some(serialized) + case _ => None + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/RootHashMessageRecipients.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/RootHashMessageRecipients.scala new file mode 100644 index 0000000000..d00b78003c --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/RootHashMessageRecipients.scala @@ -0,0 +1,184 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol.messages + +import cats.data.EitherT +import cats.syntax.alternative.* +import cats.syntax.parallel.* +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.LfPartyId +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.protocol.messages.EncryptedViewMessage.RecipientsInfo +import com.digitalasset.canton.sequencing.protocol.{ + MemberRecipient, + ParticipantsOfParty, + Recipient, + Recipients, + RecipientsTree, +} +import com.digitalasset.canton.topology.client.TopologySnapshot +import com.digitalasset.canton.topology.{MediatorRef, ParticipantId, PartyId} +import com.digitalasset.canton.util.FutureInstances.* + +import scala.concurrent.{ExecutionContext, Future} + +object RootHashMessageRecipients { + + /** Returns a Left if some of the informeeParties don't have active + * participants, in which case the parties with missing active participants are returned. 
+ */ + def encryptedViewMessageRecipientsInfo( + ipsSnapshot: TopologySnapshot, + informeeParties: List[LfPartyId], + )(implicit + ec: ExecutionContext + ): EitherT[Future, Set[LfPartyId], RecipientsInfo] = for { + partiesWithGroupAddressing <- EitherT.right( + ipsSnapshot.partiesWithGroupAddressing(informeeParties) + ) + participantsOfInformessWithoutGroupAddressing <- ipsSnapshot + .activeParticipantsOfAll((informeeParties.toSet -- partiesWithGroupAddressing).toList) + participantsCoveredByGroupAddressing <- ipsSnapshot + .activeParticipantsOfAll(partiesWithGroupAddressing.toList) + } yield RecipientsInfo( + informeeParticipants = + participantsOfInformessWithoutGroupAddressing -- participantsCoveredByGroupAddressing, + partiesWithGroupAddressing.map(PartyId.tryFromLfParty), + participantsCoveredByGroupAddressing, + ) + + def confirmationRequestRootHashMessagesRecipients( + recipientInfos: Seq[RecipientsInfo], + mediator: MediatorRef, + ): List[Recipients] = { + + val participantRecipients = { + val participantsAddressedByGroupAddress = + recipientInfos.toSet.flatMap[ParticipantId]( + _.participantsAddressedByGroupAddress + ) + val allInformeeParticipants = recipientInfos.toSet.flatMap[ParticipantId]( + _.informeeParticipants + ) + allInformeeParticipants -- participantsAddressedByGroupAddress + }.map(MemberRecipient) + + val groupRecipients = recipientInfos.toSet + .flatMap[PartyId](_.partiesWithGroupAddressing) + .map(p => ParticipantsOfParty(p)) + + val recipients = participantRecipients ++ groupRecipients + val groupAddressingBeingUsed = groupRecipients.nonEmpty + + NonEmpty + .from(recipients.toList) + .map { recipientsNE => + if (groupAddressingBeingUsed) { + // if using group addressing, we just place all recipients in one group instead of separately as before (it was separate for legacy reasons) + val mediatorSet: NonEmpty[Set[Recipient]] = NonEmpty.mk(Set, mediator.toRecipient) + Recipients.recipientGroups( + NonEmpty + .mk(Seq, recipientsNE.toSet ++ 
mediatorSet) + ) + } else + Recipients.recipientGroups( + recipientsNE.map(NonEmpty.mk(Set, _, mediator.toRecipient)) + ) + } + .toList + } + + def recipientsAreValid( + recipients: Recipients, + participantId: ParticipantId, + mediator: MediatorRef, + participantIsAddressByPartyGroupAddress: ( + Seq[LfPartyId], + ParticipantId, + ) => FutureUnlessShutdown[Boolean], + ): FutureUnlessShutdown[Boolean] = + recipients.asSingleGroup match { + case Some(group) => + if (group == NonEmpty.mk(Set, MemberRecipient(participantId), mediator.toRecipient)) + FutureUnlessShutdown.pure(true) + else if (group.contains(mediator.toRecipient) && group.size >= 2) { + val informeeParty = group.collect { case ParticipantsOfParty(party) => + party.toLf + } + if (informeeParty.isEmpty) FutureUnlessShutdown.pure(false) + else + participantIsAddressByPartyGroupAddress( + informeeParty.toSeq, + participantId, + ) + } else FutureUnlessShutdown.pure(false) + case _ => FutureUnlessShutdown.pure(false) + } + + def wrongAndCorrectRecipients( + recipientsList: Seq[Recipients], + mediator: MediatorRef, + ): (Seq[RecipientsTree], Seq[NonEmpty[Set[Recipient]]]) = { + val (wrongRecipients, correctRecipients) = recipientsList.flatMap { recipients => + recipients.trees.toList.map { + case tree @ RecipientsTree(group, Seq()) => + val participantCount = group.count { + case MemberRecipient(_: ParticipantId) => true + case _ => false + } + val groupAddressCount = group.count { + case ParticipantsOfParty(_) => true + case _ => false + } + val groupAddressingBeingUsed = groupAddressCount > 0 + Either.cond( + ((group.size == 2) || (groupAddressingBeingUsed && group.size >= 2)) && group.contains( + mediator.toRecipient + ) && (participantCount + groupAddressCount > 0), + group, + tree, + ) + case badTree => Left(badTree) + } + }.separate + (wrongRecipients, correctRecipients) + } + + def wrongMembers( + rootHashMessagesRecipients: Seq[Recipient], + request: MediatorRequest, + topologySnapshot: 
TopologySnapshot, + )(implicit executionContext: ExecutionContext): Future[WrongMembers] = { + val informeesAddressedAsGroup = rootHashMessagesRecipients.collect { + case ParticipantsOfParty(informee) => + informee.toLf + } + val participants = rootHashMessagesRecipients.collect { + case MemberRecipient(p: ParticipantId) => p + } + val informeesNotAddressedAsGroups = request.allInformees -- informeesAddressedAsGroup.toSet + val superfluousInformees = informeesAddressedAsGroup.toSet -- request.allInformees + for { + allNonGroupAddressedInformeeParticipants <- + informeesNotAddressedAsGroups.toList + .parTraverse(topologySnapshot.activeParticipantsOf) + .map(_.flatMap(_.keySet).toSet) + participantsAddressedAsGroup <- informeesAddressedAsGroup.toList + .parTraverse(topologySnapshot.activeParticipantsOf) + .map(_.flatMap(_.keySet).toSet) + } yield { + val participantsSet = participants.toSet + val missingInformeeParticipants = + allNonGroupAddressedInformeeParticipants diff participantsSet diff participantsAddressedAsGroup + val superfluousMembers = participantsSet diff allNonGroupAddressedInformeeParticipants + WrongMembers(missingInformeeParticipants, superfluousMembers, superfluousInformees) + } + } + + final case class WrongMembers( + missingInformeeParticipants: Set[ParticipantId], + superfluousMembers: Set[ParticipantId], + superfluousInformees: Set[LfPartyId], + ) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/SignedProtocolMessage.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/SignedProtocolMessage.scala new file mode 100644 index 0000000000..9f509e7cce --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/SignedProtocolMessage.scala @@ -0,0 +1,229 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol.messages + +import cats.Functor +import cats.data.EitherT +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.crypto.{ + HashPurpose, + Signature, + SignatureCheckError, + SyncCryptoApi, + SyncCryptoError, +} +import com.digitalasset.canton.logging.pretty.Pretty +import com.digitalasset.canton.protocol.messages.ProtocolMessage.ProtocolMessageContentCast +import com.digitalasset.canton.protocol.messages.SignedProtocolMessageContent.SignedMessageContentCast +import com.digitalasset.canton.protocol.v1 +import com.digitalasset.canton.sequencing.protocol.ClosedEnvelope +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.topology.MediatorGroup.MediatorGroupIndex +import com.digitalasset.canton.topology.{DomainId, Member} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.version.{ + HasProtocolVersionedCompanion, + HasProtocolVersionedWrapper, + ProtoVersion, + ProtocolVersion, + RepresentativeProtocolVersion, +} +import com.google.common.annotations.VisibleForTesting + +import scala.concurrent.{ExecutionContext, Future} +import scala.math.Ordered.orderingToOrdered + +/** There can be any number of signatures. + * Every signature covers the serialization of the `typedMessage` and needs to be valid. 
+ */ +@SuppressWarnings(Array("org.wartremover.warts.FinalCaseClass")) // This class is mocked in tests +case class SignedProtocolMessage[+M <: SignedProtocolMessageContent]( + typedMessage: TypedSignedProtocolMessageContent[M], + signatures: NonEmpty[Seq[Signature]], +)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + SignedProtocolMessage.type + ] +) extends ProtocolMessage + with HasProtocolVersionedWrapper[SignedProtocolMessage[SignedProtocolMessageContent]] { + + @transient override protected lazy val companionObj: SignedProtocolMessage.type = + SignedProtocolMessage + + def message: M = typedMessage.content + + def verifySignature( + snapshot: SyncCryptoApi, + member: Member, + ): EitherT[Future, SignatureCheckError, Unit] = + if ( + representativeProtocolVersion >= + companionObj.protocolVersionRepresentativeFor(ProtocolVersion.v30) + ) { + // TODO(#12390) Properly check the signatures, i.e. there shouldn't be multiple signatures from the same member on the same envelope + ClosedEnvelope.verifySignatures( + snapshot, + member, + typedMessage.getCryptographicEvidence, + signatures, + ) + } else { + val hashPurpose = message.hashPurpose + val hash = snapshot.pureCrypto.digest(hashPurpose, message.getCryptographicEvidence) + snapshot.verifySignatures(hash, member, signatures) + } + + def verifySignature( + snapshot: SyncCryptoApi, + mediatorGroupIndex: MediatorGroupIndex, + )(implicit traceContext: TraceContext): EitherT[Future, SignatureCheckError, Unit] = { + if ( + representativeProtocolVersion >= + companionObj.protocolVersionRepresentativeFor(ProtocolVersion.v30) + ) { + + ClosedEnvelope.verifySignatures( + snapshot, + mediatorGroupIndex, + typedMessage.getCryptographicEvidence, + signatures, + ) + } else { + val hashPurpose = message.hashPurpose + val hash = snapshot.pureCrypto.digest(hashPurpose, message.getCryptographicEvidence) + snapshot.verifySignatures(hash, mediatorGroupIndex, signatures) + } + } + + def copy[MM <: 
SignedProtocolMessageContent]( + typedMessage: TypedSignedProtocolMessageContent[MM] = this.typedMessage, + signatures: NonEmpty[Seq[Signature]] = this.signatures, + ): SignedProtocolMessage[MM] = + SignedProtocolMessage(typedMessage, signatures)(representativeProtocolVersion) + + override def domainId: DomainId = message.domainId + + protected def toProtoV1: v1.SignedProtocolMessage = { + v1.SignedProtocolMessage( + signature = signatures.map(_.toProtoV0), + typedSignedProtocolMessageContent = typedMessage.toByteString, + ) + } + + @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) + private[SignedProtocolMessage] def traverse[F[_], MM <: SignedProtocolMessageContent]( + f: M => F[MM] + )(implicit F: Functor[F]): F[SignedProtocolMessage[MM]] = { + F.map(typedMessage.traverse(f)) { newTypedMessage => + if (newTypedMessage eq typedMessage) this.asInstanceOf[SignedProtocolMessage[MM]] + else this.copy(typedMessage = newTypedMessage) + } + } + + override def pretty: Pretty[this.type] = + prettyOfClass(unnamedParam(_.message), param("signatures", _.signatures)) +} + +object SignedProtocolMessage + extends HasProtocolVersionedCompanion[SignedProtocolMessage[ + SignedProtocolMessageContent + ]] { + override val name: String = "SignedProtocolMessage" + + val supportedProtoVersions = SupportedProtoVersions( + ProtoVersion(1) -> VersionedProtoConverter( + ProtocolVersion.v30 + )(v1.SignedProtocolMessage)( + supportedProtoVersion(_)(fromProtoV1), + _.toProtoV1.toByteString, + ) + ) + + def apply[M <: SignedProtocolMessageContent]( + typedMessage: TypedSignedProtocolMessageContent[M], + signatures: NonEmpty[Seq[Signature]], + protocolVersion: ProtocolVersion, + ): SignedProtocolMessage[M] = + SignedProtocolMessage(typedMessage, signatures)( + protocolVersionRepresentativeFor(protocolVersion) + ) + + @VisibleForTesting + def from[M <: SignedProtocolMessageContent]( + message: M, + protocolVersion: ProtocolVersion, + signature: Signature, + moreSignatures: 
Signature* + ): SignedProtocolMessage[M] = SignedProtocolMessage( + TypedSignedProtocolMessageContent(message, protocolVersion), + NonEmpty(Seq, signature, moreSignatures: _*), + protocolVersion, + ) + + def signAndCreate[M <: SignedProtocolMessageContent]( + message: M, + cryptoApi: SyncCryptoApi, + protocolVersion: ProtocolVersion, + )(implicit + traceContext: TraceContext, + ec: ExecutionContext, + ): EitherT[Future, SyncCryptoError, SignedProtocolMessage[M]] = { + val typedMessage = TypedSignedProtocolMessageContent(message, protocolVersion) + for { + signature <- mkSignature(typedMessage, cryptoApi) + } yield SignedProtocolMessage(typedMessage, NonEmpty(Seq, signature))( + protocolVersionRepresentativeFor(protocolVersion) + ) + } + + @VisibleForTesting + private[canton] def mkSignature[M <: SignedProtocolMessageContent]( + typedMessage: TypedSignedProtocolMessageContent[M], + cryptoApi: SyncCryptoApi, + )(implicit + traceContext: TraceContext + ): EitherT[Future, SyncCryptoError, Signature] = { + val hashPurpose = HashPurpose.SignedProtocolMessageSignature + val serialization = typedMessage.getCryptographicEvidence + + val hash = cryptoApi.pureCrypto.digest(hashPurpose, serialization) + cryptoApi.sign(hash) + } + + def trySignAndCreate[M <: SignedProtocolMessageContent]( + message: M, + cryptoApi: SyncCryptoApi, + protocolVersion: ProtocolVersion, + )(implicit traceContext: TraceContext, ec: ExecutionContext): Future[SignedProtocolMessage[M]] = + signAndCreate(message, cryptoApi, protocolVersion) + .valueOr(err => + throw new IllegalStateException(s"Failed to create signed protocol message: $err") + ) + + private def fromProtoV1( + signedMessageP: v1.SignedProtocolMessage + ): ParsingResult[SignedProtocolMessage[SignedProtocolMessageContent]] = { + val v1.SignedProtocolMessage(signaturesP, typedMessageBytes) = signedMessageP + for { + typedMessage <- TypedSignedProtocolMessageContent.fromByteString(typedMessageBytes) + signatures <- 
ProtoConverter.parseRequiredNonEmpty( + Signature.fromProtoV0, + "signatures", + signaturesP, + ) + signedMessage = SignedProtocolMessage(typedMessage, signatures)( + protocolVersionRepresentativeFor(ProtoVersion(1)) + ) + } yield signedMessage + } + + implicit def signedMessageCast[M <: SignedProtocolMessageContent](implicit + cast: SignedMessageContentCast[M] + ): ProtocolMessageContentCast[SignedProtocolMessage[M]] = + ProtocolMessageContentCast.create[SignedProtocolMessage[M]](cast.targetKind) { + case sm: SignedProtocolMessage[SignedProtocolMessageContent] => sm.traverse(cast.toKind) + case _ => None + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/SignedProtocolMessageContent.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/SignedProtocolMessageContent.scala new file mode 100644 index 0000000000..d2357a1ce0 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/SignedProtocolMessageContent.scala @@ -0,0 +1,51 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol.messages + +import com.digitalasset.canton.crypto.HashPurpose +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.protocol.v0 +import com.digitalasset.canton.serialization.ProtocolVersionedMemoizedEvidence + +trait SignedProtocolMessageContent + extends ProtocolVersionedMemoizedEvidence + with HasDomainId + with PrettyPrinting + with Product + with Serializable { + + /** Converts this object into a [[com.google.protobuf.ByteString]] using [[com.digitalasset.canton.serialization.ProtocolVersionedMemoizedEvidence.getCryptographicEvidence]] + * and wraps the result in the appropriate [[com.digitalasset.canton.protocol.v0.TypedSignedProtocolMessageContent.SomeSignedProtocolMessage]] constructor. + */ + protected[messages] def toProtoTypedSomeSignedProtocolMessage + : v0.TypedSignedProtocolMessageContent.SomeSignedProtocolMessage + + /** Hash purpose that uniquely identifies the type of message content to be signed. */ + def hashPurpose: HashPurpose + + /** The timestamp of the [[com.digitalasset.canton.crypto.SyncCryptoApi]] used for signing this message. 
+ */ + def signingTimestamp: CantonTimestamp + + override def pretty: Pretty[this.type] = prettyOfObject[SignedProtocolMessageContent] +} + +object SignedProtocolMessageContent { + trait SignedMessageContentCast[A] { + def toKind(content: SignedProtocolMessageContent): Option[A] + + def targetKind: String + } + + object SignedMessageContentCast { + def create[A](name: String)( + cast: SignedProtocolMessageContent => Option[A] + ): SignedMessageContentCast[A] = new SignedMessageContentCast[A] { + override def toKind(content: SignedProtocolMessageContent): Option[A] = cast(content) + + override def targetKind: String = name + } + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/TransactionResultMessage.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/TransactionResultMessage.scala new file mode 100644 index 0000000000..18f9a3575c --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/TransactionResultMessage.scala @@ -0,0 +1,139 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol.messages + +import com.digitalasset.canton.crypto.HashPurpose +import com.digitalasset.canton.data.ViewType.TransactionViewType +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.protocol.messages.SignedProtocolMessageContent.SignedMessageContentCast +import com.digitalasset.canton.protocol.{RequestId, RootHash, v0, v3} +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.topology.DomainId +import com.digitalasset.canton.version.* +import com.google.protobuf.ByteString + +/** Transaction result message that the mediator sends to all stakeholders of a confirmation request with its verdict. + * https://engineering.da-int.net/docs/platform-architecture-handbook/arch/canton/transactions.html#phase-6-broadcast-of-result + * + * @param requestId identifier of the confirmation request + * @param verdict the finalized verdict on the request + */ +@SuppressWarnings(Array("org.wartremover.warts.FinalCaseClass")) // This class is mocked in tests +case class TransactionResultMessage private ( + override val requestId: RequestId, + override val verdict: Verdict, + rootHash: RootHash, + override val domainId: DomainId, +)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + TransactionResultMessage.type + ], + override val deserializedFrom: Option[ByteString], +) extends RegularMediatorResult + with HasProtocolVersionedWrapper[TransactionResultMessage] + with PrettyPrinting { + + def copy( + requestId: RequestId = this.requestId, + verdict: Verdict = this.verdict, + rootHash: RootHash = this.rootHash, + domainId: DomainId = this.domainId, + ): TransactionResultMessage = + TransactionResultMessage(requestId, verdict, rootHash, domainId)( + representativeProtocolVersion, + None, + ) + + override def 
viewType: TransactionViewType = TransactionViewType + + /** Computes the serialization of the object as a [[com.google.protobuf.ByteString]]. + * + * Must meet the contract of [[com.digitalasset.canton.serialization.HasCryptographicEvidence.getCryptographicEvidence]] + * except that when called several times, different [[com.google.protobuf.ByteString]]s may be returned. + */ + override protected[this] def toByteStringUnmemoized: ByteString = + super[HasProtocolVersionedWrapper].toByteString + + @transient override protected lazy val companionObj: TransactionResultMessage.type = + TransactionResultMessage + + protected def toProtoV3: v3.TransactionResultMessage = + v3.TransactionResultMessage( + requestId = Some(requestId.toProtoPrimitive), + verdict = Some(verdict.toProtoV3), + rootHash = rootHash.toProtoPrimitive, + domainId = domainId.toProtoPrimitive, + ) + + override protected[messages] def toProtoTypedSomeSignedProtocolMessage + : v0.TypedSignedProtocolMessageContent.SomeSignedProtocolMessage = + v0.TypedSignedProtocolMessageContent.SomeSignedProtocolMessage.TransactionResult( + getCryptographicEvidence + ) + + override def hashPurpose: HashPurpose = HashPurpose.TransactionResultSignature + + override def pretty: Pretty[TransactionResultMessage] = + prettyOfClass( + param("requestId", _.requestId.unwrap), + param("verdict", _.verdict), + param("rootHash", _.rootHash), + param("domainId", _.domainId), + ) +} + +object TransactionResultMessage + extends HasMemoizedProtocolVersionedWrapperCompanion[ + TransactionResultMessage, + ] { + override val name: String = "TransactionResultMessage" + + val supportedProtoVersions = SupportedProtoVersions( + ProtoVersion(3) -> VersionedProtoConverter(ProtocolVersion.v30)( + v3.TransactionResultMessage + )( + supportedProtoVersionMemoized(_)(fromProtoV3), + _.toProtoV3.toByteString, + ) + ) + + def apply( + requestId: RequestId, + verdict: Verdict, + rootHash: RootHash, + domainId: DomainId, + protocolVersion: 
ProtocolVersion, + ): TransactionResultMessage = + TransactionResultMessage(requestId, verdict, rootHash, domainId)( + protocolVersionRepresentativeFor(protocolVersion), + None, + ) + + private def fromProtoV3(protoResultMessage: v3.TransactionResultMessage)( + bytes: ByteString + ): ParsingResult[TransactionResultMessage] = { + val v3.TransactionResultMessage(requestIdPO, verdictPO, rootHashP, domainIdP) = + protoResultMessage + for { + requestId <- ProtoConverter + .required("request_id", requestIdPO) + .flatMap(RequestId.fromProtoPrimitive) + transactionResult <- ProtoConverter + .required("verdict", verdictPO) + .flatMap(Verdict.fromProtoV3) + rootHash <- RootHash.fromProtoPrimitive(rootHashP) + domainId <- DomainId.fromProtoPrimitive(domainIdP, "domain_id") + } yield TransactionResultMessage(requestId, transactionResult, rootHash, domainId)( + protocolVersionRepresentativeFor(ProtoVersion(3)), + Some(bytes), + ) + } + + implicit val transactionResultMessageCast: SignedMessageContentCast[TransactionResultMessage] = + SignedMessageContentCast.create[TransactionResultMessage]("TransactionResultMessage") { + case m: TransactionResultMessage => Some(m) + case _ => None + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/TransferInMediatorMessage.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/TransferInMediatorMessage.scala new file mode 100644 index 0000000000..72e24bf59b --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/TransferInMediatorMessage.scala @@ -0,0 +1,130 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol.messages + +import com.digitalasset.canton.LfPartyId +import com.digitalasset.canton.ProtoDeserializationError.OtherError +import com.digitalasset.canton.config.RequireTypes.NonNegativeInt +import com.digitalasset.canton.crypto.HashOps +import com.digitalasset.canton.data.{Informee, TransferInViewTree, ViewPosition, ViewType} +import com.digitalasset.canton.logging.pretty.Pretty +import com.digitalasset.canton.protocol.* +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.topology.{DomainId, MediatorRef} +import com.digitalasset.canton.util.EitherUtil +import com.digitalasset.canton.version.Transfer.TargetProtocolVersion +import com.digitalasset.canton.version.{ + HasProtocolVersionedWithContextCompanion, + ProtoVersion, + ProtocolVersion, + RepresentativeProtocolVersion, +} + +import java.util.UUID + +/** Message sent to the mediator as part of a transfer-in request + * + * @param tree The transfer-in view tree blinded for the mediator + * @throws java.lang.IllegalArgumentException if the common data is blinded or the view is not blinded + */ +final case class TransferInMediatorMessage(tree: TransferInViewTree) extends MediatorRequest { + + require(tree.commonData.isFullyUnblinded, "The transfer-in common data must be unblinded") + require(tree.view.isBlinded, "The transfer-out view must be blinded") + + private[this] val commonData = tree.commonData.tryUnwrap + + // Align the protocol version with the common data's protocol version + lazy val protocolVersion: TargetProtocolVersion = commonData.targetProtocolVersion + + override lazy val representativeProtocolVersion + : RepresentativeProtocolVersion[TransferInMediatorMessage.type] = + TransferInMediatorMessage.protocolVersionRepresentativeFor(protocolVersion.v) + + override def domainId: DomainId = 
commonData.targetDomain.unwrap + + override def mediator: MediatorRef = commonData.targetMediator + + override def requestUuid: UUID = commonData.uuid + + override def informeesAndThresholdByViewHash: Map[ViewHash, (Set[Informee], NonNegativeInt)] = { + val confirmingParties = commonData.confirmingParties + val threshold = NonNegativeInt.tryCreate(confirmingParties.size) + Map(tree.viewHash -> ((confirmingParties, threshold))) + } + + override def informeesAndThresholdByViewPosition + : Map[ViewPosition, (Set[Informee], NonNegativeInt)] = { + val confirmingParties = commonData.confirmingParties + val threshold = NonNegativeInt.tryCreate(confirmingParties.size) + Map(tree.viewPosition -> ((confirmingParties, threshold))) + } + + override def minimumThreshold(informees: Set[Informee]): NonNegativeInt = NonNegativeInt.one + + override def createMediatorResult( + requestId: RequestId, + verdict: Verdict, + recipientParties: Set[LfPartyId], + ): MediatorResult with SignedProtocolMessageContent = { + val informees = commonData.stakeholders + require( + recipientParties.subsetOf(informees), + "Recipient parties of the transfer-in result must be stakeholders.", + ) + TransferResult.create( + requestId, + informees, + commonData.targetDomain, + verdict, + protocolVersion.v, + ) + } + + override def toProtoSomeEnvelopeContentV4: v4.EnvelopeContent.SomeEnvelopeContent = + v4.EnvelopeContent.SomeEnvelopeContent.TransferInMediatorMessage(toProtoV1) + + def toProtoV1: v1.TransferInMediatorMessage = + v1.TransferInMediatorMessage(tree = Some(tree.toProtoV1)) + + override def rootHash: Option[RootHash] = Some(tree.rootHash) + + override def viewType: ViewType = ViewType.TransferInViewType + + override def pretty: Pretty[TransferInMediatorMessage] = prettyOfClass(unnamedParam(_.tree)) + + @transient override protected lazy val companionObj: TransferInMediatorMessage.type = + TransferInMediatorMessage +} + +object TransferInMediatorMessage + extends 
HasProtocolVersionedWithContextCompanion[TransferInMediatorMessage, HashOps] { + + val supportedProtoVersions = SupportedProtoVersions( + ProtoVersion(1) -> VersionedProtoConverter(ProtocolVersion.v30)(v1.TransferInMediatorMessage)( + supportedProtoVersion(_)((hashOps, proto) => fromProtoV1(hashOps)(proto)), + _.toProtoV1.toByteString, + ) + ) + + def fromProtoV1(hashOps: HashOps)( + transferInMediatorMessageP: v1.TransferInMediatorMessage + ): ParsingResult[TransferInMediatorMessage] = + for { + tree <- ProtoConverter + .required("TransferInMediatorMessage.tree", transferInMediatorMessageP.tree) + .flatMap(TransferInViewTree.fromProtoV1(hashOps, _)) + _ <- EitherUtil.condUnitE( + tree.commonData.isFullyUnblinded, + OtherError(s"Transfer-in common data is blinded in request ${tree.rootHash}"), + ) + _ <- EitherUtil.condUnitE( + tree.view.isBlinded, + OtherError(s"Transfer-in view data is not blinded in request ${tree.rootHash}"), + ) + } yield TransferInMediatorMessage(tree) + + override def name: String = "TransferInMediatorMessage" +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/TransferOutMediatorMessage.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/TransferOutMediatorMessage.scala new file mode 100644 index 0000000000..6173c9a80d --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/TransferOutMediatorMessage.scala @@ -0,0 +1,132 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol.messages + +import com.digitalasset.canton.LfPartyId +import com.digitalasset.canton.ProtoDeserializationError.OtherError +import com.digitalasset.canton.config.RequireTypes.NonNegativeInt +import com.digitalasset.canton.crypto.HashOps +import com.digitalasset.canton.data.{Informee, TransferOutViewTree, ViewPosition, ViewType} +import com.digitalasset.canton.logging.pretty.Pretty +import com.digitalasset.canton.protocol.* +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.topology.{DomainId, MediatorRef} +import com.digitalasset.canton.util.EitherUtil +import com.digitalasset.canton.version.{ + HasProtocolVersionedWithContextCompanion, + ProtoVersion, + ProtocolVersion, + RepresentativeProtocolVersion, +} + +import java.util.UUID + +/** Message sent to the mediator as part of a transfer-out request + * + * @param tree The transfer-out view tree blinded for the mediator + * @throws java.lang.IllegalArgumentException if the common data is blinded or the view is not blinded + */ +final case class TransferOutMediatorMessage( + tree: TransferOutViewTree +) extends MediatorRequest + with UnsignedProtocolMessage { + require(tree.commonData.isFullyUnblinded, "The transfer-out common data must be unblinded") + require(tree.view.isBlinded, "The transfer-out view must be blinded") + + private[this] val commonData = tree.commonData.tryUnwrap + + val protocolVersion = commonData.protocolVersion + + override val representativeProtocolVersion + : RepresentativeProtocolVersion[TransferOutMediatorMessage.type] = + TransferOutMediatorMessage.protocolVersionRepresentativeFor(protocolVersion.v) + + override def domainId: DomainId = commonData.sourceDomain.unwrap + + override def mediator: MediatorRef = commonData.sourceMediator + + override def requestUuid: UUID = commonData.uuid + + 
override def informeesAndThresholdByViewHash: Map[ViewHash, (Set[Informee], NonNegativeInt)] = { + val confirmingParties = commonData.confirmingParties + val threshold = NonNegativeInt.tryCreate(confirmingParties.size) + Map(tree.viewHash -> ((confirmingParties, threshold))) + } + + override def informeesAndThresholdByViewPosition + : Map[ViewPosition, (Set[Informee], NonNegativeInt)] = { + val confirmingParties = commonData.confirmingParties + val threshold = NonNegativeInt.tryCreate(confirmingParties.size) + Map(tree.viewPosition -> ((confirmingParties, threshold))) + } + + override def minimumThreshold(informees: Set[Informee]): NonNegativeInt = NonNegativeInt.one + + override def createMediatorResult( + requestId: RequestId, + verdict: Verdict, + recipientParties: Set[LfPartyId], + ): MediatorResult with SignedProtocolMessageContent = { + val informees = commonData.stakeholders ++ commonData.adminParties + require( + recipientParties.subsetOf(informees), + "Recipient parties of the transfer-out result are neither stakeholders nor admin parties", + ) + TransferResult.create( + requestId, + informees, + commonData.sourceDomain, + verdict, + protocolVersion.v, + ) + } + + def toProtoV1: v1.TransferOutMediatorMessage = + v1.TransferOutMediatorMessage(tree = Some(tree.toProtoV1)) + + override def toProtoSomeEnvelopeContentV4: v4.EnvelopeContent.SomeEnvelopeContent = + v4.EnvelopeContent.SomeEnvelopeContent.TransferOutMediatorMessage(toProtoV1) + + override def rootHash: Option[RootHash] = Some(tree.rootHash) + + override def viewType: ViewType = ViewType.TransferOutViewType + + override def pretty: Pretty[TransferOutMediatorMessage] = prettyOfClass(unnamedParam(_.tree)) + + @transient override protected lazy val companionObj: TransferOutMediatorMessage.type = + TransferOutMediatorMessage +} + +object TransferOutMediatorMessage + extends HasProtocolVersionedWithContextCompanion[TransferOutMediatorMessage, HashOps] { + + val supportedProtoVersions = 
SupportedProtoVersions( + ProtoVersion(1) -> VersionedProtoConverter(ProtocolVersion.v30)( + v1.TransferOutMediatorMessage + )( + supportedProtoVersion(_)((hashOps, proto) => fromProtoV1(hashOps)(proto)), + _.toProtoV1.toByteString, + ) + ) + + def fromProtoV1(hashOps: HashOps)( + transferOutMediatorMessageP: v1.TransferOutMediatorMessage + ): ParsingResult[TransferOutMediatorMessage] = + for { + tree <- ProtoConverter + .required("TransferOutMediatorMessage.tree", transferOutMediatorMessageP.tree) + .flatMap(TransferOutViewTree.fromProtoV1(hashOps)) + _ <- EitherUtil.condUnitE( + tree.commonData.isFullyUnblinded, + OtherError(s"Transfer-out common data is blinded in request ${tree.rootHash}"), + ) + _ <- EitherUtil.condUnitE( + tree.view.isBlinded, + OtherError(s"Transfer-out view data is not blinded in request ${tree.rootHash}"), + ) + } yield TransferOutMediatorMessage(tree) + + override def name: String = "TransferOutMediatorMessage" +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/TransferResult.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/TransferResult.scala new file mode 100644 index 0000000000..e13a3f55a9 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/TransferResult.scala @@ -0,0 +1,239 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol.messages + +import cats.Functor +import cats.syntax.either.* +import cats.syntax.functorFilter.* +import cats.syntax.traverse.* +import com.digitalasset.canton.LfPartyId +import com.digitalasset.canton.ProtoDeserializationError.FieldNotSet +import com.digitalasset.canton.crypto.HashPurpose +import com.digitalasset.canton.data.ViewType +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.protocol.TransferDomainId.TransferDomainIdCast +import com.digitalasset.canton.protocol.messages.DeliveredTransferOutResult.InvalidTransferOutResult +import com.digitalasset.canton.protocol.messages.SignedProtocolMessageContent.SignedMessageContentCast +import com.digitalasset.canton.protocol.{ + RequestId, + SourceDomainId, + TargetDomainId, + TransferDomainId, + TransferId, + v0, + v3, +} +import com.digitalasset.canton.sequencing.RawProtocolEvent +import com.digitalasset.canton.sequencing.protocol.{Batch, Deliver, EventWithErrors, SignedContent} +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.topology.DomainId +import com.digitalasset.canton.version.* +import com.google.protobuf.ByteString + +/** Mediator result for a transfer request + * + * @param requestId timestamp of the corresponding [[TransferOutRequest]] on the source domain + */ +@SuppressWarnings(Array("org.wartremover.warts.FinalCaseClass")) // This class is mocked in tests +case class TransferResult[+Domain <: TransferDomainId] private ( + override val requestId: RequestId, + informees: Set[LfPartyId], + domain: Domain, // For transfer-out, this is the source domain. For transfer-in, this is the target domain. 
+ override val verdict: Verdict, +)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[TransferResult.type], + override val deserializedFrom: Option[ByteString], +) extends RegularMediatorResult + with HasProtocolVersionedWrapper[TransferResult[TransferDomainId]] + with PrettyPrinting { + + override def domainId: DomainId = domain.unwrap + + override def viewType: ViewType = domain.toViewType + + override protected[messages] def toProtoTypedSomeSignedProtocolMessage + : v0.TypedSignedProtocolMessageContent.SomeSignedProtocolMessage = + v0.TypedSignedProtocolMessageContent.SomeSignedProtocolMessage.TransferResult( + getCryptographicEvidence + ) + + @transient override protected lazy val companionObj: TransferResult.type = TransferResult + + private def toProtoV3: v3.TransferResult = { + val domainP = (domain: @unchecked) match { + case SourceDomainId(domainId) => + v3.TransferResult.Domain.SourceDomain(domainId.toProtoPrimitive) + case TargetDomainId(domainId) => + v3.TransferResult.Domain.TargetDomain(domainId.toProtoPrimitive) + } + v3.TransferResult( + requestId = Some(requestId.toProtoPrimitive), + domain = domainP, + informees = informees.toSeq, + verdict = Some(verdict.toProtoV3), + ) + } + + override protected[this] def toByteStringUnmemoized: ByteString = + super[HasProtocolVersionedWrapper].toByteString + + override def hashPurpose: HashPurpose = HashPurpose.TransferResultSignature + + @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) + private[TransferResult] def traverse[F[_], Domain2 <: TransferDomainId]( + f: Domain => F[Domain2] + )(implicit F: Functor[F]): F[TransferResult[Domain2]] = + F.map(f(domain)) { newDomain => + if (newDomain eq domain) this.asInstanceOf[TransferResult[Domain2]] + else if (newDomain == domain) + TransferResult(requestId, informees, newDomain, verdict)( + representativeProtocolVersion, + deserializedFrom, + ) + else + TransferResult(requestId, informees, newDomain, verdict)( + 
representativeProtocolVersion, + None, + ) + } + + override def pretty: Pretty[TransferResult[_ <: TransferDomainId]] = + prettyOfClass( + param("requestId", _.requestId.unwrap), + param("verdict", _.verdict), + param("informees", _.informees), + param("domain", _.domain), + ) +} + +object TransferResult + extends HasMemoizedProtocolVersionedWrapperCompanion[TransferResult[TransferDomainId]] { + override val name: String = "TransferResult" + + val supportedProtoVersions = SupportedProtoVersions( + ProtoVersion(3) -> VersionedProtoConverter(ProtocolVersion.v30)(v3.TransferResult)( + supportedProtoVersionMemoized(_)(fromProtoV3), + _.toProtoV3.toByteString, + ) + ) + + def create[Domain <: TransferDomainId]( + requestId: RequestId, + informees: Set[LfPartyId], + domain: Domain, + verdict: Verdict, + protocolVersion: ProtocolVersion, + ): TransferResult[Domain] = + TransferResult[Domain](requestId, informees, domain, verdict)( + protocolVersionRepresentativeFor(protocolVersion), + None, + ) + + private def fromProtoV3(transferResultP: v3.TransferResult)( + bytes: ByteString + ): ParsingResult[TransferResult[TransferDomainId]] = { + val v3.TransferResult(maybeRequestIdPO, domainP, informeesP, verdictPO) = transferResultP + import v3.TransferResult.Domain + for { + requestId <- ProtoConverter + .required("TransferOutResult.requestId", maybeRequestIdPO) + .flatMap(RequestId.fromProtoPrimitive) + domain <- domainP match { + case Domain.SourceDomain(sourceDomain) => + DomainId + .fromProtoPrimitive(sourceDomain, "TransferResult.sourceDomain") + .map(SourceDomainId(_)) + case Domain.TargetDomain(targetDomain) => + DomainId + .fromProtoPrimitive(targetDomain, "TransferResult.targetDomain") + .map(TargetDomainId(_)) + case Domain.Empty => Left(FieldNotSet("TransferResponse.domain")) + } + informees <- informeesP.traverse(ProtoConverter.parseLfPartyId) + verdict <- ProtoConverter + .required("TransferResult.verdict", verdictPO) + .flatMap(Verdict.fromProtoV3) + } yield 
TransferResult(requestId, informees.toSet, domain, verdict)( + protocolVersionRepresentativeFor(ProtoVersion(3)), + Some(bytes), + ) + } + + implicit def transferResultCast[Kind <: TransferDomainId](implicit + cast: TransferDomainIdCast[Kind] + ): SignedMessageContentCast[TransferResult[Kind]] = + SignedMessageContentCast.create[TransferResult[Kind]]("TransferResult") { + case result: TransferResult[TransferDomainId] => result.traverse(cast.toKind) + case _ => None + } +} + +final case class DeliveredTransferOutResult(result: SignedContent[Deliver[DefaultOpenEnvelope]]) + extends PrettyPrinting { + + val unwrap: TransferOutResult = result.content match { + case Deliver(_, _, _, _, Batch(envelopes)) => + val transferOutResults = + envelopes.mapFilter(ProtocolMessage.select[SignedProtocolMessage[TransferOutResult]]) + val size = transferOutResults.size + if (size != 1) + throw InvalidTransferOutResult( + result.content, + s"The deliver event must contain exactly one transfer-out result, but found $size.", + ) + transferOutResults(0).protocolMessage.message + } + + unwrap.verdict match { + case _: Verdict.Approve => () + case _: Verdict.MediatorReject | _: Verdict.ParticipantReject => + throw InvalidTransferOutResult(result.content, "The transfer-out result must be approving.") + } + + def transferId: TransferId = TransferId(unwrap.domain, unwrap.requestId.unwrap) + + override def pretty: Pretty[DeliveredTransferOutResult] = prettyOfParam(_.unwrap) +} + +object DeliveredTransferOutResult { + + final case class InvalidTransferOutResult( + transferOutResult: RawProtocolEvent, + message: String, + ) extends RuntimeException(s"$message: $transferOutResult") + + def create( + resultE: Either[ + EventWithErrors[Deliver[DefaultOpenEnvelope]], + SignedContent[RawProtocolEvent], + ] + ): Either[InvalidTransferOutResult, DeliveredTransferOutResult] = + for { + // The event signature would be invalid if some envelopes could not be opened upstream. 
+ // However, this should not happen, because transfer out messages are sent by the mediator, + // who is trusted not to send bad envelopes. + result <- resultE match { + case Left(eventWithErrors) => + Left( + InvalidTransferOutResult( + eventWithErrors.content, + "Result event contains envelopes that could not be deserialized.", + ) + ) + case Right(event) => Right(event) + } + castToDeliver <- result + .traverse(Deliver.fromSequencedEvent) + .toRight( + InvalidTransferOutResult( + result.content, + "Only a Deliver event contains a transfer-out result.", + ) + ) + deliveredTransferOutResult <- Either.catchOnly[InvalidTransferOutResult] { + DeliveredTransferOutResult(castToDeliver) + } + } yield deliveredTransferOutResult +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/TypedSignedProtocolMessageContent.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/TypedSignedProtocolMessageContent.scala new file mode 100644 index 0000000000..03c232bfd4 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/TypedSignedProtocolMessageContent.scala @@ -0,0 +1,115 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol.messages + +import cats.Functor +import com.digitalasset.canton.ProtoDeserializationError.OtherError +import com.digitalasset.canton.protocol.v0 +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.serialization.ProtocolVersionedMemoizedEvidence +import com.digitalasset.canton.version.* +import com.google.common.annotations.VisibleForTesting +import com.google.protobuf.ByteString + +@SuppressWarnings(Array("org.wartremover.warts.FinalCaseClass")) // This class is mocked in tests +case class TypedSignedProtocolMessageContent[+M <: SignedProtocolMessageContent] private ( + content: M +)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + TypedSignedProtocolMessageContent.type + ], + override val deserializedFrom: Option[ByteString], +) extends HasProtocolVersionedWrapper[ + TypedSignedProtocolMessageContent[SignedProtocolMessageContent] + ] + with ProtocolVersionedMemoizedEvidence { + + @transient override protected lazy val companionObj: TypedSignedProtocolMessageContent.type = + TypedSignedProtocolMessageContent + + override protected[this] def toByteStringUnmemoized: ByteString = + super[HasProtocolVersionedWrapper].toByteString + + private def toProtoV0: v0.TypedSignedProtocolMessageContent = + v0.TypedSignedProtocolMessageContent( + someSignedProtocolMessage = content.toProtoTypedSomeSignedProtocolMessage + ) + + @VisibleForTesting + def copy[MM <: SignedProtocolMessageContent]( + content: MM = this.content + ): TypedSignedProtocolMessageContent[MM] = + TypedSignedProtocolMessageContent(content)(representativeProtocolVersion, None) + + @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) + private[messages] def traverse[F[_], MM <: SignedProtocolMessageContent]( + f: M => F[MM] + )(implicit F: Functor[F]): F[TypedSignedProtocolMessageContent[MM]] = { + F.map(f(content)) { newContent => + 
if (newContent eq content) this.asInstanceOf[TypedSignedProtocolMessageContent[MM]] + else + TypedSignedProtocolMessageContent(newContent)( + representativeProtocolVersion, + deserializedFrom, + ) + } + } +} + +object TypedSignedProtocolMessageContent + extends HasMemoizedProtocolVersionedWrapperCompanion[ + TypedSignedProtocolMessageContent[SignedProtocolMessageContent] + ] { + override def name: String = "TypedSignedProtocolMessageContent" + + override def supportedProtoVersions: SupportedProtoVersions = SupportedProtoVersions( + ProtoVersion(0) -> VersionedProtoConverter( + ProtocolVersion.v30 + )(v0.TypedSignedProtocolMessageContent)( + supportedProtoVersionMemoized(_)(fromProtoV0), + _.toProtoV0.toByteString, + ) + ) + + def apply[M <: SignedProtocolMessageContent]( + content: M, + protocolVersion: ProtocolVersion, + ): TypedSignedProtocolMessageContent[M] = + TypedSignedProtocolMessageContent(content)( + protocolVersionRepresentativeFor(protocolVersion), + None, + ) + + def apply[M <: SignedProtocolMessageContent]( + content: M, + protoVersion: ProtoVersion, + ): TypedSignedProtocolMessageContent[M] = + TypedSignedProtocolMessageContent(content)(protocolVersionRepresentativeFor(protoVersion), None) + + private def fromProtoV0(proto: v0.TypedSignedProtocolMessageContent)( + bytes: ByteString + ): ParsingResult[TypedSignedProtocolMessageContent[SignedProtocolMessageContent]] = { + import v0.TypedSignedProtocolMessageContent.SomeSignedProtocolMessage as Sm + val v0.TypedSignedProtocolMessageContent(messageBytes) = proto + for { + message <- (messageBytes match { + case Sm.MediatorResponse(mediatorResponseBytes) => + MediatorResponse.fromByteString(mediatorResponseBytes) + case Sm.TransactionResult(transactionResultMessageBytes) => + TransactionResultMessage.fromByteString(transactionResultMessageBytes) + case Sm.TransferResult(transferResultBytes) => + TransferResult.fromByteString(transferResultBytes) + case Sm.AcsCommitment(acsCommitmentBytes) => + 
AcsCommitment.fromByteString(acsCommitmentBytes) + case Sm.MalformedMediatorRequestResult(malformedMediatorRequestResultBytes) => + MalformedMediatorRequestResult.fromByteString(malformedMediatorRequestResultBytes) + case Sm.Empty => + Left(OtherError("Deserialization of a SignedMessage failed due to a missing message")) + }): ParsingResult[SignedProtocolMessageContent] + } yield TypedSignedProtocolMessageContent(message)( + protocolVersionRepresentativeFor(ProtoVersion(0)), + Some(bytes), + ) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/Verdict.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/Verdict.scala new file mode 100644 index 0000000000..5b41234e32 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/Verdict.scala @@ -0,0 +1,219 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol.messages + +import cats.syntax.traverse.* +import com.daml.error.ContextualizedErrorLogger +import com.daml.error.utils.DeserializedCantonError +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.LfPartyId +import com.digitalasset.canton.ProtoDeserializationError.{InvariantViolation, OtherError} +import com.digitalasset.canton.error.* +import com.digitalasset.canton.logging.ErrorLoggingContext +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.protocol.* +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.version.* +import com.google.protobuf.empty +import com.google.rpc.status.Status +import pprint.Tree + +import scala.Ordered.orderingToOrdered + +trait TransactionRejection { + def logWithContext(extra: Map[String, String] 
= Map())(implicit + contextualizedErrorLogger: ContextualizedErrorLogger + ): Unit + + def rpcStatusWithoutLoggingContext(): com.google.rpc.status.Status +} + +/** Verdicts sent from the mediator to the participants inside the [[MediatorResult]] message */ +sealed trait Verdict + extends Product + with Serializable + with PrettyPrinting + with HasProtocolVersionedWrapper[Verdict] { + + /** Whether the verdict represents a timeout that the mediator has determined. */ + def isTimeoutDeterminedByMediator: Boolean + + @transient override protected lazy val companionObj: Verdict.type = Verdict + + private[messages] def toProtoV3: v3.Verdict +} + +object Verdict + extends HasProtocolVersionedCompanion[Verdict] + with ProtocolVersionedCompanionDbHelpers[Verdict] { + + val supportedProtoVersions = SupportedProtoVersions( + ProtoVersion(3) -> VersionedProtoConverter(ProtocolVersion.v30)(v3.Verdict)( + supportedProtoVersion(_)(fromProtoV3), + _.toProtoV3.toByteString, + ) + ) + + final case class Approve()( + override val representativeProtocolVersion: RepresentativeProtocolVersion[Verdict.type] + ) extends Verdict { + override def isTimeoutDeterminedByMediator: Boolean = false + + private[messages] override def toProtoV3: v3.Verdict = + v3.Verdict(someVerdict = v3.Verdict.SomeVerdict.Approve(empty.Empty())) + + override def pretty: Pretty[Verdict] = prettyOfString(_ => "Approve") + } + + object Approve { + def apply(protocolVersion: ProtocolVersion): Approve = Approve()( + Verdict.protocolVersionRepresentativeFor(protocolVersion) + ) + } + + final case class MediatorReject private (status: com.google.rpc.status.Status)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[Verdict.type] + ) extends Verdict + with TransactionRejection { + require(status.code != com.google.rpc.Code.OK_VALUE, "Rejection must not use status code OK") + + private[messages] override def toProtoV3: v3.Verdict = + 
v3.Verdict(v3.Verdict.SomeVerdict.MediatorReject(toProtoMediatorRejectV2)) + + def toProtoMediatorRejectV2: v2.MediatorReject = v2.MediatorReject(Some(status)) + + override def pretty: Pretty[MediatorReject.this.type] = prettyOfClass( + unnamedParam(_.status) + ) + + override def logWithContext(extra: Map[String, String])(implicit + contextualizedErrorLogger: ContextualizedErrorLogger + ): Unit = + DeserializedCantonError.fromGrpcStatus(status) match { + case Right(error) => error.logWithContext(extra) + case Left(err) => + contextualizedErrorLogger.warn(s"Failed to parse mediator rejection: $err") + } + + override def rpcStatusWithoutLoggingContext(): Status = status + + override def isTimeoutDeterminedByMediator: Boolean = + DeserializedCantonError.fromGrpcStatus(status).exists(_.code.id == MediatorError.Timeout.id) + } + + object MediatorReject { + // TODO(#15628) Make it safe (intercept the exception and return an either) + def tryCreate( + status: com.google.rpc.status.Status, + protocolVersion: ProtocolVersion, + ): MediatorReject = + MediatorReject(status)(Verdict.protocolVersionRepresentativeFor(protocolVersion)) + + private[messages] def fromProtoV2( + mediatorRejectP: v2.MediatorReject + ): ParsingResult[MediatorReject] = { + // Proto version 3 because mediator rejections are versioned according to verdicts + // and verdicts use mediator reject V2 in proto version 3. 
+ val representativeProtocolVersion = protocolVersionRepresentativeFor(ProtoVersion(3)) + + val v2.MediatorReject(statusO) = mediatorRejectP + for { + status <- ProtoConverter.required("rejection_reason", statusO) + } yield MediatorReject(status)(representativeProtocolVersion) + } + } + + /** @param reasons Mapping from the parties of a [[com.digitalasset.canton.protocol.messages.MediatorResponse]] + * to the rejection reason from the [[com.digitalasset.canton.protocol.messages.MediatorResponse]] + */ + final case class ParticipantReject(reasons: NonEmpty[List[(Set[LfPartyId], LocalReject)]])( + override val representativeProtocolVersion: RepresentativeProtocolVersion[Verdict.type] + ) extends Verdict { + + private[messages] override def toProtoV3: v3.Verdict = { + val reasonsP = v2.ParticipantReject(reasons.map { case (parties, message) => + v2.RejectionReason(parties.toSeq, Some(message.toLocalRejectProtoV1)) + }) + v3.Verdict(someVerdict = v3.Verdict.SomeVerdict.ParticipantReject(reasonsP)) + } + + override def pretty: Pretty[ParticipantReject] = { + import Pretty.PrettyOps + + prettyOfClass( + unnamedParam( + _.reasons.map { case (parties, reason) => + Tree.Infix(reason.toTree, "- reported by:", parties.toTree) + } + ) + ) + } + + /** Returns the rejection reason with the highest [[com.daml.error.ErrorCategory]] */ + def keyEvent(implicit loggingContext: ErrorLoggingContext): LocalReject = { + if (reasons.lengthCompare(1) > 0) { + val message = show"Request was rejected with multiple reasons. 
$reasons" + loggingContext.logger.info(message)(loggingContext.traceContext) + } + reasons.map { case (_, localReject) => localReject }.maxBy1(_.code.category) + } + + override def isTimeoutDeterminedByMediator: Boolean = false + } + + object ParticipantReject { + def apply( + reasons: NonEmpty[List[(Set[LfPartyId], LocalReject)]], + protocolVersion: ProtocolVersion, + ): ParticipantReject = + ParticipantReject(reasons)(Verdict.protocolVersionRepresentativeFor(protocolVersion)) + + private def fromProtoRejectionReasonsV2( + reasonsP: Seq[v2.RejectionReason], + pv: RepresentativeProtocolVersion[Verdict.type], + ): ParsingResult[ParticipantReject] = + for { + reasons <- reasonsP.traverse(fromProtoReasonV2) + reasonsNE <- NonEmpty + .from(reasons.toList) + .toRight(InvariantViolation("Field reasons must not be empty!")) + } yield ParticipantReject(reasonsNE)(pv) + + def fromProtoV2( + participantRejectP: v2.ParticipantReject, + pv: RepresentativeProtocolVersion[Verdict.type], + ): ParsingResult[ParticipantReject] = { + val v2.ParticipantReject(reasonsP) = participantRejectP + fromProtoRejectionReasonsV2(reasonsP, pv) + } + } + + override def name: String = "verdict" + + def fromProtoV3(verdictP: v3.Verdict): ParsingResult[Verdict] = { + val v3.Verdict(someVerdictP) = verdictP + import v3.Verdict.{SomeVerdict as V} + + val representativeProtocolVersion = protocolVersionRepresentativeFor(ProtoVersion(3)) + someVerdictP match { + case V.Approve(empty.Empty(_)) => Right(Approve()(representativeProtocolVersion)) + case V.MediatorReject(mediatorRejectP) => + MediatorReject.fromProtoV2(mediatorRejectP) + case V.ParticipantReject(participantRejectP) => + ParticipantReject.fromProtoV2(participantRejectP, representativeProtocolVersion) + case V.Empty => Left(OtherError("empty verdict type")) + } + } + + private def fromProtoReasonV2( + protoReason: v2.RejectionReason + ): ParsingResult[(Set[LfPartyId], LocalReject)] = { + val v2.RejectionReason(partiesP, messageP) = protoReason 
+ for { + parties <- partiesP.traverse(ProtoConverter.parseLfPartyId).map(_.toSet) + message <- ProtoConverter.parseRequired(LocalReject.fromProtoV1, "reject", messageP) + } yield (parties, message) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/package.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/package.scala new file mode 100644 index 0000000000..fec82a7492 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/package.scala @@ -0,0 +1,35 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protocol + +import com.digitalasset.canton.data.ViewType.TransactionViewType +import com.digitalasset.canton.sequencing.protocol.{AllMembersOfDomain, OpenEnvelope} + +/** This package contains data structures used in the transaction protocol. + * However, generic data structures, e.g. [[com.digitalasset.canton.data.MerkleTree]] etc. are + * kept in [[com.digitalasset.canton.data]] package. 
+ */ +package object messages { + + type TransferOutResult = TransferResult[SourceDomainId] + val TransferOutResult: TransferResult.type = TransferResult + + type TransferInResult = TransferResult[TargetDomainId] + val TransferInResult: TransferResult.type = TransferResult + + type TransactionViewMessage = EncryptedViewMessage[TransactionViewType] + + type DefaultOpenEnvelope = OpenEnvelope[ProtocolMessage] + object DefaultOpenEnvelopesFilter { + def containsTopologyX(envelopes: Seq[DefaultOpenEnvelope]): Boolean = envelopes.exists { + envelope => + val broadcastO = ProtocolMessage.select[TopologyTransactionsBroadcastX](envelope) + val envelopeIsValidBroadcast = + broadcastO.exists(_.recipients.allRecipients.contains(AllMembersOfDomain)) + + envelopeIsValidBroadcast + } + } + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/package.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/package.scala new file mode 100644 index 0000000000..dc5cb03b79 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/protocol/package.scala @@ -0,0 +1,121 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton + +import com.daml.lf.crypto.Hash +import com.daml.lf.data.Ref +import com.daml.lf.transaction.* +import com.daml.lf.value.Value +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.data.{RepairContract, ViewType} +import com.digitalasset.canton.protocol.messages.EncryptedViewMessage +import com.digitalasset.canton.sequencing.protocol.OpenEnvelope + +/** Provides shorthands for general purpose types. + *

+ * Most notably, it provides a facade for Daml-LF transactions and nodes. + * By default, code should access Daml-LF transaction and nodes types through this facade. + */ +package object protocol { + + /** Shorthand for Daml-LF contract ids */ + type LfContractId = Value.ContractId + val LfContractId: Value.ContractId.type = Value.ContractId + + type LfNodeId = NodeId + val LfNodeId: NodeId.type = NodeId + + /** Shorthand for Daml-LF transaction wrapped in versioned transaction in turn wrapped in + * committed or submitted transaction + */ + type LfTransaction = Transaction + val LfTransaction: Transaction.type = Transaction + + val LfTransactionErrors: TransactionErrors.type = TransactionErrors + + type LfVersionedTransaction = VersionedTransaction + val LfVersionedTransaction: VersionedTransaction.type = VersionedTransaction + + type LfCommittedTransaction = CommittedTransaction + val LfCommittedTransaction: CommittedTransaction.type = CommittedTransaction + + type LfSubmittedTransaction = SubmittedTransaction + val LfSubmittedTransaction: SubmittedTransaction.type = SubmittedTransaction + + type LfTransactionVersion = TransactionVersion + val LfTransactionVersion: TransactionVersion.type = TransactionVersion + + val DummyTransactionVersion: LfTransactionVersion = TransactionVersion.maxVersion + + // Ledger transaction statistics based on lf transaction nodes + type LedgerTransactionNodeStatistics = TransactionNodeStatistics + val LedgerTransactionNodeStatistics: TransactionNodeStatistics.type = TransactionNodeStatistics + + /** Shorthand for Daml-LF nodes. + * Nodes include `NodeId`s of their children. + * Children need to be looked up in the underlying transaction. + */ + type LfNode = Node + + /** Shorthand for Daml-LF "action" nodes (all node types besides "rollback" nodes) + */ + type LfActionNode = Node.Action + + /** Shorthand for create nodes. 
*/ + type LfNodeCreate = Node.Create + val LfNodeCreate: Node.Create.type = Node.Create + + /** Shorthand for fetch nodes. */ + type LfNodeFetch = Node.Fetch + val LfNodeFetch: Node.Fetch.type = Node.Fetch + + /** Shorthand for exercise nodes. + * Nodes include `NodeId`s of their children. + * Children need to be looked up in the underlying transaction. + */ + type LfNodeExercises = Node.Exercise + val LfNodeExercises: Node.Exercise.type = Node.Exercise + + /** Shorthand for lookup by key nodes. */ + type LfNodeLookupByKey = Node.LookupByKey + val LfNodeLookupByKey: Node.LookupByKey.type = Node.LookupByKey + + /** Shorthand for rollback nodes. */ + type LfNodeRollback = Node.Rollback + val LfNodeRollback: Node.Rollback.type = Node.Rollback + + /** Shorthand for leaf only action nodes. */ + type LfLeafOnlyActionNode = Node.LeafOnlyAction + + /** Shorthand for contract instances. */ + type LfContractInst = Value.VersionedContractInstance + val LfContractInst: Value.VersionedContractInstance.type = Value.VersionedContractInstance + + type LfHash = Hash + val LfHash: Hash.type = Hash + + /** Shorthand for global contract keys (with template_id). 
*/ + type LfGlobalKey = GlobalKey + val LfGlobalKey: GlobalKey.type = GlobalKey + + type LfGlobalKeyWithMaintainers = GlobalKeyWithMaintainers + val LfGlobalKeyWithMaintainers: GlobalKeyWithMaintainers.type = GlobalKeyWithMaintainers + + type LfTemplateId = Ref.TypeConName + val LfTemplateId: Ref.TypeConName.type = Ref.TypeConName + + type LfChoiceName = Ref.ChoiceName + val LfChoiceName: Ref.ChoiceName.type = Ref.ChoiceName + + type RequestProcessor[VT <: ViewType] = + Phase37Processor[RequestAndRootHashMessage[OpenEnvelope[EncryptedViewMessage[VT]]]] + + def maxTransactionVersion(versions: NonEmpty[Seq[LfTransactionVersion]]): LfTransactionVersion = + versions.reduceLeft[LfTransactionVersion](LfTransactionVersion.Ordering.max) + + // Enables backward-compatibility so that existing repair scripts do not break + // TODO(#14441): Remove this alias + type SerializableContractWithWitnesses = RepairContract + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/pruning/PruningPhase.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/pruning/PruningPhase.scala new file mode 100644 index 0000000000..60d2de86d7 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/pruning/PruningPhase.scala @@ -0,0 +1,73 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.pruning + +import com.digitalasset.canton.config.CantonRequireTypes.String100 +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.store.db.DbSerializationException +import slick.jdbc.{GetResult, SetParameter} + +sealed trait PruningPhase extends Product with Serializable with PrettyPrinting { + def kind: String + // lazy val so that `kind` is initialized first in the subclasses + final lazy val toDbPrimitive: String100 = + // The Oracle DB schemas set a limit of 100 characters, Postgres and H2 map it to an enum + String100.tryCreate(kind) + def index: Int + + override def pretty: Pretty[PruningPhase] = prettyOfParam(_.kind.unquoted) +} + +object PruningPhase { + case object Started extends PruningPhase { + override val kind: String = "started" + override def index: Int = 0 + } + case object Completed extends PruningPhase { + override val kind: String = "completed" + override def index: Int = 1 + } + + def tryFromDbPrimitive: String => PruningPhase = { + case Started.kind => Started + case Completed.kind => Completed + case other => throw new DbSerializationException(s"Unknown pruning phase $other") + } + + implicit val orderingPruningPhase: Ordering[PruningPhase] = + Ordering.by[PruningPhase, Int](_.index) + + implicit val getResultPruningPhase: GetResult[PruningPhase] = + GetResult(r => PruningPhase.tryFromDbPrimitive(r.nextString())) + implicit val setParameterPruningPhase: SetParameter[PruningPhase] = (d, pp) => + pp >> d.toDbPrimitive +} + +final case class PruningStatus( + phase: PruningPhase, + timestamp: CantonTimestamp, + lastSuccess: Option[CantonTimestamp], +) extends PrettyPrinting { + override def pretty: Pretty[PruningStatus] = prettyOfClass( + param("phase", _.phase), + param("timestamp", _.timestamp), + param("lastSuccess", _.lastSuccess), + ) +} + +object PruningStatus { + 
implicit val orderingPruningStatus: Ordering[PruningStatus] = + Ordering.by[PruningStatus, (CantonTimestamp, PruningPhase)](status => + (status.timestamp, status.phase) + ) + + implicit val getResultPruningStatus: GetResult[PruningStatus] = GetResult(r => + PruningStatus( + PruningPhase.getResultPruningPhase(r), + GetResult[CantonTimestamp].apply(r), + GetResult[Option[CantonTimestamp]].apply(r), + ) + ) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/resource/DatabaseStorageError.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/resource/DatabaseStorageError.scala new file mode 100644 index 0000000000..7a227cdf23 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/resource/DatabaseStorageError.scala @@ -0,0 +1,64 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.resource + +import com.daml.error.{ErrorCategory, ErrorCode, Explanation, Resolution} +import com.digitalasset.canton.error.CantonError +import com.digitalasset.canton.error.CantonErrorGroups.StorageErrorGroup +import com.digitalasset.canton.logging.ErrorLoggingContext + +object DatabaseStorageError extends StorageErrorGroup { + + @Explanation( + """This error indicates that degradation of database storage components.""" + ) + @Resolution( + s"""This error indicates performance degradation. The error occurs when a database task has been rejected, + |typically due to having a too small task queue. The task will be retried after a delay. + |If this error occurs frequently, however, you may want to consider increasing the task queue. 
+ |(Config parameter: canton..storage.config.queueSize).""" + ) + object DatabaseStorageDegradation + extends ErrorCode( + id = "DB_STORAGE_DEGRADATION", + ErrorCategory.BackgroundProcessDegradationWarning, + ) { + + override protected def exposedViaApi: Boolean = false + + final case class DatabaseTaskRejected(messageFromSlick: String)(implicit + val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = s"""A database task was rejected from the database task queue. + |The full error message from the task queue is: + |$messageFromSlick""".stripMargin + ) {} + } + + @Explanation( + """This error indicates that the connection to the database has been lost.""" + ) + @Resolution("Inspect error message for details.") + object DatabaseConnectionLost + extends ErrorCode( + id = "DB_CONNECTION_LOST", + ErrorCategory.BackgroundProcessDegradationWarning, + ) { + + override protected def exposedViaApi: Boolean = false + + @Resolution( + s"""This error indicates that during a database connection health check it was detected that + | it is not possible to connect to the database. That is, an attempt has been made to connect + | but it either timed out or failed to check that the connection was valid.""".stripMargin + ) + final case class DatabaseConnectionLost(messageFromSlick: String)(implicit + val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = + s"Database health check failed to establish a valid connection: $messageFromSlick", + throwableO = None, + ) {} + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/resource/DbMigrations.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/resource/DbMigrations.scala new file mode 100644 index 0000000000..0835bbb208 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/resource/DbMigrations.scala @@ -0,0 +1,345 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. 
All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.resource + +import cats.data.EitherT +import cats.syntax.either.* +import com.daml.nameof.NameOf.functionFullName +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.config.{DbConfig, ProcessingTimeout} +import com.digitalasset.canton.environment.CantonNodeParameters +import com.digitalasset.canton.lifecycle.{CloseContext, UnlessShutdown} +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.resource.DbStorage.RetryConfig +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.ShowUtil.* +import com.digitalasset.canton.util.retry.RetryEither +import com.digitalasset.canton.util.{LoggerUtil, ResourceUtil} +import org.flywaydb.core.Flyway +import org.flywaydb.core.api.FlywayException +import slick.jdbc.JdbcBackend.Database +import slick.jdbc.hikaricp.HikariCPJdbcDataSource +import slick.jdbc.{DataSourceJdbcDataSource, JdbcBackend, JdbcDataSource} + +import java.sql.SQLException +import javax.sql.DataSource +import scala.concurrent.blocking +import scala.concurrent.duration.Duration + +trait DbMigrationsFactory { + + def create(dbConfig: DbConfig, devVersionSupport: Boolean)(implicit + closeContext: CloseContext + ): DbMigrations + + def create(dbConfig: DbConfig, name: String, devVersionSupport: Boolean)(implicit + closeContext: CloseContext + ): DbMigrations + +} + +trait DbMigrations { this: NamedLogging => + + implicit protected def closeContext: CloseContext + + /** Whether we want to add the schema files found in the dev folder to the migration + * + * A user that does that, won't be able to upgrade to new Canton versions, as we reserve our right to just + * modify the dev version files in any way we like. 
+ */ + protected def devVersionSupport: Boolean + + /** Database is migrated using Flyway, which looks at the migration files at + * src/main/resources/db/migration/canton as explained at https://flywaydb.org/documentation/getstarted/firststeps/api + */ + protected def createFlyway(dataSource: DataSource): Flyway = { + Flyway.configure + .locations(dbConfig.buildMigrationsPaths(devVersionSupport): _*) + .dataSource(dataSource) + .cleanDisabled(!dbConfig.parameters.unsafeCleanOnValidationError) + .cleanOnValidationError(dbConfig.parameters.unsafeCleanOnValidationError) + .baselineOnMigrate(dbConfig.parameters.unsafeBaselineOnMigrate) + // NOTE(review): 60 lock retries — presumably to tolerate concurrent migration attempts from several nodes; confirm intent + .lockRetryCount(60) + .load() + } + + // Opens a dedicated single-connection database (PositiveInt.one, no query-deadlock scheduler) and closes it once `fn` completes. + protected def withCreatedDb[A](retryConfig: DbStorage.RetryConfig)( + fn: Database => EitherT[UnlessShutdown, DbMigrations.Error, A] + ): EitherT[UnlessShutdown, DbMigrations.Error, A] = { + DbStorage + .createDatabase( + dbConfig, + PositiveInt.one, + scheduler = None, // no db query deadlock detection here + forMigration = true, + retryConfig = retryConfig, + )(loggerFactory) + .leftMap(DbMigrations.DatabaseError) + .flatMap(db => ResourceUtil.withResource(db)(fn)) + } + + /** Obtain access to the database to run the migration operation. 
*/ + // Implemented by subclasses (see CommunityDbMigrations below) to provide the database handle. + protected def withDb[A]( + retryConfig: DbStorage.RetryConfig = DbStorage.RetryConfig.failFast + )(fn: Database => EitherT[UnlessShutdown, DbMigrations.Error, A])(implicit + traceContext: TraceContext + ): EitherT[UnlessShutdown, DbMigrations.Error, A] + + protected def migrateDatabaseInternal( + flyway: Flyway + )(implicit traceContext: TraceContext): EitherT[UnlessShutdown, DbMigrations.Error, Unit] = { + // Retry the migration in case of failures, which may happen due to a race condition in concurrent migrations + RetryEither.retry[DbMigrations.Error, Unit](10, 100, functionFullName, logger) { + Either + .catchOnly[FlywayException](flyway.migrate()) + .map(r => logger.info(s"Applied ${r.migrationsExecuted} migrations successfully")) + .leftMap(DbMigrations.FlywayError) + } + } + + protected def repairFlywayMigrationInternal( + flyway: Flyway + )(implicit traceContext: TraceContext): EitherT[UnlessShutdown, DbMigrations.Error, Unit] = { + Either + .catchOnly[FlywayException](flyway.repair()) + .map(r => + logger.info( + s"The repair of the Flyway database migration succeeded. This is the Flyway repair report: $r" + ) + ) + .leftMap[DbMigrations.Error](DbMigrations.FlywayError) + .toEitherT[UnlessShutdown] + } + + protected def dbConfig: DbConfig + + /** Migrate the database with all pending migrations. */ + def migrateDatabase(): EitherT[UnlessShutdown, DbMigrations.Error, Unit] = + TraceContext.withNewTraceContext { implicit traceContext => + withDb() { createdDb => + ResourceUtil.withResource(createdDb) { db => + val flyway = createFlyway(DbMigrations.createDataSource(db.source)) + migrateDatabaseInternal(flyway) + } + } + } + + /** Repair the database in case the migrations files changed (e.g. due to comment changes). + * To quote the Flyway documentation: + * {{{ + * Repair is your tool to fix issues with the schema history table. 
It has a few main uses: + * + * - Remove failed migration entries (only for databases that do NOT support DDL transactions) + * - Realign the checksums, descriptions, and types of the applied migrations with the ones of the available migrations + * - Mark all missing migrations as deleted + * }}} + */ + def repairFlywayMigration(): EitherT[UnlessShutdown, DbMigrations.Error, Unit] = + TraceContext.withNewTraceContext { implicit traceContext => + withFlyway(repairFlywayMigrationInternal) + } + + protected def withFlyway[A]( + fn: Flyway => EitherT[UnlessShutdown, DbMigrations.Error, A] + )(implicit + traceContext: TraceContext + ): EitherT[UnlessShutdown, DbMigrations.Error, A] = + withDb() { createdDb => + ResourceUtil.withResource(createdDb) { db => + val flyway = createFlyway(DbMigrations.createDataSource(db.source)) + fn(flyway) + } + } + + // Creates one trial connection and checks `isValid` within the network timeout; any failure maps to DatabaseError. + private def connectionCheck( + source: JdbcDataSource, + processingTimeout: ProcessingTimeout, + ): EitherT[UnlessShutdown, DbMigrations.Error, Unit] = { + ResourceUtil + .withResourceEither(source.createConnection()) { conn => + val valid = blocking { + Either.catchOnly[SQLException]( + conn.isValid(processingTimeout.network.duration.toSeconds.toInt) + ) + } + valid + .leftMap(err => show"failed to check connection $err") + .flatMap { valid => + Either.cond( + valid, + (), + "A trial database connection was not valid", + ) + } + .leftMap[DbMigrations.Error](err => DbMigrations.DatabaseError(err)) + } + .valueOr { err => + Left(DbMigrations.DatabaseError(s"failed to create connection ${err.getMessage}")) + } + .toEitherT[UnlessShutdown] + } + + /** Startup entry point: verifies connectivity and db version, then either migrates immediately + * (`params.dbMigrateAndStart`) or migrates only a fresh db and fails on pending migrations. + */ + def checkAndMigrate(params: CantonNodeParameters, retryConfig: RetryConfig)(implicit + tc: TraceContext + ): EitherT[UnlessShutdown, DbMigrations.Error, Unit] = { + val standardConfig = !params.nonStandardConfig + val started = System.nanoTime() + withDb(retryConfig) { createdDb => + ResourceUtil.withResource(createdDb) { db => + val flyway = 
createFlyway(DbMigrations.createDataSource(db.source)) + for { + _ <- connectionCheck(db.source, params.processingTimeouts) + _ <- checkDbVersion(db, params.processingTimeouts, standardConfig) + _ <- + if (params.dbMigrateAndStart) + migrateAndStartInternal(flyway) + else + migrateIfFreshAndCheckPending(flyway) + } yield { + val elapsed = System.nanoTime() - started + logger.debug( + s"Finished setting up database schemas after ${LoggerUtil + .roundDurationForHumans(Duration.fromNanos(elapsed))}" + ) + } + } + } + } + + private def migrateAndStartInternal(flyway: Flyway)(implicit + traceContext: TraceContext + ): EitherT[UnlessShutdown, DbMigrations.Error, Unit] = { + val info = flyway.info() + if (info.pending().nonEmpty) { + // NOTE(review): `info.applied().length` is the count of applied migrations, not a version number — the log wording below may mislead. + logger.info( + s"There are ${info.pending().length} pending migrations for the db that is at version ${info.applied().length}. Performing migration before start." + ) + migrateDatabaseInternal(flyway) + } else { + logger.debug("Db schema is already up to date") + EitherT.rightT(()) + } + } + + private def migrateIfFreshInternal(flyway: Flyway)(implicit + traceContext: TraceContext + ): EitherT[UnlessShutdown, DbMigrations.Error, Unit] = { + if (flyway.info().applied().isEmpty) migrateDatabaseInternal(flyway) + else { + logger.debug("Skip flyway migration on non-empty database") + EitherT.rightT(()) + } + } + + /** Combined method of migrateIfFresh and checkPendingMigration, avoids creating multiple pools */ + private def migrateIfFreshAndCheckPending( + flyway: Flyway + ): EitherT[UnlessShutdown, DbMigrations.Error, Unit] = + TraceContext.withNewTraceContext { implicit traceContext => + for { + _ <- migrateIfFreshInternal(flyway) + _ <- checkPendingMigrationInternal(flyway).toEitherT[UnlessShutdown] + } yield () + } + + private def checkDbVersion( + db: JdbcBackend.Database, + timeouts: ProcessingTimeout, + standardConfig: Boolean, + )(implicit tc: TraceContext): EitherT[UnlessShutdown, DbMigrations.Error, Unit] = { + val check = 
DbVersionCheck + .dbVersionCheck(timeouts, standardConfig, dbConfig) + check(db).toEitherT[UnlessShutdown] + } + + private def checkPendingMigrationInternal( + flyway: Flyway + ): Either[DbMigrations.Error, Unit] = { + for { + info <- Either + .catchOnly[FlywayException](flyway.info()) + .leftMap(DbMigrations.FlywayError) + pendingMigrations = info.pending() + _ <- + if (pendingMigrations.isEmpty) Right(()) + else { + val currentVersion = Option(info.current()).map(_.getVersion.getVersion) + val lastVersion = pendingMigrations.last.getVersion.getVersion + val pendingMsg = + s"There are ${pendingMigrations.length} pending migrations to get to database schema version $lastVersion" + val msg = + currentVersion.fold(s"No migrations have been applied yet. $pendingMsg.")(version => + s"$pendingMsg. Currently on version $version." + ) + Left(DbMigrations.PendingMigrationError(msg)) + } + } yield () + } + +} + +// Community-edition factory; tags the logger with the node name when one is given. +class CommunityDbMigrationsFactory(loggerFactory: NamedLoggerFactory) extends DbMigrationsFactory { + override def create(dbConfig: DbConfig, name: String, devVersionSupport: Boolean)(implicit + closeContext: CloseContext + ): DbMigrations = + new CommunityDbMigrations( + dbConfig, + devVersionSupport, + loggerFactory.appendUnnamedKey("node", name), + ) + + override def create(dbConfig: DbConfig, devVersionSupport: Boolean)(implicit + closeContext: CloseContext + ): DbMigrations = + new CommunityDbMigrations(dbConfig, devVersionSupport, loggerFactory) +} + +// Community-edition implementation: every db operation runs against a freshly created migration database. +class CommunityDbMigrations( + protected val dbConfig: DbConfig, + protected val devVersionSupport: Boolean, + protected val loggerFactory: NamedLoggerFactory, +)(implicit override protected val closeContext: CloseContext) + extends DbMigrations + with NamedLogging { + + override def withDb[A]( + retryConfig: RetryConfig + )(fn: Database => EitherT[UnlessShutdown, DbMigrations.Error, A])(implicit + traceContext: TraceContext + ): EitherT[UnlessShutdown, DbMigrations.Error, A] = withCreatedDb(retryConfig)(fn) +} + 
+object DbMigrations { + + // Maps Slick's JdbcDataSource implementations onto the raw javax.sql.DataSource that Flyway needs. + def createDataSource(jdbcDataSource: JdbcDataSource): DataSource = + jdbcDataSource match { + case dataS: DataSourceJdbcDataSource => dataS.ds + case dataS: HikariCPJdbcDataSource => dataS.ds + case unsupported => + // This should never happen + sys.error(s"Data source not supported for migrations: ${unsupported.getClass}") + } + + // Closed hierarchy of migration failures; each case renders itself via PrettyPrinting. + sealed trait Error extends PrettyPrinting + final case class FlywayError(err: FlywayException) extends Error { + override def pretty: Pretty[FlywayError] = prettyOfClass(unnamedParam(_.err)) + } + final case class PendingMigrationError(msg: String) extends Error { + override def pretty: Pretty[PendingMigrationError] = prettyOfClass(unnamedParam(_.msg.unquoted)) + } + final case class DatabaseError(error: String) extends Error { + override def pretty: Pretty[DatabaseError] = prettyOfClass(unnamedParam(_.error.unquoted)) + } + final case class DatabaseVersionError(error: String) extends Error { + override def pretty: Pretty[DatabaseVersionError] = prettyOfClass( + unnamedParam(_.error.unquoted) + ) + } + final case class DatabaseConfigError(error: String) extends Error { + override def pretty: Pretty[DatabaseConfigError] = prettyOfClass( + unnamedParam(_.error.unquoted) + ) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/resource/DbStorageSingle.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/resource/DbStorageSingle.scala new file mode 100644 index 0000000000..2269ec78d7 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/resource/DbStorageSingle.scala @@ -0,0 +1,181 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.resource + +import cats.data.EitherT +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.config.{DbConfig, ProcessingTimeout, QueryCostMonitoringConfig} +import com.digitalasset.canton.health.ComponentHealthState +import com.digitalasset.canton.lifecycle.{CloseContext, FlagCloseable, UnlessShutdown} +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.metrics.DbStorageMetrics +import com.digitalasset.canton.resource.DatabaseStorageError.DatabaseConnectionLost.DatabaseConnectionLost +import com.digitalasset.canton.resource.DbStorage.{DbAction, DbStorageCreationException} +import com.digitalasset.canton.time.EnrichedDurations.* +import com.digitalasset.canton.time.{Clock, PeriodicAction} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.ResourceUtil +import slick.jdbc.JdbcBackend.Database + +import java.sql.SQLException +import java.util.concurrent.ScheduledExecutorService +import java.util.concurrent.atomic.AtomicReference +import scala.concurrent.{ExecutionContext, Future, blocking} + +/** DB Storage implementation that assumes a single process accessing the underlying database. 
*/ +class DbStorageSingle private ( + override val profile: DbStorage.Profile, + override val dbConfig: DbConfig, + db: Database, + clock: Clock, + override protected val logOperations: Boolean, + override val metrics: DbStorageMetrics, + override protected val timeouts: ProcessingTimeout, + override val threadsAvailableForWriting: PositiveInt, + override protected val loggerFactory: NamedLoggerFactory, +)(override implicit val ec: ExecutionContext) + extends DbStorage + with FlagCloseable + with NamedLogging { + + // Result of the last connectivity check; drives the active/passive reporting in `isActive` and the health state. + private val isActiveRef = new AtomicReference[Boolean](true) + + override lazy val initialHealthState: ComponentHealthState = + if (isActiveRef.get()) ComponentHealthState.Ok() + else ComponentHealthState.failed("instance is passive") + + private val periodicConnectionCheck = new PeriodicAction( + clock, + // using the same interval for connection timeout as for periodic check + dbConfig.parameters.connectionTimeout.toInternal, + loggerFactory, + timeouts, + "db-connection-check", + )(tc => checkConnectivity(tc)) + + override protected[canton] def runRead[A]( + action: DbAction.ReadTransactional[A], + operationName: String, + maxRetries: Int, + )(implicit traceContext: TraceContext, closeContext: CloseContext): Future[A] = + run("reading", operationName, maxRetries)(db.run(action)) + + override protected[canton] def runWrite[A]( + action: DbAction.All[A], + operationName: String, + maxRetries: Int, + )(implicit traceContext: TraceContext, closeContext: CloseContext): Future[A] = + run("writing", operationName, maxRetries)(db.run(action)) + + override def onClosed(): Unit = { + periodicConnectionCheck.close() + db.close() + } + + override def isActive: Boolean = isActiveRef.get() + + private def checkConnectivity(implicit + traceContext: TraceContext + ): Future[Unit] = { + Future(blocking(try { + // FIXME(i11240): if db is backed by a connection pool, this can fail even if the db is healthy, because the pool is busy executing long-running queries + val 
connection = + // this will timeout and throw a SQLException if can't establish a connection + db.source.createConnection() + val valid = ResourceUtil.withResource(connection)( + _.isValid(dbConfig.parameters.connectionTimeout.duration.toSeconds.toInt) + ) + if (valid) resolveUnhealthy() + valid + } catch { + case e: SQLException => + failureOccurred(DatabaseConnectionLost(e.getMessage)) + false + })).map { active => + val old = isActiveRef.getAndSet(active) + val changed = old != active + if (changed) + logger.info(s"Changed db storage instance to ${if (active) "active" else "passive"}.") + } + } + +} + +object DbStorageSingle { + /** Like [[create]], but throws a [[DbStorage.DbStorageCreationException]] on failure or shutdown. */ + def tryCreate( + config: DbConfig, + clock: Clock, + scheduler: Option[ScheduledExecutorService], + connectionPoolForParticipant: Boolean, + logQueryCost: Option[QueryCostMonitoringConfig], + metrics: DbStorageMetrics, + timeouts: ProcessingTimeout, + loggerFactory: NamedLoggerFactory, + retryConfig: DbStorage.RetryConfig = DbStorage.RetryConfig.failFast, + )(implicit + ec: ExecutionContext, + traceContext: TraceContext, + closeContext: CloseContext, + ): DbStorageSingle = + create( + config, + connectionPoolForParticipant, + logQueryCost, + clock, + scheduler, + metrics, + timeouts, + loggerFactory, + retryConfig, + ) + .valueOr(err => throw new DbStorageCreationException(err)) + .onShutdown(throw new DbStorageCreationException("Shutdown during creation")) + + def create( + config: DbConfig, + connectionPoolForParticipant: Boolean, + logQueryCost: Option[QueryCostMonitoringConfig], + clock: Clock, + scheduler: Option[ScheduledExecutorService], + metrics: DbStorageMetrics, + timeouts: ProcessingTimeout, + loggerFactory: NamedLoggerFactory, + retryConfig: DbStorage.RetryConfig = DbStorage.RetryConfig.failFast, + )(implicit + ec: ExecutionContext, + tc: TraceContext, + closeContext: CloseContext, + ): EitherT[UnlessShutdown, String, DbStorageSingle] = { + val numCombined = config.numCombinedConnectionsCanton( + 
connectionPoolForParticipant, + withWriteConnectionPool = false, + withMainConnection = false, + ) + val logger = loggerFactory.getTracedLogger(getClass) + logger.info(s"Creating storage, num-combined: $numCombined") + for { + db <- DbStorage.createDatabase( + config, + numCombined, + Some(metrics.queue), + logQueryCost, + scheduler, + retryConfig = retryConfig, + )(loggerFactory) + profile = DbStorage.profile(config) + storage = new DbStorageSingle( + profile, + config, + db, + clock, + logQueryCost.exists(_.logOperations), + metrics, + timeouts, + numCombined, + loggerFactory, + ) + } yield storage + } + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/resource/DbVersionCheck.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/resource/DbVersionCheck.scala new file mode 100644 index 0000000000..4974eaf31c --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/resource/DbVersionCheck.scala @@ -0,0 +1,183 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.resource + +import cats.syntax.either.* +import com.daml.nameof.NameOf.functionFullName +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.config.{DbConfig, ProcessingTimeout} +import com.digitalasset.canton.logging.{HasLoggerName, NamedLoggingContext} +import com.digitalasset.canton.resource.DbStorage.Profile +import com.digitalasset.canton.util.LoggerUtil +import org.slf4j.event.Level +import slick.jdbc.JdbcBackend.Database +import slick.jdbc.canton.ActionBasedSQLInterpolation.Implicits.actionBasedSQLInterpolationCanton + +import scala.util.Try + +object DbVersionCheck extends HasLoggerName { + + // Builds a check validating the server version (and, for Oracle, the NLS settings) of the given database. + def dbVersionCheck( + timeouts: ProcessingTimeout, + standardConfig: Boolean, + dbConfig: DbConfig, + )(implicit + loggingContext: NamedLoggingContext + ): Database => Either[DbMigrations.Error, Unit] = { db => + loggingContext.debug(s"Performing version checks") + val profile = DbStorage.profile(dbConfig) + val either: Either[DbMigrations.Error, Unit] = profile match { + + case Profile.Postgres(jdbc) => + val expectedPostgresVersions = NonEmpty(Seq, 11, 12, 13, 14, 15) + val expectedPostgresVersionsStr = + s"${(expectedPostgresVersions.dropRight(1)).mkString(", ")}, or ${expectedPostgresVersions + .takeRight(1) + .mkString("")}" + val maxPostgresVersion = expectedPostgresVersions.max1 + + // See https://www.postgresql.org/docs/9.1/sql-show.html + val query = sql"show server_version".as[String] + // Block on the query result, because `withDb` does not support running functions that return a + // future (at the time of writing). 
+ val vector = timeouts.network.await(functionFullName)(db.run(query)) + val stringO = vector.headOption + val either = for { + versionString <- stringO.toRight(left = s"Could not read Postgres version") + // An example `versionString` is 12.9 (Debian 12.9-1.pgdg110+1) + majorVersion <- versionString + .split('.') + .headOption + .toRight(left = + s"Could not parse Postgres version string $versionString. Are you using the recommended Postgres version 11 ?" + ) + .flatMap(str => + Try(str.toInt).toEither.leftMap(exn => + s"Exception in parsing Postgres version string $versionString: $exn" + ) + ) + _unit <- { + if (expectedPostgresVersions.contains(majorVersion)) Right(()) + else if (majorVersion > maxPostgresVersion) { + // Newer-than-expected Postgres is tolerated: warn under standard config, info otherwise. + val level = if (standardConfig) Level.WARN else Level.INFO + LoggerUtil.logAtLevel( + level, + s"Expected Postgres version $expectedPostgresVersionsStr but got higher version $versionString", + ) + Right(()) + } else + Left( + s"Expected Postgres version $expectedPostgresVersionsStr but got lower version $versionString" + ) + } + } yield () + either.leftMap(DbMigrations.DatabaseVersionError) + + case Profile.Oracle(jdbc) => + def checkOracleVersion(): Either[String, Unit] = { + + val expectedOracleVersion = 19 + val expectedOracleVersionPrefix = + " 19." 
// Leading whitespace is intentional, see the example bannerString + + // See https://docs.oracle.com/en/database/oracle/oracle-database/18/refrn/V-VERSION.html + val oracleVersionQuery = sql"select banner from v$$version".as[String].headOption + val stringO = timeouts.network.await(functionFullName)(db.run(oracleVersionQuery)) + stringO match { + case Some(bannerString) => + // An example `bannerString` is "Oracle Database 18c Express Edition Release 18.0.0.0.0 - Production" + if (bannerString.contains(expectedOracleVersionPrefix)) { + loggingContext.debug( + s"Check for oracle version $expectedOracleVersion passed: using $bannerString" + ) + Right(()) + } else { + Left(s"Expected Oracle version $expectedOracleVersion but got $bannerString") + } + case None => + Left(s"Database version check failed: could not read Oracle version") + } + } + + // Checks that the NLS parameter `param` is set to one of the `expected` strings + // - The DB setting must be set + // - The session setting may be empty + def checkNlsParameter( + param: String, + expected: Seq[String], + ): Either[String, Unit] = { + def prettyExpected: String = + if (expected.sizeIs == 1) expected(0) + else s"one of ${expected.mkString(", ")}" + + loggingContext.debug(s"Checking NLS parameter $param") + + @SuppressWarnings(Array("com.digitalasset.canton.SlickString")) + val queryDbSetting = + sql"SELECT value from nls_database_parameters where parameter=$param" + .as[String] + .headOption + val dbSettingO = + timeouts.network.await(functionFullName + s"-database-$param")(db.run(queryDbSetting)) + + @SuppressWarnings(Array("com.digitalasset.canton.SlickString")) + val querySessionSetting = + sql"SELECT value from nls_session_parameters where parameter=$param" + .as[String] + .headOption + val sessionSettingO = timeouts.network.await(functionFullName + s"-session-$param")( + db.run(querySessionSetting) + ) + + for { + // Require to find the setting for the database, but leave it optional for the session + 
dbSetting <- dbSettingO.toRight( + s"Oracle NLS database parameter $param is not set, but should be $prettyExpected" + ) + _ <- Either.cond( + expected.contains(dbSetting.toUpperCase), + loggingContext.debug(s"NLS database parameter $param is set to $dbSetting"), + s"Oracle NLS database parameter $param is $dbSetting, but should be $prettyExpected", + ) + + _ <- sessionSettingO.fold( + Either.right[String, Unit]( + loggingContext.debug(s"NLS session parameter $param is unset") + ) + ) { sessionSetting => + Either.cond( + expected.contains(sessionSetting.toUpperCase), + loggingContext.debug(s"NLS session parameter $param is set to $sessionSetting"), + s"Oracle NLS session parameter $param is $sessionSetting, but should be $prettyExpected", + ) + } + } yield () + } + + // Check the NLS settings of the database so that Oracle uses the expected encodings and collations for + // string fields in tables. + def checkOracleNlsSetting(): Either[String, Unit] = + for { + _ <- checkNlsParameter("NLS_CHARACTERSET", Seq("AL32UTF8")) + _ <- checkNlsParameter("NLS_NCHAR_CHARACTERSET", Seq("AL32UTF8", "AL16UTF16")) + _ <- checkNlsParameter("NLS_SORT", Seq("BINARY")) + _ <- checkNlsParameter("NLS_COMP", Seq("BINARY")) + } yield () + + for { + _ <- checkOracleVersion().leftMap(DbMigrations.DatabaseVersionError) + _ <- checkOracleNlsSetting().leftMap(DbMigrations.DatabaseConfigError) + } yield () + case Profile.H2(_) => + // We don't perform version checks for H2 + Right(()) + } + // With a non-standard config, failed checks are demoted to info logs instead of fatal errors. + if (standardConfig) either + else + either.leftFlatMap { error => + loggingContext.info(error.toString) + Right(()) + } + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/resource/Storage.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/resource/Storage.scala new file mode 100644 index 0000000000..fcf6576cfd --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/resource/Storage.scala @@ -0,0 +1,941 @@ +// Copyright (c) 
2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.resource + +import cats.data.{Chain, EitherT, OptionT} +import cats.syntax.either.* +import cats.syntax.functor.* +import cats.{Eval, Functor, Monad} +import com.daml.nameof.NameOf.functionFullName +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.config.CantonRequireTypes.String255 +import com.digitalasset.canton.config.RequireTypes.{PositiveInt, PositiveNumeric} +import com.digitalasset.canton.config.* +import com.digitalasset.canton.crypto.Salt +import com.digitalasset.canton.health.{ + AtomicHealthComponent, + CloseableHealthComponent, + ComponentHealthState, +} +import com.digitalasset.canton.lifecycle.{ + CloseContext, + FlagCloseable, + HasCloseContext, + UnlessShutdown, +} +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.logging.{ + ErrorLoggingContext, + NamedLoggerFactory, + NamedLogging, + TracedLogger, +} +import com.digitalasset.canton.metrics.{DbQueueMetrics, DbStorageMetrics} +import com.digitalasset.canton.protocol.ContractIdSyntax.* +import com.digitalasset.canton.protocol.{LfContractId, LfGlobalKey, LfHash} +import com.digitalasset.canton.resource.DbStorage.Profile.{H2, Oracle, Postgres} +import com.digitalasset.canton.resource.DbStorage.{DbAction, Profile} +import com.digitalasset.canton.resource.StorageFactory.StorageCreationException +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.store.db.{DbDeserializationException, DbSerializationException} +import com.digitalasset.canton.time.Clock +import com.digitalasset.canton.time.EnrichedDurations.* +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.ShowUtil.* +import com.digitalasset.canton.util.retry.RetryEither +import com.digitalasset.canton.util.retry.RetryUtil.DbExceptionRetryable +import 
com.digitalasset.canton.util.{Thereafter, *} +import com.digitalasset.canton.{LfPackageId, LfPartyId} +import com.google.protobuf.ByteString +import com.typesafe.config.{Config, ConfigValueFactory} +import com.typesafe.scalalogging.Logger +import com.zaxxer.hikari.pool.HikariPool.PoolInitializationException +import org.slf4j.event.Level +import slick.SlickException +import slick.dbio.* +import slick.jdbc.JdbcBackend.Database +import slick.jdbc.canton.ActionBasedSQLInterpolation.Implicits.actionBasedSQLInterpolationCanton +import slick.jdbc.canton.* +import slick.jdbc.{ActionBasedSQLInterpolation as _, SQLActionBuilder as _, *} +import slick.lifted.Aliases +import slick.util.{AsyncExecutor, AsyncExecutorWithMetrics, ClassLoaderUtil} + +import java.io.ByteArrayInputStream +import java.sql.{Blob, SQLException, Statement} +import java.util.UUID +import java.util.concurrent.atomic.AtomicInteger +import java.util.concurrent.{ScheduledExecutorService, TimeUnit} +import javax.sql.rowset.serial.SerialBlob +import scala.annotation.nowarn +import scala.collection.immutable +import scala.concurrent.duration.* +import scala.concurrent.{ExecutionContext, Future} +import scala.language.implicitConversions + +/** Storage resources (e.g., a database connection pool) that must be released on shutdown. + * + * The only common functionality defined is the shutdown through AutoCloseable. + * Using storage objects after shutdown is unsafe; thus, they should only be closed when they're ready for + * garbage collection. + */ +sealed trait Storage extends CloseableHealthComponent with AtomicHealthComponent { + self: NamedLogging => + + /** Indicates if the storage instance is active and ready to perform updates/writes. */ + def isActive: Boolean + +} + +trait StorageFactory { + def config: StorageConfig + + /** Throws an exception in case of errors or shutdown during storage creation. 
*/ + def tryCreate( + connectionPoolForParticipant: Boolean, + logQueryCost: Option[QueryCostMonitoringConfig], + clock: Clock, + scheduler: Option[ScheduledExecutorService], + metrics: DbStorageMetrics, + timeouts: ProcessingTimeout, + loggerFactory: NamedLoggerFactory, + )(implicit + ec: ExecutionContext, + traceContext: TraceContext, + closeContext: CloseContext, + ): Storage = + create( + connectionPoolForParticipant, + logQueryCost, + clock, + scheduler, + metrics, + timeouts, + loggerFactory, + ) + .valueOr(err => throw new StorageCreationException(err)) + .onShutdown(throw new StorageCreationException("Shutdown during storage creation")) + + def create( + connectionPoolForParticipant: Boolean, + logQueryCost: Option[QueryCostMonitoringConfig], + clock: Clock, + scheduler: Option[ScheduledExecutorService], + metrics: DbStorageMetrics, + timeouts: ProcessingTimeout, + loggerFactory: NamedLoggerFactory, + )(implicit + ec: ExecutionContext, + traceContext: TraceContext, + closeContext: CloseContext, + ): EitherT[UnlessShutdown, String, Storage] +} + +object StorageFactory { + class StorageCreationException(message: String) extends RuntimeException(message) +} + +class CommunityStorageFactory(val config: CommunityStorageConfig) extends StorageFactory { + override def create( + connectionPoolForParticipant: Boolean, + logQueryCost: Option[QueryCostMonitoringConfig], + clock: Clock, + scheduler: Option[ScheduledExecutorService], + metrics: DbStorageMetrics, + timeouts: ProcessingTimeout, + loggerFactory: NamedLoggerFactory, + )(implicit + ec: ExecutionContext, + traceContext: TraceContext, + closeContext: CloseContext, + ): EitherT[UnlessShutdown, String, Storage] = + config match { + case CommunityStorageConfig.Memory(_, _) => + EitherT.rightT(new MemoryStorage(loggerFactory, timeouts)) + case db: DbConfig => + DbStorageSingle + .create( + db, + connectionPoolForParticipant, + logQueryCost, + clock, + scheduler, + metrics, + timeouts, + loggerFactory, + ) + 
.widen[Storage] + } +} + +class MemoryStorage( + override val loggerFactory: NamedLoggerFactory, + override val timeouts: ProcessingTimeout, +) extends Storage + with NamedLogging + with FlagCloseable { + override val name = "memory_storage" + override val initialHealthState: ComponentHealthState = ComponentHealthState.Ok() + + override def isActive: Boolean = true +} + +trait DbStore extends FlagCloseable with NamedLogging with HasCloseContext { + protected val storage: DbStorage +} + +trait DbStorage extends Storage { self: NamedLogging => + + val profile: DbStorage.Profile + val dbConfig: DbConfig + protected val logOperations: Boolean + + override val name: String = DbStorage.healthName + + override def initialHealthState: ComponentHealthState = + ComponentHealthState.NotInitializedState + + object DbStorageConverters { + + /** We use `bytea` in Postgres and Oracle and `binary large object` in H2. + * The reason is that starting from version 2.0, H2 imposes a limit of 1M + * for the size of a `bytea`. Hence, depending on the profile, SetParameter + * and GetResult for `Array[Byte]` are different for H2 and Oracle/Postgres. 
+ */ + private lazy val byteArraysAreBlobs = profile match { + case _: H2 => true + case _ => false + } + + /** We use .setBinaryStream for Oracle instead of .setBytes, due to an ORA-03146 code which happens sometimes with: + * - BLOB sql field + * - MERGE query + * - new field value of size > 32K + * + * Canton #11644, support case #4136 + * Solution is based on: + * https://stackoverflow.com/questions/7794197/inserting-byte-array-as-blob-in-oracle-database-getting-ora-01460-unimplement + */ + private lazy val bytesArraysSetBinaryStream = profile match { + case _: Oracle => true + case _ => false + } + + implicit val setParameterByteArray: SetParameter[Array[Byte]] = (v, pp) => + if (byteArraysAreBlobs) pp.setBlob(bytesToBlob(v)) + else if (bytesArraysSetBinaryStream) { + val npos = pp.pos + 1 + pp.ps.setBinaryStream(npos, new ByteArrayInputStream(v)) + pp.pos = npos + } else { + pp.setBytes(v) + } + + implicit val getResultByteArray: GetResult[Array[Byte]] = + if (byteArraysAreBlobs) GetResult(r => blobToBytes(r.nextBlob())) + else GetResult(_.nextBytes()) + + @SuppressWarnings(Array("org.wartremover.warts.Null")) + implicit val setParameterOptionalByteArray: SetParameter[Option[Array[Byte]]] = (v, pp) => + if (byteArraysAreBlobs) pp.setBlobOption(v.map(bytesToBlob)) + else // Postgres profile will fail with setBytesOption if given None. Easy fix is to use setBytes with null instead + pp.setBytes(v.orNull) + + implicit val getResultOptionalByteArray: GetResult[Option[Array[Byte]]] = + if (byteArraysAreBlobs) + GetResult(_.nextBlobOption().map(blobToBytes)) + else + GetResult(_.nextBytesOption()) + + private def blobToBytes(blob: Blob): Array[Byte] = + if (blob.length() == 0) Array[Byte]() else blob.getBytes(1, blob.length().toInt) + + @SuppressWarnings(Array("org.wartremover.warts.Null")) + private def bytesToBlob(bytes: Array[Byte]): SerialBlob = + if (bytes != null) new SerialBlob(bytes) else null + } + + /** Returns database specific limit [offset] clause. 
+ * Safe to use in a select slick query with #$... interpolation + */ + def limit(numberOfItems: Int, skipItems: Long = 0L): String = profile match { + case _: DbStorage.Profile.Oracle => + (if (skipItems != 0L) s"offset $skipItems rows " + else "") + s"fetch next $numberOfItems rows only" + case _ => s"limit $numberOfItems" + (if (skipItems != 0L) s" offset $skipItems" else "") + } + + /** Automatically performs #$ interpolation for a call to `limit` */ + def limitSql(numberOfItems: Int, skipItems: Long = 0L): SQLActionBuilder = { + sql" #${limit(numberOfItems, skipItems)} " + } + + /** Runs the given `query` transactionally with synchronous commit replication if + * the database provides the ability to configure synchronous commits per transaction. + * + * Currently only Postgres supports this. + */ + def withSyncCommitOnPostgres[A, E <: Effect]( + query: DBIOAction[A, NoStream, E] + ): DBIOAction[A, NoStream, Effect.Write with E with Effect.Transactional] = { + import profile.DbStorageAPI.jdbcActionExtensionMethods + profile match { + case _: Profile.Postgres => + val syncCommit = sqlu"set local synchronous_commit=on" + syncCommit.andThen(query).transactionally + case _: Profile.H2 | _: Profile.Oracle => + // Don't do anything for H2/Oracle. According to our docs it is up to the user to enforce synchronous replication. + // Any changes here are on a best-effort basis, but we won't guarantee they will be sufficient. 
+ query.transactionally + } + } + + def metrics: DbStorageMetrics + + // Size of the pool available for writing, it may be a combined r/w pool or a dedicated write pool + def threadsAvailableForWriting: PositiveInt + + lazy val api: profile.DbStorageAPI.type = profile.DbStorageAPI + lazy val converters: DbStorageConverters.type = DbStorageConverters + + protected implicit def ec: ExecutionContext + protected def timeouts: ProcessingTimeout + + private val defaultMaxRetries = retry.Forever + + protected def run[A](action: String, operationName: String, maxRetries: Int)( + body: => Future[A] + )(implicit traceContext: TraceContext, closeContext: CloseContext): Future[A] = { + if (logOperations) { + logger.debug(s"started $action: $operationName") + } + import Thereafter.syntax.* + implicit val success: retry.Success[A] = retry.Success.always + retry + .Backoff( + logger, + closeContext.context, + maxRetries = maxRetries, + initialDelay = 50.milliseconds, + maxDelay = timeouts.storageMaxRetryInterval.unwrap, + operationName = operationName, + suspendRetries = Eval.always( + if (isActive) Duration.Zero + else dbConfig.parameters.connectionTimeout.asFiniteApproximation + ), + ) + .apply(body, DbExceptionRetryable) + .thereafter { _ => + if (logOperations) { + logger.debug(s"completed $action: $operationName") + } + } + + } + + protected[canton] def runRead[A]( + action: DbAction.ReadTransactional[A], + operationName: String, + maxRetries: Int, + )(implicit traceContext: TraceContext, closeContext: CloseContext): Future[A] + + protected[canton] def runWrite[A]( + action: DbAction.All[A], + operationName: String, + maxRetries: Int, + )(implicit traceContext: TraceContext, closeContext: CloseContext): Future[A] + + /** Read-only query, possibly transactional */ + def query[A]( + action: DbAction.ReadTransactional[A], + operationName: String, + maxRetries: Int = defaultMaxRetries, + )(implicit traceContext: TraceContext, closeContext: CloseContext): Future[A] = + 
runRead(action, operationName, maxRetries) + + def sequentialQueryAndCombine[A]( + actions: immutable.Iterable[DbAction.ReadOnly[immutable.Iterable[A]]], + operationName: String, + )(implicit + traceContext: TraceContext, + closeContext: CloseContext, + ): Future[immutable.Iterable[A]] = + if (actions.nonEmpty) { + MonadUtil.foldLeftM(actions.iterableFactory.empty[A], actions) { case (acc, action) => + query(action, operationName)(traceContext, closeContext).map(acc ++ _) + } + } else Future.successful(immutable.Iterable.empty[A]) + + def querySingle[A]( + action: DBIOAction[Option[A], NoStream, Effect.Read with Effect.Transactional], + operationName: String, + )(implicit traceContext: TraceContext, closeContext: CloseContext): OptionT[Future, A] = + OptionT(query(action, operationName)) + + /** Write-only action, possibly transactional + * + * The action must be idempotent because it may be retried multiple times. + * Only the result of the last retry will be reported. + * If the action reports the number of rows changed, + * this number may be lower than actual number of affected rows + * because updates from earlier retries are not accounted. + */ + def update[A]( + action: DBIOAction[A, NoStream, Effect.Write with Effect.Transactional], + operationName: String, + maxRetries: Int = defaultMaxRetries, + )(implicit traceContext: TraceContext, closeContext: CloseContext): Future[A] = + runWrite(action, operationName, maxRetries) + + /** Write-only action, possibly transactional + * The action must be idempotent because it may be retried multiple times. + */ + def update_( + action: DBIOAction[_, NoStream, Effect.Write with Effect.Transactional], + operationName: String, + maxRetries: Int = defaultMaxRetries, + )(implicit traceContext: TraceContext, closeContext: CloseContext): Future[Unit] = + runWrite(action, operationName, maxRetries).map(_ => ()) + + /** Query and update in a single action. 
+ * + * Note that the action is not transactional by default, but can be made so + * via using `queryAndUpdate(action.transactionally..withTransactionIsolation(Serializable), "name")` + * + * The action must be idempotent because it may be retried multiple times. + * Only the result of the last retry will be reported. + * If the action reports the number of rows changed, + * this number may be lower than actual number of affected rows + * because updates from earlier retries are not accounted. + */ + def queryAndUpdate[A]( + action: DBIOAction[A, NoStream, Effect.All], + operationName: String, + maxRetries: Int = defaultMaxRetries, + )(implicit traceContext: TraceContext, closeContext: CloseContext): Future[A] = + runWrite(action, operationName, maxRetries) + +} + +object DbStorage { + val healthName: String = "db-storage" + + final case class PassiveInstanceException(reason: String) + extends RuntimeException(s"DbStorage instance is not active: $reason") + + sealed trait Profile extends Product with Serializable with PrettyPrinting { + def jdbc: JdbcProfile + + object DbStorageAPI extends Aliases { + lazy val jdbcProfile = jdbc + + implicit def jdbcActionExtensionMethods[E <: Effect, R, S <: NoStream]( + a: DBIOAction[R, S, E] + ): jdbcProfile.JdbcActionExtensionMethods[E, R, S] = + new jdbcProfile.JdbcActionExtensionMethods[E, R, S](a) + + implicit def actionBasedSQLInterpolationCanton( + s: StringContext + ): ActionBasedSQLInterpolation = + new ActionBasedSQLInterpolation(s) + } + } + + /** Indicate if the Db profile supports DB locks. 
*/ + sealed trait DbLockSupport extends Product with Serializable + + object Profile { + final case class H2(jdbc: H2Profile) extends Profile { + override def pretty: Pretty[H2] = prettyOfObject[H2] + } + final case class Oracle(jdbc: OracleProfile) extends Profile with DbLockSupport { + override def pretty: Pretty[Oracle] = prettyOfObject[Oracle] + } + final case class Postgres(jdbc: PostgresProfile) extends Profile with DbLockSupport { + override def pretty: Pretty[Postgres] = prettyOfObject[Postgres] + } + } + + object DbAction { + type ReadOnly[+A] = DBIOAction[A, NoStream, Effect.Read] + type ReadTransactional[+A] = DBIOAction[A, NoStream, Effect.Read with Effect.Transactional] + type WriteOnly[+A] = DBIOAction[A, NoStream, Effect.Write] + type All[+A] = DBIOAction[A, NoStream, Effect.All] + + /** Use `.andThen(unit)` instead of `.map(_ => ())` for DBIOActions + * because `andThen` doesn't need an execution context and can thus be executed more efficiently + * according to the slick documentation https://scala-slick.org/doc/3.3.3/dbio.html#sequential-execution + */ + val unit: DBIOAction[Unit, NoStream, Effect] = DBIOAction.successful(()) + } + + object Implicits { + + implicit def functorDBIO[E <: Effect](implicit + ec: ExecutionContext + ): Functor[DBIOAction[*, NoStream, E]] = new Functor[DBIOAction[*, NoStream, E]] { + def map[A, B](fa: DBIOAction[A, NoStream, E])(f: A => B): DBIOAction[B, NoStream, E] = + fa.map(f) + } + + implicit def monadDBIO[E <: Effect](implicit + ec: ExecutionContext + ): Monad[DBIOAction[*, NoStream, E]] = + new Monad[DBIOAction[*, NoStream, E]] { + def flatMap[A, B](fa: DBIOAction[A, NoStream, E])( + f: A => DBIOAction[B, NoStream, E] + ): DBIOAction[B, NoStream, E] = fa.flatMap(f) + + def tailRecM[A, B](a: A)( + f: A => DBIOAction[Either[A, B], NoStream, E] + ): DBIOAction[B, NoStream, E] = + f(a).flatMap(_.fold(tailRecM(_)(f), pure)) + + def pure[A](x: A): DBIOAction[A, NoStream, E] = DBIOAction.successful(x) + } + + implicit 
val getResultUuid: GetResult[UUID] = GetResult(r => UUID.fromString(r.nextString())) + @SuppressWarnings(Array("com.digitalasset.canton.SlickString")) // UUIDs are length-limited + implicit val setParameterUuid: SetParameter[UUID] = (v, pp) => pp.setString(v.toString) + + @SuppressWarnings(Array("com.digitalasset.canton.SlickString")) // LfPartyIds are length-limited + implicit val setParameterLfPartyId: SetParameter[LfPartyId] = (v, pp) => pp.setString(v) + implicit val getResultLfPartyId: GetResult[LfPartyId] = GetResult(r => r.nextString()).andThen { + LfPartyId + .fromString(_) + .valueOr(err => + throw new DbDeserializationException(s"Failed to deserialize party ID: $err") + ) + } + + implicit val absCoidGetResult: GetResult[LfContractId] = GetResult(r => + ProtoConverter + .parseLfContractId(r.nextString()) + .fold(err => throw new DbDeserializationException(err.toString), Predef.identity) + ) + implicit val absCoidSetParameter: SetParameter[LfContractId] = + (c, pp) => pp >> c.toLengthLimitedString + + // We assume that the HexString of the hash of the global key will fit into 255 characters + // Please consult the team, if you want to increase this limit + implicit val lfGlobalKeySetParameter: SetParameter[LfGlobalKey] = + (key: LfGlobalKey, pp: PositionedParameters) => + pp >> String255.tryCreate(key.hash.toHexString) + implicit val lfHashGetResult: GetResult[LfHash] = GetResult { r => + LfHash + .fromString(r.nextString()) + .valueOr(err => + throw new DbSerializationException(s"Failed to deserialize contract key hash: $err") + ) + } + + // LfPackageIds are length-limited + @SuppressWarnings(Array("com.digitalasset.canton.SlickString")) + implicit val setParameterLfPackageId: SetParameter[LfPackageId] = (v, pp) => pp.setString(v) + implicit val getResultPackageId: GetResult[LfPackageId] = + GetResult(r => r.nextString()).andThen { + LfPackageId + .fromString(_) + .valueOr(err => + throw new DbDeserializationException(s"Failed to deserialize package id: 
$err") + ) + } + + implicit val setParameterByteString: SetParameter[ByteString] = (bs, pp) => + pp.setBytes(bs.toByteArray) + implicit val getResultByteString: GetResult[ByteString] = + GetResult(r => ByteString.copyFrom(r.nextBytes())) + + @SuppressWarnings(Array("org.wartremover.warts.Null")) + implicit val setParameterByteStringOption: SetParameter[Option[ByteString]] = (b, pp) => + // Postgres profile will fail with setBytesOption if given None. Easy fix is to use setBytes with null instead + pp.setBytes(b.map(_.toByteArray).orNull) + implicit val getResultByteStringOption: GetResult[Option[ByteString]] = + GetResult(r => r.nextBytesOption().map(ByteString.copyFrom)) + + implicit val setContractSalt: SetParameter[Option[Salt]] = + (c, pp) => pp >> c.map(_.toProtoV0.toByteString) + implicit val getContractSalt: GetResult[Option[Salt]] = + implicitly[GetResult[Option[ByteString]]] andThen { + _.map(byteString => + ProtoConverter + .parse( + // Even though it is versioned, the Salt is considered static + // as it's used for authenticating contract ids which are immutable + com.digitalasset.canton.crypto.v0.Salt.parseFrom, + Salt.fromProtoV0, + byteString, + ) + .valueOr(err => + throw new DbDeserializationException( + s"Failed to deserialize contract salt: $err" + ) + ) + ) + } + + object BuilderChain { + + import scala.language.implicitConversions + implicit def toSQLActionBuilderChain(a1: SQLActionBuilder): SQLActionBuilderChain = + SQLActionBuilderChain(a1) + implicit def fromSQLActionBuilderChain(op: SQLActionBuilderChain): SQLActionBuilder = + op.toActionBuilder + implicit def mapBuilderChain(op: Option[SQLActionBuilderChain]): Option[SQLActionBuilder] = + op.map(_.toActionBuilder) + implicit def mergeBuildersIntoChain(op: Seq[SQLActionBuilder]): SQLActionBuilderChain = + SQLActionBuilderChain(op) + + } + + } + + class SQLActionBuilderChain(private val builders: Chain[SQLActionBuilder]) extends AnyVal { + def ++(a2: SQLActionBuilder): SQLActionBuilderChain = 
{ + new SQLActionBuilderChain(builders.append(a2)) + } + def ++(other: SQLActionBuilderChain): SQLActionBuilderChain = { + new SQLActionBuilderChain(builders.concat(other.builders)) + } + def ++(others: Seq[SQLActionBuilderChain]): SQLActionBuilderChain = { + others.foldLeft(this)(_ ++ _) + } + def intercalate(item: SQLActionBuilder): SQLActionBuilderChain = { + new SQLActionBuilderChain( + builders.foldLeft(Chain.empty[SQLActionBuilder]) { case (acc, elem) => + if (acc.isEmpty) Chain.one(elem) + else acc.append(item).append(elem) + } + ) + } + def toActionBuilder: SQLActionBuilder = { + val lst = builders.toList + SQLActionBuilder( + builders.flatMap(x => Chain.fromSeq(x.queryParts)).toList, + (p: Unit, pp: PositionedParameters) => { + lst.foreach(_.unitPConv.apply(p, pp)) + }, + ) + } + } + + object SQLActionBuilderChain { + def intercalate(lst: Seq[SQLActionBuilder], item: SQLActionBuilder): SQLActionBuilderChain = + apply(lst).intercalate(item) + def apply(item: SQLActionBuilder): SQLActionBuilderChain = new SQLActionBuilderChain( + Chain.one(item) + ) + def apply(items: Seq[SQLActionBuilder]): SQLActionBuilderChain = new SQLActionBuilderChain( + Chain.fromSeq(items) + ) + } + + /** Used to create unique connection pool names. 
+ */ + private val poolNameIndex: AtomicInteger = new AtomicInteger(0) + + def profile(config: DbConfig): Profile = + config match { + case _: H2DbConfig => H2(H2Profile) + case _: PostgresDbConfig => Postgres(PostgresProfile) + // TODO(i11009): assume unknown config is for oracle until we have proper oracle factory support + case _ => Oracle(OracleProfile) + } + + def createDatabase( + config: DbConfig, + numThreads: PositiveInt, + metrics: Option[DbQueueMetrics] = None, + logQueryCost: Option[QueryCostMonitoringConfig] = None, + scheduler: Option[ScheduledExecutorService], + forMigration: Boolean = false, + retryConfig: DbStorage.RetryConfig = DbStorage.RetryConfig.failFast, + )( + loggerFactory: NamedLoggerFactory + )(implicit closeContext: CloseContext): EitherT[UnlessShutdown, String, Database] = { + val baseLogger = loggerFactory.getLogger(classOf[DbStorage]) + val logger = TracedLogger(baseLogger) + + TraceContext.withNewTraceContext { implicit traceContext => + // Must be called to set proper defaults in case of H2 + val configWithFallbacks: Config = + DbConfig.configWithFallback(config)( + numThreads, + s"slick-${loggerFactory.threadName}-${poolNameIndex.incrementAndGet()}", + logger, + ) + + val configWithMigrationFallbacks: Config = if (forMigration) { + if (configWithFallbacks.hasPath("numThreads")) { + // The migration requires at least 2 threads. + val numThreads = configWithFallbacks.getInt("numThreads") + if (numThreads < 2) { + logger.info( + s"Overriding numThreads from $numThreads to 2 for the purpose of db migration, as flyway needs at least 2 threads." 
+ ) + } + configWithFallbacks.withValue( + "numThreads", + ConfigValueFactory.fromAnyRef(numThreads max 2), + ) + } else { + // no fallback needed as default value works with migration + configWithFallbacks + } + } else { + // no fallback requested + configWithFallbacks + } + + logger.debug( + s"Initializing database storage with config: ${DbConfig.hideConfidential(configWithMigrationFallbacks)}" + ) + + RetryEither.retry[String, Database]( + maxRetries = retryConfig.maxRetries, + waitInMs = retryConfig.retryWaitingTime.toMillis, + operationName = functionFullName, + logger = logger, + retryLogLevel = retryConfig.retryLogLevel, + failLogLevel = Level.ERROR, + ) { + for { + db <- createJdbcBackendDatabase( + configWithMigrationFallbacks, + metrics, + logQueryCost, + scheduler, + config.parameters, + baseLogger, + ) + _ <- Either + .catchOnly[SQLException](db.createSession().close()) + .leftMap(err => show"Failed to create session with database: $err") + } yield db + }(ErrorLoggingContext.fromTracedLogger(logger), closeContext) + } + } + + @SuppressWarnings(Array("org.wartremover.warts.Null")) + private def createJdbcBackendDatabase( + config: Config, + metrics: Option[DbQueueMetrics], + logQueryCost: Option[QueryCostMonitoringConfig], + scheduler: Option[ScheduledExecutorService], + parameters: DbParametersConfig, + logger: Logger, + ): Either[String, Database] = { + // copy paste from JdbcBackend.forConfig + import slick.util.ConfigExtensionMethods.* + try { + val classLoader: ClassLoader = ClassLoaderUtil.defaultClassLoader + val source = JdbcDataSource.forConfig(config, null, "", classLoader) + val poolName = config.getStringOr("poolName", "") + val numThreads = config.getIntOr("numThreads", 20) + val maxConnections = source.maxConnections.getOrElse(numThreads) + val registerMbeans = config.getBooleanOr("registerMbeans", false) + + val executor = metrics match { + // inject our own Canton Async executor with metrics + case Some(m) => + new 
AsyncExecutorWithMetrics( + poolName, + numThreads, + numThreads, + queueSize = config.getIntOr("queueSize", 1000), + maxConnections = maxConnections, + registerMbeans = registerMbeans, + logQueryCost = logQueryCost, + metrics = m, + scheduler = scheduler, + warnOnSlowQueryO = parameters.warnOnSlowQuery.map(_.toInternal), + warnInterval = parameters.warnOnSlowQueryInterval.toInternal, + logger = logger, + ) + case None => + AsyncExecutor( + poolName, + numThreads, + numThreads, + queueSize = config.getIntOr("queueSize", 1000), + maxConnections = maxConnections, + registerMbeans = registerMbeans, + ) + } + + Right(JdbcBackend.Database.forSource(source, executor)) + } catch { + case ex: SlickException => Left(show"Failed to setup database access: $ex") + case ex: PoolInitializationException => Left(show"Failed to connect to database: $ex") + } + + } + + /** Construct a bulk operation (e.g., insertion, deletion). + * The operation must not return a result set! + * + * The returned action will run as a single big database transaction. If the execution of the transaction results + * in deadlocks, you should order `values` according to some consistent order. + * + * The returned update counts are merely lower bounds to the number of affected rows + * or SUCCESS_NO_INFO, because `Statement.executeBatch` + * reports partial execution of a batch as a `BatchUpdateException` with + * partial update counts therein and those update counts are not taken into consideration. + * + * This operation is idempotent if the statement is idempotent for each value. + * + * @throws java.lang.IllegalArgumentException if `statement` contains `"IGNORE_ROW_ON_DUPKEY_INDEX"` + * (See UpsertTestOracle for details.) 
+ */ + def bulkOperation[A]( + statement: String, + values: immutable.Iterable[A], + profile: Profile, + )( + setParams: PositionedParameters => A => Unit + )(implicit loggingContext: ErrorLoggingContext): DBIOAction[Array[Int], NoStream, Effect.All] = { + // Bail out if the statement contains IGNORE_ROW_ON_DUPKEY_INDEX, because update counts are known to be broken. + // Use MERGE instead. + // Ignoring update counts is not an option, because the JDBC driver reads them internally and may fail with + // low-level exceptions. + // See UpsertTestOracle for details. + ErrorUtil.requireArgument( + !statement.toUpperCase.contains("IGNORE_ROW_ON_DUPKEY_INDEX"), + s"Illegal usage of bulkOperation with IGNORE_ROW_ON_DUPKEY_INDEX. $statement", + ) + + if (values.isEmpty) DBIOAction.successful(Array.empty) + else { + val action = SimpleJdbcAction { session => + ResourceUtil.withResource { + session.connection.prepareStatement(statement) + } { preparedStatement => + values.foreach { v => + val pp = new PositionedParameters(preparedStatement) + setParams(pp)(v) + preparedStatement.addBatch() + } + val updateCounts = preparedStatement.executeBatch() + ErrorUtil.requireState( + values.sizeIs == updateCounts.length, + s"executeBatch returned ${updateCounts.length} update counts for ${values.size} rows. " + + s"${updateCounts.mkString("Array(", ", ", ")")}", + ) + ErrorUtil.requireState( + updateCounts.forall(x => x == Statement.SUCCESS_NO_INFO || x >= 0), + show"Batch operation update counts must be either ${Statement.SUCCESS_NO_INFO} or non-negative. " + + show"Actual results were ${updateCounts.mkString("Array(", ", ", ")")}", + ) + // The JDBC documentation demands that `executeBatch` throw a `BatchUpdateException` + // if some updates in the batch fail and set the corresponding row update entry to `EXECUTE_FAILED`. + // We check here for EXECUTE_BATCH not being reported without an exception to be super safe. 
+ updateCounts + } + } + + import profile.DbStorageAPI.* + profile match { + case _: Oracle => + // Oracle has the habit of not properly rolling back, if an error occurs and + // there is no transaction (i.e. autoCommit = true). Further details on this can be found in UpsertTestOracle. + action.transactionally + + case _ if values.sizeCompare(1) <= 0 => + // Disable auto-commit for better performance. + action + + case _ => action.transactionally + } + } + } + + /** Same as [[bulkOperation]] except that no update counts are returned. */ + def bulkOperation_[A]( + statement: String, + values: immutable.Iterable[A], + profile: Profile, + )( + setParams: PositionedParameters => A => Unit + )(implicit loggingContext: ErrorLoggingContext): DBIOAction[Unit, NoStream, Effect.All] = + bulkOperation(statement, values, profile)(setParams).andThen(DbAction.unit) + + /* Helper methods to make usage of EitherT[DBIO,] possible without requiring type hints */ + def dbEitherT[A, B](value: DBIO[Either[A, B]]): EitherT[DBIO, A, B] = EitherT[DBIO, A, B](value) + def dbEitherT[A]: DbEitherTRight[A] = new DbEitherTRight[A] + class DbEitherTRight[A] private[resource] { + def apply[B](value: DBIO[B])(implicit ec: ExecutionContext): EitherT[DBIO, A, B] = { + import DbStorage.Implicits.functorDBIO + EitherT.right[A](value) + } + } + + /** Construct an in clause for a given field. If there are too many elements, + * splits the clause into several ones. We need to split into several terms + * because Oracle imposes a limit on the number of elements in an + * in-clause (currently: 1000). 
+ * + * @return An iterable of the grouped values and the in clause for the grouped values + */ + @nowarn("cat=unused") // somehow, f is wrongly reported as unused by the compiler + def toInClauses[T]( + field: String, + values: NonEmpty[Seq[T]], + maxValuesInSqlList: PositiveNumeric[Int], + )(implicit f: SetParameter[T]): immutable.Iterable[(Seq[T], SQLActionBuilder)] = { + import DbStorage.Implicits.BuilderChain.* + + values + .grouped(maxValuesInSqlList.unwrap) + .map { groupedValues => + val inClause = sql"#$field in (" ++ + groupedValues + .map(value => sql"$value") + .intercalate(sql", ") ++ sql")" + + groupedValues -> inClause.toActionBuilder + } + .to(immutable.Iterable) + } + + def toInClauses_[T]( + field: String, + values: NonEmpty[Seq[T]], + maxValuesSqlInListSize: PositiveNumeric[Int], + )(implicit f: SetParameter[T]): immutable.Iterable[SQLActionBuilder] = + toInClauses(field, values, maxValuesSqlInListSize).map { case (_, builder) => builder } + + class DbStorageCreationException(message: String) extends RuntimeException(message) + + final case class RetryConfig( + retryLogLevel: Level, + retryWaitingTime: Duration, + maxRetries: Int, + ) + + object RetryConfig { + val failFast = RetryConfig( + retryLogLevel = Level.WARN, + retryWaitingTime = Duration(300, TimeUnit.MILLISECONDS), + maxRetries = 3, + ) + val failSlow = RetryConfig( + retryLogLevel = Level.INFO, + retryWaitingTime = 1.second, + maxRetries = 30, + ) + val forever = RetryConfig( + retryLogLevel = Level.INFO, + retryWaitingTime = Duration(1, TimeUnit.SECONDS), + maxRetries = Int.MaxValue, + ) + } +} + +object Storage { + def threadsAvailableForWriting(storage: Storage): PositiveInt = { + storage match { + case _: MemoryStorage => PositiveInt.one + case dbStorage: DbStorage => dbStorage.threadsAvailableForWriting + } + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/resource/TransactionalStoreUpdate.scala 
b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/resource/TransactionalStoreUpdate.scala new file mode 100644 index 0000000000..2fc0f6a00c --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/resource/TransactionalStoreUpdate.scala @@ -0,0 +1,129 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.resource + +import cats.syntax.foldable.* +import cats.syntax.functorFilter.* +import com.daml.nameof.NameOf.functionFullName +import com.digitalasset.canton.lifecycle.CloseContext +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.metrics.TimedLoadGauge +import com.digitalasset.canton.tracing.TraceContext +import slick.dbio.{DBIOAction, Effect, NoStream} + +import scala.annotation.tailrec +import scala.concurrent.{ExecutionContext, Future} + +/** A store update operation that can be executed transactionally with other independent update operations. + * Transactionality means that either all updates execute or none. + * The updates in a transactional execution must be independent of each other. + * During such an execution, partial updates may be observable by concurrent store accesses. + * + * Useful for updating stores on multiple domains transactionally. + */ +sealed trait TransactionalStoreUpdate { + + /** Run the transactional update as a stand-alone update. */ + def runStandalone()(implicit + traceContext: TraceContext, + callerCloseContext: CloseContext, + ): Future[Unit] +} + +object TransactionalStoreUpdate { + + /** Executes the unordered sequence of [[TransactionalStoreUpdate]]s transactionally, + * i.e., either all of them succeed or none. + * + * @throws java.lang.IllegalArgumentException if `updates` contains several DB store updates that use different [[DbStorage]] objects. 
+ */ + def execute( + updates: Seq[TransactionalStoreUpdate] + )(implicit + traceContext: TraceContext, + ec: ExecutionContext, + closeContext: CloseContext, + ): Future[Unit] = updates match { + case Seq() => Future.unit + case Seq(singleUpdate) => singleUpdate.runStandalone() + case _ => + // We first execute all DB updates in a single DB transaction and, if successful, all in-memory updates afterwards. + // This gives transactionality as the in-memory updates cannot fail by the requirement on `InMemoryTransactionalStoreUpdate`. + + val (dbUpdates, inMemUpdates) = updates.toList.partitionMap { + case upd: InMemoryTransactionalStoreUpdate => Right(upd) + case upd: DbTransactionalStoreUpdate => Left(upd) + } + + // Make sure that all DB updates use the same Db storage object. + // Otherwise we cannot combine the SQL updates into a single DB transaction. + val storages = dbUpdates.map(_.storage).distinct + require( + storages.sizeCompare(1) <= 0, + s"Cannot execute transactional updates across multiple DB storage objects: $storages", + ) + + lazy val updatesF = storages.headOption + .traverse_ { storage => + import storage.api.* + val dbUpdatesTransaction = DBIO.seq(dbUpdates.map(_.sql): _*).transactionally + storage.update_(dbUpdatesTransaction, functionFullName) + } + .map(_ => inMemUpdates.foreach(_.perform())) + + // We wrap all the metrics around everything. This makes them possibly overreport the timing, + // but it's the safest thing to do as a metric could cause a failure and thus + // lead to a transactionality violation. + val metrics = dbUpdates.mapFilter(_.metric) + // Even though this method is tailrec, the recursion is probably not stack-safe because + // we're building up a pile of lazy closures that then get unravelled in the `Nil` case. 
+ @tailrec def wrapMetrics(fut: => Future[Unit], metrics: List[TimedLoadGauge]): Future[Unit] = + metrics match { + case Nil => fut + case metric :: rest => + wrapMetrics(metric.event(fut), rest) + } + wrapMetrics(updatesF, metrics) + } + + /** Transactional update of an in-memory store. + * + * @param perform The update to perform. Must always succeed and never throw an exception. + */ + private[canton] class InMemoryTransactionalStoreUpdate(val perform: () => Unit) + extends TransactionalStoreUpdate { + override def runStandalone()(implicit + traceContext: TraceContext, + callerCloseContext: CloseContext, + ): Future[Unit] = + Future.successful(perform()) + } + + private[canton] object InMemoryTransactionalStoreUpdate { + def apply(perform: => Unit): InMemoryTransactionalStoreUpdate = + new InMemoryTransactionalStoreUpdate(() => perform) + } + + /** Transactional update of a DB store. + * + * @param sql The DB action to perform. + * @param storage The [[DbStorage]] to be used to execute the `sql` action. 
+ */ + private[canton] class DbTransactionalStoreUpdate( + val sql: DBIOAction[_, NoStream, Effect.Write with Effect.Transactional], + val storage: DbStorage, + val metric: Option[TimedLoadGauge], + override protected val loggerFactory: NamedLoggerFactory, + )(implicit val ec: ExecutionContext) + extends TransactionalStoreUpdate + with NamedLogging { + override def runStandalone()(implicit + traceContext: TraceContext, + callerCloseContext: CloseContext, + ): Future[Unit] = { + lazy val runDbF = storage.update_(sql, functionFullName)(traceContext, callerCloseContext) + metric.fold(runDbF)(_.event(runDbF)) + } + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/ApplicationHandler.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/ApplicationHandler.scala new file mode 100644 index 0000000000..dfe4b36cdf --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/ApplicationHandler.scala @@ -0,0 +1,161 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing + +import cats.Monoid +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.sequencing.protocol.Envelope +import com.digitalasset.canton.time.DomainTimeTracker +import com.digitalasset.canton.tracing.TraceContext +import com.google.common.annotations.VisibleForTesting + +import scala.concurrent.ExecutionContext + +/** An application handler processes boxed envelopes and returns a [[HandlerResult]] */ +trait ApplicationHandler[-Box[+_ <: Envelope[_]], -Env <: Envelope[_]] + extends (BoxedEnvelope[Box, Env] => HandlerResult) { + + /** Human-readable name of the application handler for logging and debugging */ + def name: String + + /** Called by the [[com.digitalasset.canton.sequencing.client.SequencerClient]] before the start of a subscription. + * @param domainTimeTracker The domain time tracker that listens to this application handler's subscription + */ + def subscriptionStartsAt(start: SubscriptionStart, domainTimeTracker: DomainTimeTracker)(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Unit] + + /** Replaces the application handler's processing with `f` and + * leaves the [[subscriptionStartsAt]] logic and the name the same. 
+ */ + def replace[Box2[+_ <: Envelope[_]], Env2 <: Envelope[_]]( + f: BoxedEnvelope[Box2, Env2] => HandlerResult + ): ApplicationHandler[Box2, Env2] = new ApplicationHandler[Box2, Env2] { + + override def name: String = ApplicationHandler.this.name + + override def subscriptionStartsAt( + start: SubscriptionStart, + domainTimeTracker: DomainTimeTracker, + )(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Unit] = + ApplicationHandler.this.subscriptionStartsAt(start, domainTimeTracker) + + override def apply(boxedEnvelope: BoxedEnvelope[Box2, Env2]): HandlerResult = + f(boxedEnvelope) + } + + /** Run the `other` ApplicationHandler after `this`. */ + def combineWith[Box2[+X <: Envelope[_]] <: Box[X], Env2 <: Env]( + other: ApplicationHandler[Box2, Env2] + )(implicit + ec: ExecutionContext + ): ApplicationHandler[Box2, Env2] = new ApplicationHandler[Box2, Env2] { + + override def name: String = + s"${ApplicationHandler.this.name}+${other.name}" + + override def subscriptionStartsAt( + start: SubscriptionStart, + domainTimeTracker: DomainTimeTracker, + )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = + for { + _ <- ApplicationHandler.this.subscriptionStartsAt(start, domainTimeTracker) + _ <- other.subscriptionStartsAt(start, domainTimeTracker) + } yield () + + override def apply(boxedEnvelope: BoxedEnvelope[Box2, Env2]): HandlerResult = { + for { + r1 <- ApplicationHandler.this.apply(boxedEnvelope: BoxedEnvelope[Box, Env]) + r2 <- other.apply(boxedEnvelope) + } yield Monoid[AsyncResult].combine(r1, r2) + } + } +} + +object ApplicationHandler { + + /** Creates an application handler that runs `f` on the boxed envelopes + * and ignores the [[ApplicationHandler.subscriptionStartsAt]] notifications + */ + def create[Box[+_ <: Envelope[_]], Env <: Envelope[_]](name: String)( + f: BoxedEnvelope[Box, Env] => HandlerResult + ): ApplicationHandler[Box, Env] = { + val handlerName = name + new ApplicationHandler[Box, Env] { + + override val 
name: String = handlerName + + override def subscriptionStartsAt( + start: SubscriptionStart, + domainTimeTracker: DomainTimeTracker, + )(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Unit] = + FutureUnlessShutdown.unit + + override def apply(boxedEnvelope: BoxedEnvelope[Box, Env]): HandlerResult = f(boxedEnvelope) + } + } + + /** Application handler that does nothing and always succeeds */ + @VisibleForTesting + def success[Box[+_ <: Envelope[_]], Env <: Envelope[_]]( + name: String = "success" + ): ApplicationHandler[Box, Env] = + ApplicationHandler.create(name)(_ => HandlerResult.done) +} + +/** Information passed by the [[com.digitalasset.canton.sequencing.client.SequencerClient]] + * to the [[ApplicationHandler]] where the subscription (= processing of events) starts. + * The [[ApplicationHandler]] can then initialize itself appropriately. + */ +sealed trait SubscriptionStart extends Product with Serializable with PrettyPrinting + +/** The subscription is a resubscription. The application handler may have previously been called with an event. */ +sealed trait ResubscriptionStart extends SubscriptionStart + +object SubscriptionStart { + + /** The subscription is created for the first time. + * The application handler has never been called with an event. + */ + case object FreshSubscription extends SubscriptionStart { + override def pretty: Pretty[FreshSubscription] = prettyOfObject[FreshSubscription] + } + type FreshSubscription = FreshSubscription.type + + /** The first processed event is at some timestamp after the `cleanPrehead`. + * All events up to `cleanPrehead` inclusive have previously been processed completely. + * The application handler has never been called with an event with a higher timestamp. 
+ */ + final case class CleanHeadResubscriptionStart(cleanPrehead: CantonTimestamp) + extends ResubscriptionStart { + + override def pretty: Pretty[CleanHeadResubscriptionStart] = prettyOfClass( + param("clean prehead", _.cleanPrehead) + ) + } + + /** The first processed event will be `firstReplayed`. + * + * @param cleanPreheadO The timestamp of the last event known to be clean. + * If set, this may be before, at, or after `firstReplayed`. + * If it is before `firstReplayed`, + * then `firstReplayed` is the timestamp of the first event after `cleanPreheadO`. + */ + final case class ReplayResubscriptionStart( + firstReplayed: CantonTimestamp, + cleanPreheadO: Option[CantonTimestamp], + ) extends ResubscriptionStart { + override def pretty: Pretty[ReplayResubscriptionStart] = prettyOfClass( + param("first replayed", _.firstReplayed), + paramIfDefined("clean prehead", _.cleanPreheadO), + ) + } + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/AsyncResult.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/AsyncResult.scala new file mode 100644 index 0000000000..0f384b8949 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/AsyncResult.scala @@ -0,0 +1,50 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing + +import cats.Monoid +import com.digitalasset.canton.DoNotDiscardLikeFuture +import com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, UnlessShutdown} + +import scala.concurrent.ExecutionContext +import scala.util.Try + +/** The asynchronous part of processing an event (or of a stage of its processing). 
*/ +@DoNotDiscardLikeFuture +final case class AsyncResult(unwrap: FutureUnlessShutdown[Unit]) { + def andThenF( + f: Unit => FutureUnlessShutdown[Unit] + )(implicit ec: ExecutionContext): AsyncResult = { + AsyncResult(unwrap.flatMap(f)) + } + + def transform(f: Try[UnlessShutdown[Unit]] => Try[UnlessShutdown[Unit]])(implicit + ec: ExecutionContext + ): AsyncResult = + AsyncResult(unwrap.transform(f)) + + /** Analog to [[com.digitalasset.canton.util.Thereafter.thereafter]] + * We do not provide a [[com.digitalasset.canton.util.Thereafter.thereafter]] instance + * because [[AsyncResult]] doesn't take a type argument. + */ + def thereafter(f: Try[UnlessShutdown[Unit]] => Unit)(implicit ec: ExecutionContext): AsyncResult = + transform { res => + f(res) + res + } +} + +object AsyncResult { + + /** No asynchronous processing. */ + val immediate: AsyncResult = AsyncResult(FutureUnlessShutdown.unit) + + implicit def monoidAsyncResult(implicit ec: ExecutionContext): Monoid[AsyncResult] = + new Monoid[AsyncResult] { + override def empty: AsyncResult = immediate + override def combine(x: AsyncResult, y: AsyncResult): AsyncResult = { + AsyncResult(Monoid[FutureUnlessShutdown[Unit]].combine(x.unwrap, y.unwrap)) + } + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/EnvelopeBox.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/EnvelopeBox.scala new file mode 100644 index 0000000000..1e9db98119 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/EnvelopeBox.scala @@ -0,0 +1,118 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing + +import cats.{Applicative, Traverse} +import com.digitalasset.canton.sequencing.protocol.{Envelope, SequencedEvent} +import com.digitalasset.canton.store.SequencedEventStore.{ + IgnoredSequencedEvent, + OrdinarySequencedEvent, + PossiblyIgnoredSequencedEvent, +} +import com.digitalasset.canton.tracing.Traced + +/** Type class to manipulate envelopes inside their box. + * Specializes [[cats.Traverse]] to [[protocol.Envelope]] arguments. + */ +trait EnvelopeBox[Box[+_ <: Envelope[_]]] { + + /** Make this private so that we don't arbitrarily change the contents of a + * [[com.digitalasset.canton.sequencing.protocol.SequencedEvent]] that has its serialization + * memoized as cryptographic evidence. + */ + private[sequencing] def traverse[G[_], A <: Envelope[_], B <: Envelope[_]](boxedEnvelope: Box[A])( + f: A => G[B] + )(implicit G: Applicative[G]): G[Box[B]] + + /** We can compose a [[cats.Traverse]] with an [[EnvelopeBox]], but not several [[EnvelopeBox]]es due to the + * restriction to [[protocol.Envelope]]s in the type arguments. 
+ */ + type ComposedBox[Outer[+_], +A <: Envelope[_]] = Outer[Box[A]] + + def revCompose[OuterBox[+_]](implicit + OuterBox: Traverse[OuterBox] + ): EnvelopeBox[Lambda[`+A <: Envelope[_]` => ComposedBox[OuterBox, A]]] = + new EnvelopeBox[Lambda[`+A <: Envelope[_]` => ComposedBox[OuterBox, A]]] { + override private[sequencing] def traverse[G[_], A <: Envelope[_], B <: Envelope[_]]( + boxedEnvelope: OuterBox[Box[A]] + )(f: A => G[B])(implicit G: Applicative[G]): G[OuterBox[Box[B]]] = + OuterBox.traverse(boxedEnvelope)(innerBox => EnvelopeBox.this.traverse(innerBox)(f)) + } +} + +object EnvelopeBox { + + def apply[Box[+_ <: Envelope[_]]](implicit Box: EnvelopeBox[Box]): EnvelopeBox[Box] = Box + + implicit val unsignedEnvelopeBox: EnvelopeBox[UnsignedEnvelopeBox] = { + type TracedSeqTraced[+A] = Traced[Seq[Traced[A]]] + EnvelopeBox[SequencedEvent].revCompose( + Traverse[Traced].compose[Seq].compose[Traced]: Traverse[TracedSeqTraced] + ) + } + + @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) + private def traverseOrdinarySequencedEvent[G[_], A <: Envelope[_], B <: Envelope[_]]( + ordinaryEvent: OrdinarySequencedEvent[A] + )(f: A => G[B])(implicit G: Applicative[G]): G[OrdinarySequencedEvent[B]] = { + val oldSignedEvent = ordinaryEvent.signedEvent + G.map(SequencedEvent.signedContentEnvelopeBox.traverse(ordinaryEvent.signedEvent)(f)) { + newSignedEvent => + if (newSignedEvent eq oldSignedEvent) ordinaryEvent.asInstanceOf[OrdinarySequencedEvent[B]] + else ordinaryEvent.copy(signedEvent = newSignedEvent)(ordinaryEvent.traceContext) + } + } + + @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) + private def traverseIgnoredSequencedEvent[G[_], A <: Envelope[_], B <: Envelope[_]]( + event: IgnoredSequencedEvent[A] + )(f: A => G[B])(implicit G: Applicative[G]): G[IgnoredSequencedEvent[B]] = + event.underlying match { + case None => G.pure(event.asInstanceOf[IgnoredSequencedEvent[B]]) + case Some(signedEvent) => + 
G.map(SequencedEvent.signedContentEnvelopeBox.traverse(signedEvent)(f)) { newSignedEvent => + if (newSignedEvent eq signedEvent) event.asInstanceOf[IgnoredSequencedEvent[B]] + else event.copy(underlying = Some(newSignedEvent))(event.traceContext) + } + } + + implicit val ordinarySequencedEventEnvelopeBox: EnvelopeBox[OrdinarySequencedEvent] = + new EnvelopeBox[OrdinarySequencedEvent] { + override private[sequencing] def traverse[G[_], A <: Envelope[_], B <: Envelope[_]]( + ordinaryEvent: OrdinarySequencedEvent[A] + )(f: A => G[B])(implicit G: Applicative[G]): G[OrdinarySequencedEvent[B]] = + traverseOrdinarySequencedEvent(ordinaryEvent)(f) + } + + implicit val ignoredSequencedEventEnvelopeBox: EnvelopeBox[IgnoredSequencedEvent] = + new EnvelopeBox[IgnoredSequencedEvent] { + override private[sequencing] def traverse[G[_], A <: Envelope[_], B <: Envelope[_]]( + ignoredEvent: IgnoredSequencedEvent[A] + )(f: A => G[B])(implicit G: Applicative[G]): G[IgnoredSequencedEvent[B]] = + traverseIgnoredSequencedEvent(ignoredEvent)(f) + } + + implicit val possiblyIgnoredSequencedEventEnvelopeBox + : EnvelopeBox[PossiblyIgnoredSequencedEvent] = + new EnvelopeBox[PossiblyIgnoredSequencedEvent] { + override private[sequencing] def traverse[G[_], A <: Envelope[_], B <: Envelope[_]]( + event: PossiblyIgnoredSequencedEvent[A] + )(f: A => G[B])(implicit G: Applicative[G]): G[PossiblyIgnoredSequencedEvent[B]] = + event match { + case ignored @ IgnoredSequencedEvent(_, _, _, _) => + G.widen(traverseIgnoredSequencedEvent[G, A, B](ignored)(f)) + case ordinary @ OrdinarySequencedEvent(_, _) => + G.widen(traverseOrdinarySequencedEvent(ordinary)(f)) + } + } + + private type TracedSeq[+A] = Traced[Seq[A]] + implicit val ordinaryEnvelopeBox: EnvelopeBox[OrdinaryEnvelopeBox] = + ordinarySequencedEventEnvelopeBox.revCompose(Traverse[Traced].compose[Seq]: Traverse[TracedSeq]) + + implicit val possiblyIgnoredEnvelopeBox: EnvelopeBox[PossiblyIgnoredEnvelopeBox] = + 
possiblyIgnoredSequencedEventEnvelopeBox.revCompose( + Traverse[Traced].compose[Seq]: Traverse[TracedSeq] + ) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/HandlerResult.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/HandlerResult.scala new file mode 100644 index 0000000000..263bdfa1d8 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/HandlerResult.scala @@ -0,0 +1,55 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing + +import com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, UnlessShutdown} + +import scala.concurrent.{ExecutionContext, Future} + +object HandlerResult { + + /** Denotes that the synchronous processing stage for an event has completed + * and there is no asynchronous processing for this stage. + */ + val done: HandlerResult = FutureUnlessShutdown.pure(AsyncResult.immediate) + + /** The given [[com.digitalasset.canton.lifecycle.FutureUnlessShutdown]] shall be run synchronously, i.e., + * later stages of processing the request will not start until this future has completed + * with a [[com.digitalasset.canton.lifecycle.UnlessShutdown.Outcome]]. + * Later requests will also only be processed after this future has completed + * with an [[com.digitalasset.canton.lifecycle.UnlessShutdown.Outcome]]. + * If the future fails with an exception or + * returns [[com.digitalasset.canton.lifecycle.UnlessShutdown.AbortedDueToShutdown]], + * the sequencer client will close the subscription. + */ + def synchronous(future: FutureUnlessShutdown[Unit])(implicit + ec: ExecutionContext + ): HandlerResult = + future.map(_ => AsyncResult.immediate) + + /** Embeds an evaluated [[com.digitalasset.canton.lifecycle.UnlessShutdown]] + * into a [[synchronous]] [[HandlerResult]]. 
+ */ + def unlessShutdown(x: UnlessShutdown[Unit]): HandlerResult = + FutureUnlessShutdown.lift(x.map(_ => AsyncResult.immediate)) + + /** Shorthand for `synchronous(FutureUnlessShutdown.outcomeF(future))` */ + def fromFuture(future: Future[Unit])(implicit ec: ExecutionContext): HandlerResult = + synchronous(FutureUnlessShutdown.outcomeF(future)) + + /** The given [[com.digitalasset.canton.lifecycle.FutureUnlessShutdown]] + * is an asynchronous processing part for the event. + * It can run in parallel with any of the following: + * * Earlier events' asynchronous processing + * * Later events' synchronous and asynchronous processing + * * Later stages of synchronous processing for the same event + * The event will be marked as clean only after the future has completed successfully + * with [[com.digitalasset.canton.lifecycle.UnlessShutdown.Outcome]]. + * If the future fails with an exception or + * returns [[com.digitalasset.canton.lifecycle.UnlessShutdown.AbortedDueToShutdown]], + * the sequencer client will eventually close the subscription. + */ + def asynchronous(future: FutureUnlessShutdown[Unit]): HandlerResult = + FutureUnlessShutdown.pure(AsyncResult(future)) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencedEventMonotonicityChecker.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencedEventMonotonicityChecker.scala new file mode 100644 index 0000000000..243d3cb9c2 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencedEventMonotonicityChecker.scala @@ -0,0 +1,126 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing + +import cats.syntax.functorFilter.* +import com.digitalasset.canton.SequencerCounter +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.sequencing.protocol.ClosedEnvelope +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.ErrorUtil +import com.digitalasset.canton.util.PekkoUtil.WithKillSwitch +import com.google.common.annotations.VisibleForTesting +import org.apache.pekko.NotUsed +import org.apache.pekko.stream.scaladsl.Flow + +/** Checks that the sequenced events' sequencer counters are a gap-free increasing sequencing starting at `firstSequencerCounter` + * and their timestamps increase strictly monotonically. When a violation is detected, an error is logged and + * the processing is aborted. + * + * This is normally ensured by the [[com.digitalasset.canton.sequencing.client.SequencedEventValidator]] for individual sequencer subscriptions. + * However, due to aggregating multiple subscriptions from several sequencers up to a threshold, + * the stream of events emitted by the aggregation may violate monotonicity. This additional monotonicity check + * ensures that we catch such violations before we pass the events downstream. + */ +class SequencedEventMonotonicityChecker( + firstSequencerCounter: SequencerCounter, + firstTimestampLowerBoundInclusive: CantonTimestamp, + override protected val loggerFactory: NamedLoggerFactory, +) extends NamedLogging { + import SequencedEventMonotonicityChecker.* + + /** Pekko version of the check. Pulls the kill switch and drains the source when a violation is detected. 
*/ + def flow: Flow[ + WithKillSwitch[OrdinarySerializedEvent], + WithKillSwitch[OrdinarySerializedEvent], + NotUsed, + ] = { + Flow[WithKillSwitch[OrdinarySerializedEvent]] + .statefulMap(() => initialState)( + (state, eventAndKillSwitch) => eventAndKillSwitch.traverse(onNext(state, _)), + _ => None, + ) + .mapConcat { actionAndKillSwitch => + actionAndKillSwitch.traverse { + case Emit(event) => Some(event) + case failure: MonotonicityFailure => + implicit val traceContext: TraceContext = failure.event.traceContext + logger.error(failure.message) + actionAndKillSwitch.killSwitch.shutdown() + None + case Drop => None + } + } + } + + /** [[com.digitalasset.canton.sequencing.ApplicationHandler]] version. + * @throws com.digitalasset.canton.sequencing.SequencedEventMonotonicityChecker.MonotonicityFailureException + * when a monotonicity violation is detected + */ + def handler( + handler: OrdinaryApplicationHandler[ClosedEnvelope] + ): OrdinaryApplicationHandler[ClosedEnvelope] = { + // Application handlers must be called sequentially, so a plain var is good enough here + @SuppressWarnings(Array("org.wartremover.warts.Var")) + var state: State = initialState + handler.replace { tracedEvents => + val filtered = tracedEvents.map(_.mapFilter { event => + val (nextState, action) = onNext(state, event) + state = nextState + action match { + case Emit(_) => Some(event) + case failure: MonotonicityFailure => + implicit val traceContext: TraceContext = event.traceContext + ErrorUtil.internalError(failure.asException) + case Drop => None + } + }) + handler.apply(filtered) + } + } + + private def initialState: State = + GoodState(firstSequencerCounter, firstTimestampLowerBoundInclusive) + + private def onNext(state: State, event: OrdinarySerializedEvent): (State, Action) = state match { + case Failed => (state, Drop) + case GoodState(nextSequencerCounter, lowerBoundTimestamp) => + val monotonic = + event.counter == nextSequencerCounter && event.timestamp >= lowerBoundTimestamp 
+ if (monotonic) { + val nextState = GoodState(event.counter + 1, event.timestamp.immediateSuccessor) + nextState -> Emit(event) + } else { + val error = MonotonicityFailure(nextSequencerCounter, lowerBoundTimestamp, event) + Failed -> error + } + } +} + +object SequencedEventMonotonicityChecker { + + private sealed trait Action extends Product with Serializable + private final case class Emit(event: OrdinarySerializedEvent) extends Action + private case object Drop extends Action + private final case class MonotonicityFailure( + expectedSequencerCounter: SequencerCounter, + timestampLowerBound: CantonTimestamp, + event: OrdinarySerializedEvent, + ) extends Action { + def message: String = + s"Sequencer counters and timestamps do not increase monotonically. Expected next counter=$expectedSequencerCounter with timestamp lower bound $timestampLowerBound, but received ${event.signedEvent.content}" + + def asException: Exception = new MonotonicityFailureException(message) + } + @VisibleForTesting + class MonotonicityFailureException(message: String) extends Exception(message) + + private sealed trait State extends Product with Serializable + private case object Failed extends State + private final case class GoodState( + nextSequencerCounter: SequencerCounter, + lowerBoundTimestamp: CantonTimestamp, + ) extends State +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerAggregator.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerAggregator.scala new file mode 100644 index 0000000000..02a38178cb --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerAggregator.scala @@ -0,0 +1,295 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing + +import com.daml.nonempty.{NonEmpty, NonEmptyUtil} +import com.digitalasset.canton.concurrent.FutureSupervisor +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.crypto.{CryptoPureApi, Hash, HashPurpose, Signature} +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.health.ComponentHealthState +import com.digitalasset.canton.lifecycle.{ + FlagCloseable, + FutureUnlessShutdown, + PromiseUnlessShutdown, +} +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.sequencing.SequencerAggregator.{ + MessageAggregationConfig, + SequencerAggregatorError, +} +import com.digitalasset.canton.sequencing.protocol.SignedContent +import com.digitalasset.canton.topology.SequencerId +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.ShowUtil.* +import com.google.common.annotations.VisibleForTesting + +import java.util.concurrent.atomic.AtomicReference +import java.util.concurrent.{ArrayBlockingQueue, BlockingQueue} +import scala.annotation.tailrec +import scala.collection.mutable +import scala.concurrent.{ExecutionContext, blocking} + +class SequencerAggregator( + cryptoPureApi: CryptoPureApi, + eventInboxSize: PositiveInt, + val loggerFactory: NamedLoggerFactory, + initialConfig: MessageAggregationConfig, + override val timeouts: ProcessingTimeout, + futureSupervisor: FutureSupervisor, +) extends NamedLogging + with FlagCloseable { + + private val configRef: AtomicReference[MessageAggregationConfig] = + new AtomicReference[MessageAggregationConfig](initialConfig) + def expectedSequencers: NonEmpty[Set[SequencerId]] = configRef.get().expectedSequencers + + def sequencerTrustThreshold: PositiveInt = 
configRef.get().sequencerTrustThreshold + + private case class SequencerMessageData( + eventBySequencer: Map[SequencerId, OrdinarySerializedEvent], + promise: PromiseUnlessShutdown[Either[SequencerAggregatorError, SequencerId]], + ) + + /** Queue containing received and not yet handled events. + * Used for batched processing. + */ + private val receivedEvents: BlockingQueue[OrdinarySerializedEvent] = + new ArrayBlockingQueue[OrdinarySerializedEvent](eventInboxSize.unwrap) + + private val sequenceData = mutable.TreeMap.empty[CantonTimestamp, SequencerMessageData] + + @SuppressWarnings(Array("org.wartremover.warts.Var")) + private var cursor: Option[CantonTimestamp] = None + + def eventQueue: BlockingQueue[OrdinarySerializedEvent] = receivedEvents + + private def hash(message: OrdinarySerializedEvent) = + SignedContent.hashContent( + cryptoPureApi, + message.signedEvent.content, + HashPurpose.SequencedEventSignature, + ) + + @VisibleForTesting + def combine( + messages: NonEmpty[Seq[OrdinarySerializedEvent]] + ): Either[SequencerAggregatorError, OrdinarySerializedEvent] = { + val message: OrdinarySerializedEvent = messages.head1 + val expectedMessageHash = hash(message) + val hashes: NonEmpty[Set[Hash]] = messages.map(hash).toSet + val timestampsOfSigningKey = messages.map(_.signedEvent.timestampOfSigningKey).toSet + for { + _ <- Either.cond( + hashes.forall(_ == expectedMessageHash), + (), + SequencerAggregatorError.NotTheSameContentHash(hashes), + ) + expectedTimestampOfSigningKey = message.signedEvent.timestampOfSigningKey + _ <- Either.cond( + messages.forall(_.signedEvent.timestampOfSigningKey == expectedTimestampOfSigningKey), + (), + SequencerAggregatorError.NotTheSameTimestampOfSigningKey(timestampsOfSigningKey), + ) + } yield { + val combinedSignatures: NonEmpty[Seq[Signature]] = messages.flatMap(_.signedEvent.signatures) + + val potentiallyNonEmptyTraceContext = messages + .find(_.traceContext != TraceContext.empty) + .map(_.traceContext) + 
.getOrElse(message.traceContext) + + message.copy(signedEvent = message.signedEvent.copy(signatures = combinedSignatures))( + potentiallyNonEmptyTraceContext + ) + } + } + + private def addEventToQueue(event: OrdinarySerializedEvent): Unit = { + implicit val traceContext: TraceContext = event.traceContext + logger.debug( + show"Storing event in the event inbox.\n${event.signedEvent.content}" + ) + if (!receivedEvents.offer(event)) { + logger.debug( + s"Event inbox is full. Blocking sequenced event with timestamp ${event.timestamp}." + ) + blocking { + receivedEvents.put(event) + } + logger.debug( + s"Unblocked sequenced event with timestamp ${event.timestamp}." + ) + } + } + + private def addEventToQueue( + messages: NonEmpty[List[OrdinarySerializedEvent]] + ): Either[SequencerAggregatorError, Unit] = + combine(messages).map(addEventToQueue) + + def combineAndMergeEvent( + sequencerId: SequencerId, + message: OrdinarySerializedEvent, + )(implicit + ec: ExecutionContext + ): FutureUnlessShutdown[Either[SequencerAggregatorError, Boolean]] = { + if (!expectedSequencers.contains(sequencerId)) { + throw new IllegalArgumentException(s"Unexpected sequencerId: $sequencerId") + } + blocking { + this.synchronized { + if (cursor.forall(message.timestamp > _)) { + val sequencerMessageData = updatedSequencerMessageData(sequencerId, message) + sequenceData.put(message.timestamp, sequencerMessageData): Unit + + val (nextMinimumTimestamp, nextData) = + sequenceData.headOption.getOrElse( + (message.timestamp, sequencerMessageData) + ) // returns min message.timestamp + + pushDownstreamIfConsensusIsReached(nextMinimumTimestamp, nextData) + + sequencerMessageData.promise.futureUS.map(_.map(_ == sequencerId)) + } else + FutureUnlessShutdown.pure(Right(false)) + } + } + } + + private def pushDownstreamIfConsensusIsReached( + nextMinimumTimestamp: CantonTimestamp, + nextData: SequencerMessageData, + ): Unit = { + val expectedMessages = nextData.eventBySequencer.view.filterKeys { 
sequencerId => + expectedSequencers.contains(sequencerId) + } + + if (expectedMessages.sizeCompare(sequencerTrustThreshold.unwrap) >= 0) { + cursor = Some(nextMinimumTimestamp) + sequenceData.remove(nextMinimumTimestamp): Unit + + val nonEmptyMessages = NonEmptyUtil.fromUnsafe(expectedMessages.toMap) + val messagesToCombine = nonEmptyMessages.map(_._2).toList + val (sequencerIdToNotify, _) = nonEmptyMessages.head1 + + nextData.promise + .outcome( + addEventToQueue(messagesToCombine).map(_ => sequencerIdToNotify) + ) + } + } + + private def updatedSequencerMessageData( + sequencerId: SequencerId, + message: OrdinarySerializedEvent, + )(implicit + ec: ExecutionContext + ): SequencerMessageData = { + implicit val traceContext = message.traceContext + val promise = new PromiseUnlessShutdown[Either[SequencerAggregatorError, SequencerId]]( + "replica-manager-sync-service", + futureSupervisor, + ) + val data = + sequenceData.getOrElse( + message.timestamp, + SequencerMessageData(Map(), promise), + ) + data.copy(eventBySequencer = data.eventBySequencer.updated(sequencerId, message)) + } + + def changeMessageAggregationConfig( + newConfig: MessageAggregationConfig + ): Unit = blocking { + this.synchronized { + configRef.set(newConfig) + sequenceData.headOption.foreach { case (nextMinimumTimestamp, nextData) => + pushDownstreamIfConsensusIsReached( + nextMinimumTimestamp, + nextData, + ) + } + } + } + + @SuppressWarnings(Array("NonUnitForEach")) + override protected def onClosed(): Unit = + blocking { + this.synchronized { + sequenceData.view.values + .foreach(_.promise.shutdown()) + } + } +} +object SequencerAggregator { + final case class MessageAggregationConfig( + expectedSequencers: NonEmpty[Set[SequencerId]], + sequencerTrustThreshold: PositiveInt, + ) + sealed trait SequencerAggregatorError extends Product with Serializable with PrettyPrinting + object SequencerAggregatorError { + final case class NotTheSameContentHash(hashes: NonEmpty[Set[Hash]]) + extends 
SequencerAggregatorError { + override def pretty: Pretty[NotTheSameContentHash] = + prettyOfClass(param("hashes", _.hashes)) + } + final case class NotTheSameTimestampOfSigningKey( + timestamps: NonEmpty[Set[Option[CantonTimestamp]]] + ) extends SequencerAggregatorError { + override def pretty: Pretty[NotTheSameTimestampOfSigningKey] = + prettyOfClass(param("timestamps", _.timestamps)) + } + } + + def aggregateHealthResult( + healthResult: Map[SequencerId, ComponentHealthState], + threshold: PositiveInt, + ): ComponentHealthState = { + NonEmpty.from(healthResult) match { + case None => ComponentHealthState.NotInitializedState + case Some(healthResultNE) if healthResult.sizeIs == 1 && threshold == PositiveInt.one => + // If only one sequencer ID is configured and threshold is one, forward the sequencer's health state unchanged + // for backwards compatibility + val (_, state) = healthResultNE.head1 + state + case Some(_) => + // Healthy if at least `threshold` many sequencer connections are healthy, else + // Degraded if at least `threshold` many sequencer connections are healthy or degraded, else + // Failed + + val iter = healthResult.iterator + + @tailrec + def go( + healthyCount: Int, + failed: Seq[SequencerId], + degraded: Seq[SequencerId], + ): ComponentHealthState = { + if (healthyCount >= threshold.value) ComponentHealthState.Ok() + else if (!iter.hasNext) { + val failureMsg = Option.when(failed.nonEmpty)( + s"Failed sequencer subscriptions for [${failed.sortBy(_.toProtoPrimitive).mkString(", ")}]." + ) + val degradationMsg = Option.when(degraded.nonEmpty)( + s"Degraded sequencer subscriptions for [${degraded.sortBy(_.toProtoPrimitive).mkString(", ")}]." 
+ ) + val message = Seq(failureMsg, degradationMsg).flatten.mkString(" ") + if (degraded.sizeIs >= threshold.value - healthyCount) + ComponentHealthState.degraded(message) + else ComponentHealthState.failed(message) + } else { + val (sequencerId, state) = iter.next() + if (state.isOk) go(healthyCount + 1, failed, degraded) + else if (state.isFailed) go(healthyCount, sequencerId +: failed, degraded) + else go(healthyCount, failed, sequencerId +: degraded) + } + } + + go(0, Seq.empty, Seq.empty) + } + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerAggregatorPekko.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerAggregatorPekko.scala new file mode 100644 index 0000000000..b3afeb485c --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerAggregatorPekko.scala @@ -0,0 +1,311 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing + +import cats.syntax.either.* +import cats.syntax.functor.* +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.SequencerCounter +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.crypto.{Hash, HashOps, HashPurpose} +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.health.{ + ComponentHealthState, + CompositeHealthComponent, + HealthComponent, +} +import com.digitalasset.canton.lifecycle.OnShutdownRunner +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging, TracedLogger} +import com.digitalasset.canton.sequencing.client.{ + SequencedEventValidator, + SequencerSubscriptionFactoryPekko, +} +import com.digitalasset.canton.sequencing.protocol.SignedContent +import com.digitalasset.canton.store.SequencedEventStore.OrdinarySequencedEvent +import com.digitalasset.canton.topology.{DomainId, SequencerId} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.OrderedBucketMergeHub.{ + ActiveSourceTerminated, + ControlOutput, + NewConfiguration, + OutputElement, +} +import com.digitalasset.canton.util.Thereafter.syntax.* +import com.digitalasset.canton.util.{ + ErrorUtil, + OrderedBucketMergeConfig, + OrderedBucketMergeHub, + OrderedBucketMergeHubOps, +} +import com.digitalasset.canton.version.RepresentativeProtocolVersion +import org.apache.pekko.Done +import org.apache.pekko.stream.scaladsl.{Flow, Source} +import org.apache.pekko.stream.{KillSwitch, OverflowStrategy} + +import java.util.concurrent.atomic.AtomicReference +import scala.concurrent.{ExecutionContext, Future} + +/** Aggregates sequenced events from a dynamically configurable set of + * [[com.digitalasset.canton.sequencing.client.SequencerSubscriptionPekko]]s + * until a configurable threshold is reached. 
+ * + * @param eventValidator The validator used to validate the sequenced events of the + * [[com.digitalasset.canton.sequencing.client.SequencerSubscriptionPekko]]s + * @param bufferSize How many elements to buffer for each + * [[com.digitalasset.canton.sequencing.client.SequencerSubscriptionPekko]]. + */ +class SequencerAggregatorPekko( + domainId: DomainId, + eventValidator: SequencedEventValidator, + bufferSize: PositiveInt, + hashOps: HashOps, + override protected val loggerFactory: NamedLoggerFactory, + enableInvariantCheck: Boolean, +) extends NamedLogging { + + import SequencerAggregatorPekko.* + + /** Convert a stream of sequencer configurations into a stream of aggregated sequenced events. + * + * Must be materialized at most once. + * + * @param initialCounterOrPriorEvent The sequencer counter to start the subscription from or the prior event to validate the subscription against. + * If present, the prior event's sequencer counter determines the subscription start. + */ + def aggregateFlow[E: Pretty]( + initialCounterOrPriorEvent: Either[SequencerCounter, PossiblyIgnoredSerializedEvent] + )(implicit traceContext: TraceContext, executionContext: ExecutionContext): Flow[ + OrderedBucketMergeConfig[SequencerId, HasSequencerSubscriptionFactoryPekko[E]], + Either[SubscriptionControl[E], OrdinarySerializedEvent], + (Future[Done], HealthComponent), + ] = { + val onShutdownRunner = new OnShutdownRunner.PureOnShutdownRunner(logger) + val health = new SequencerAggregatorHealth(domainId, onShutdownRunner, logger) + val ops = new SequencerAggregatorMergeOps(initialCounterOrPriorEvent, health) + val hub = new OrderedBucketMergeHub[ + SequencerId, + OrdinarySerializedEvent, + HasSequencerSubscriptionFactoryPekko[E], + SequencerCounter, + HealthComponent, + ](ops, loggerFactory, enableInvariantCheck) + Flow + .fromGraph(hub) + .map { + case OutputElement(elems) => Right(mergeBucket(elems)) + case control: SubscriptionControlInternal[E] => + logError(control) + 
+ health.updateHealth(control) + Left(control.map((_, configAndHealth) => configAndHealth._1, Predef.identity)) + } + .mapMaterializedValue { doneF => + val doneAndClosedF = doneF.thereafter { _ => onShutdownRunner.close() } + doneAndClosedF -> health + } + } + + private def mergeBucket( + elems: NonEmpty[Map[SequencerId, OrdinarySerializedEvent]] + ): OrdinarySerializedEvent = { + val (_, someElem) = elems.head1 + + // By the definition of `Bucket`, the contents, timestamp of signing key + // and the representative protocol version are the same + val content = someElem.signedEvent.content + val timestampOfSigningKey = someElem.signedEvent.timestampOfSigningKey + val representativeProtocolVersion = someElem.signedEvent.representativeProtocolVersion + + // We don't want to force trace contexts to be propagated identically. + // So let's merge them. + implicit val mergedTraceContext: TraceContext = TraceContext.ofBatch(elems.values)(logger) + + val mergedSigs = elems.flatMap { case (_, event) => event.signedEvent.signatures }.toSeq + val mergedSignedEvent = SignedContent + .create(content, mergedSigs, timestampOfSigningKey, representativeProtocolVersion) + .valueOr(err => + ErrorUtil.invalidState(s"Failed to aggregate signatures on sequenced event: $err") + ) + // TODO(#13789) How should we merge the traffic state as it's currently not part of the bucketing?
+ val mergedTrafficState = someElem.trafficState + + // We intentionally do not use the copy method + // so that we notice when fields are added + OrdinarySequencedEvent(mergedSignedEvent, mergedTrafficState)(mergedTraceContext) + } + + private def logError[E: Pretty]( + control: SubscriptionControlInternal[E] + )(implicit traceContext: TraceContext): Unit = + control match { + case ActiveSourceTerminated(sequencerId, cause) => + cause.foreach { ex => logger.error(s"Sequencer subscription for $sequencerId failed", ex) } + case NewConfiguration(_, _) => + } + + private class SequencerAggregatorMergeOps[E: Pretty]( + initialCounterOrPriorEvent: Either[SequencerCounter, PossiblyIgnoredSerializedEvent], + health: SequencerAggregatorHealth, + )(implicit val traceContext: TraceContext) + extends OrderedBucketMergeHubOps[ + SequencerId, + OrdinarySerializedEvent, + HasSequencerSubscriptionFactoryPekko[E], + SequencerCounter, + HealthComponent, + ] { + + override type Bucket = SequencerAggregatorPekko.Bucket + + override def prettyBucket: Pretty[Bucket] = implicitly[Pretty[Bucket]] + + override def bucketOf(event: OrdinarySerializedEvent): Bucket = + Bucket( + event.counter, + event.signedEvent.timestampOfSigningKey, + // keep only the content hash instead of the content itself. + // This will allow us to eventually request only signatures from some sequencers to save bandwidth + SignedContent.hashContent( + hashOps, + event.signedEvent.content, + HashPurpose.SequencedEventSignature, + ), + event.signedEvent.representativeProtocolVersion, + // TODO(#13789) What do we do about the traffic state? + // If the traffic state was covered by the signature, we wouldn't need to worry about this here, + // but then the traffic state becomes part of a proof of sequencing and thus needs to be shown to third parties. + // Clearly, this can be avoided with a Merkle tree! 
+ ) + + override def orderingOffset: Ordering[SequencerCounter] = Ordering[SequencerCounter] + + override def offsetOfBucket(bucket: Bucket): SequencerCounter = bucket.sequencerCounter + + /** The initial offset to start from */ + override def exclusiveLowerBoundForBegin: SequencerCounter = initialCounterOrPriorEvent match { + case Left(initial) => initial - 1L + case Right(priorEvent) => priorEvent.counter + } + + override def traceContextOf(event: OrdinarySerializedEvent): TraceContext = + event.traceContext + + override type PriorElement = PossiblyIgnoredSerializedEvent + + override def priorElement: Option[PossiblyIgnoredSerializedEvent] = + initialCounterOrPriorEvent.toOption + + override def toPriorElement( + output: OrderedBucketMergeHub.OutputElement[SequencerId, OrdinarySerializedEvent] + ): PriorElement = mergeBucket(output.elem) + + override def makeSource( + sequencerId: SequencerId, + config: HasSequencerSubscriptionFactoryPekko[E], + exclusiveStart: SequencerCounter, + priorElement: Option[PriorElement], + ): Source[OrdinarySerializedEvent, (KillSwitch, Future[Done], HealthComponent)] = { + val prior = priorElement.collect { case event @ OrdinarySequencedEvent(_, _) => event } + val subscription = eventValidator + .validatePekko(config.subscriptionFactory.create(exclusiveStart), prior, sequencerId) + val source = subscription.source + .buffer(bufferSize.value, OverflowStrategy.backpressure) + .mapConcat(_.unwrap match { + case Left(err) => + // Errors cannot be aggregated because they are specific to a particular sequencer or subscription. + // So we log them here and do not propagate them. + // Health reporting will pick up the termination of the sequencer connection, + // but doesn't need to know the reason for the failure. 
+ logger.warn(s"Sequencer subscription for $sequencerId failed with $err.") + // Note that we cannot tunnel the error through the aggregation as a thrown exception + // because a failure would send a cancellation signal up the stream, + // which we are trying to avoid for clean shutdown reasons. + None + case Right(event) => Some(event) + }) + source.mapMaterializedValue { case (killSwitch, doneF) => + (killSwitch, doneF, subscription.health) + } + } + } +} + +object SequencerAggregatorPekko { + type SubscriptionControl[E] = ControlOutput[ + SequencerId, + HasSequencerSubscriptionFactoryPekko[E], + SequencerCounter, + ] + + private type SubscriptionControlInternal[E] = ControlOutput[ + SequencerId, + (HasSequencerSubscriptionFactoryPekko[E], Option[HealthComponent]), + SequencerCounter, + ] + + trait HasSequencerSubscriptionFactoryPekko[E] { + def subscriptionFactory: SequencerSubscriptionFactoryPekko[E] + } + + private[SequencerAggregatorPekko] final case class Bucket( + sequencerCounter: SequencerCounter, + timestampOfSigningKey: Option[CantonTimestamp], + contentHash: Hash, + representativeProtocolVersion: RepresentativeProtocolVersion[SignedContent.type], + ) extends PrettyPrinting { + override def pretty: Pretty[Bucket] = + prettyOfClass( + param("sequencer counter", _.sequencerCounter), + paramIfDefined("timestamp of signing key", _.timestampOfSigningKey), + param("content hash", _.contentHash), + ) + } + + private[SequencerAggregatorPekko] class SequencerAggregatorHealth( + private val domainId: DomainId, + override protected val associatedOnShutdownRunner: OnShutdownRunner, + override protected val logger: TracedLogger, + ) extends CompositeHealthComponent[SequencerId, HealthComponent] + with PrettyPrinting { + + private val currentThreshold = new AtomicReference[PositiveInt](PositiveInt.one) + + override val name: String = s"sequencer-subscription-$domainId" + + override protected def initialHealthState: ComponentHealthState = + 
ComponentHealthState.NotInitializedState + + override def closingState: ComponentHealthState = + ComponentHealthState.failed(s"Disconnected from domain $domainId") + + override protected def combineDependentStates: ComponentHealthState = { + val threshold = currentThreshold.get + SequencerAggregator.aggregateHealthResult(getDependencies.fmap(_.getState), threshold) + } + + def updateHealth(control: SubscriptionControlInternal[?]): Unit = { + control match { + case NewConfiguration(newConfig, startingOffset) => + val currentlyRegisteredDependencies = getDependencies + val toRemove = currentlyRegisteredDependencies.keySet diff newConfig.sources.keySet + val toAdd = newConfig.sources.collect { case (id, (_config, Some(health))) => + (id, health) + } + val newThreshold = newConfig.threshold + val previousThreshold = currentThreshold.getAndSet(newThreshold) + alterDependencies(toRemove, toAdd) + // Separately trigger a refresh in case no dependencies had changed. + if (newThreshold != previousThreshold) + refreshFromDependencies()(TraceContext.empty) + case ActiveSourceTerminated(sequencerId, _cause) => + alterDependencies(remove = Set(sequencerId), add = Map.empty) + } + } + + override def pretty: Pretty[SequencerAggregatorHealth] = prettyOfClass( + param("domain id", _.domainId), + param("state", _.getState), + ) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerClientRecorder.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerClientRecorder.scala new file mode 100644 index 0000000000..d56b068d93 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerClientRecorder.scala @@ -0,0 +1,68 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing + +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.lifecycle.FlagCloseable +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging, TracedLogger} +import com.digitalasset.canton.sequencing.SequencerClientRecorder.{Extensions, withExtension} +import com.digitalasset.canton.sequencing.protocol.SubmissionRequest +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.tracing.TraceContext.withNewTraceContext +import com.digitalasset.canton.util.MessageRecorder + +import java.nio.file.Path + +/** Record interactions that the Sequencer client has with its domain. + * If enabled will record sends to the Sequencer and events received from the Sequencer subscription. + * Callers must call `start` with a path for recording before recording sequencer interactions. + */ +class SequencerClientRecorder( + path: Path, + override protected val timeouts: ProcessingTimeout, + protected val loggerFactory: NamedLoggerFactory, +) extends FlagCloseable + with NamedLogging { + private val submissionRecorder = new MessageRecorder(timeouts, loggerFactory) + private val eventRecorder = new MessageRecorder(timeouts, loggerFactory) + + withNewTraceContext { implicit traceContext => + logger.debug(s"Starting recording of sequencer interactions to [$path]") + path.getParent.toFile.mkdirs() + + submissionRecorder.startRecording(withExtension(path, Extensions.Submissions)) + eventRecorder.startRecording(withExtension(path, Extensions.Events)) + } + + def recordSubmission(submission: SubmissionRequest): Unit = + submissionRecorder.record(submission) + + def recordEvent(event: OrdinarySerializedEvent): Unit = + eventRecorder.record(event) + + override protected def onClosed(): Unit = { + submissionRecorder.close() + eventRecorder.close() + } +} + +object SequencerClientRecorder { + def withExtension(path: Path, extension: 
String): Path = + path.resolveSibling(path.getFileName.toString + "." + extension) + + def loadSubmissions(path: Path, logger: TracedLogger)(implicit + traceContext: TraceContext + ): List[SubmissionRequest] = + MessageRecorder.load[SubmissionRequest](withExtension(path, Extensions.Submissions), logger) + + def loadEvents(path: Path, logger: TracedLogger)(implicit + traceContext: TraceContext + ): List[OrdinarySerializedEvent] = + MessageRecorder.load[OrdinarySerializedEvent](withExtension(path, Extensions.Events), logger) + + object Extensions { + val Submissions = "submissions" + val Events = "events" + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnection.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnection.scala new file mode 100644 index 0000000000..ab648d05b3 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnection.scala @@ -0,0 +1,207 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing + +import cats.syntax.either.* +import cats.syntax.traverse.* +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.domain.api.v0 +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.networking.Endpoint +import com.digitalasset.canton.networking.grpc.ClientChannelBuilder +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.tracing.TracingConfig.Propagation +import com.digitalasset.canton.{ProtoDeserializationError, SequencerAlias} +import com.google.protobuf.ByteString +import io.grpc.netty.NettyChannelBuilder + +import java.net.URI +import java.util.concurrent.Executor + +/** Our [[com.digitalasset.canton.config.SequencerConnectionConfig]] provides a flexible structure for configuring how + * the domain and its members talk to a sequencer. It however leaves much information intentionally optional so it can + * be inferred at runtime based on information that may only be available at the point of creating a sequencer + * connection (for instance defaulting to domain connection information that a user has provided in an admin command). + * At this point these structures can then be constructed which contain all the mandatory details that sequencer clients + * need to actually connect. 
+ */ +sealed trait SequencerConnection extends PrettyPrinting { + def withAlias(alias: SequencerAlias): SequencerConnection + + def toProtoV0: v0.SequencerConnection + + @deprecated("Use addEndpoints instead", "2.7.1") + final def addConnection( + connection: String, + additionalConnections: String* + ): SequencerConnection = + addEndpoints(connection, additionalConnections *) + + @deprecated("Use addEndpoints instead", "2.7.1") + final def addConnection( + connection: URI, + additionalConnections: URI* + ): SequencerConnection = addEndpoints(connection, additionalConnections *) + + @deprecated("Use addEndpoints instead", "2.7.1") + final def addConnection( + connection: SequencerConnection, + additionalConnections: SequencerConnection* + ): SequencerConnection = addEndpoints(connection, additionalConnections *) + + def addEndpoints( + connection: String, + additionalConnections: String* + ): SequencerConnection = + addEndpoints(new URI(connection), additionalConnections.map(new URI(_)) *) + + // TODO(#15224) change this to Either + def addEndpoints( + connection: URI, + additionalConnections: URI* + ): SequencerConnection + + def addEndpoints( + connection: SequencerConnection, + additionalConnections: SequencerConnection* + ): SequencerConnection + + def sequencerAlias: SequencerAlias + + def certificates: Option[ByteString] + + def withCertificates(certificates: ByteString): SequencerConnection +} + +final case class GrpcSequencerConnection( + endpoints: NonEmpty[Seq[Endpoint]], + transportSecurity: Boolean, + customTrustCertificates: Option[ByteString], + sequencerAlias: SequencerAlias, +) extends SequencerConnection { + + override def certificates: Option[ByteString] = customTrustCertificates + + def mkChannelBuilder(clientChannelBuilder: ClientChannelBuilder, tracePropagation: Propagation)( + implicit executor: Executor + ): NettyChannelBuilder = + clientChannelBuilder + .create(endpoints, transportSecurity, executor, customTrustCertificates, 
tracePropagation) + + override def toProtoV0: v0.SequencerConnection = + v0.SequencerConnection( + v0.SequencerConnection.Type.Grpc( + v0.SequencerConnection.Grpc( + endpoints.map(_.toURI(transportSecurity).toString).toList, + transportSecurity, + customTrustCertificates, + ) + ), + sequencerAlias.toProtoPrimitive, + ) + + override def pretty: Pretty[GrpcSequencerConnection] = + prettyOfClass( + param("endpoints", _.endpoints.map(_.toURI(transportSecurity)).toList), + param("transportSecurity", _.transportSecurity), + paramIfTrue("customTrustCertificates", _.customTrustCertificates.nonEmpty), + ) + + override def addEndpoints( + connection: URI, + additionalConnections: URI* + ): SequencerConnection = + (for { + newEndpoints <- Endpoint + .fromUris(NonEmpty(Seq, connection, additionalConnections: _*)) + } yield copy(endpoints = endpoints ++ newEndpoints._1)).valueOr(err => + throw new IllegalArgumentException(err) + ) + + override def addEndpoints( + connection: SequencerConnection, + additionalConnections: SequencerConnection* + ): SequencerConnection = + SequencerConnection + .merge(this +: connection +: additionalConnections) + .valueOr(err => throw new IllegalArgumentException(err)) + + override def withCertificates(certificates: ByteString): SequencerConnection = + copy(customTrustCertificates = Some(certificates)) + + override def withAlias(alias: SequencerAlias): SequencerConnection = copy(sequencerAlias = alias) +} + +object GrpcSequencerConnection { + def create( + connection: String, + customTrustCertificates: Option[ByteString] = None, + sequencerAlias: SequencerAlias = SequencerAlias.Default, + ): Either[String, GrpcSequencerConnection] = + for { + endpointsWithTlsFlag <- Endpoint.fromUris(NonEmpty(Seq, new URI(connection))) + (endpoints, useTls) = endpointsWithTlsFlag + } yield GrpcSequencerConnection(endpoints, useTls, customTrustCertificates, sequencerAlias) + + def tryCreate( + connection: String, + customTrustCertificates: Option[ByteString] = 
None, + sequencerAlias: SequencerAlias = SequencerAlias.Default, + ): GrpcSequencerConnection = + create(connection, customTrustCertificates, sequencerAlias) match { + case Left(err) => throw new IllegalArgumentException(s"Invalid connection $connection : $err") + case Right(es) => es + } +} + +object SequencerConnection { + + def fromProtoV0( + configP: v0.SequencerConnection + ): ParsingResult[SequencerConnection] = + configP.`type` match { + case v0.SequencerConnection.Type.Empty => Left(ProtoDeserializationError.FieldNotSet("type")) + case v0.SequencerConnection.Type.Grpc(grpc) => fromGrpcProto(grpc, configP.alias) + } + + private def fromGrpcProto( + grpcP: v0.SequencerConnection.Grpc, + alias: String, + ): ParsingResult[SequencerConnection] = + for { + uris <- NonEmpty + .from(grpcP.connections.map(new URI(_))) + .toRight(ProtoDeserializationError.FieldNotSet("connections")) + endpoints <- Endpoint + .fromUris(uris) + .leftMap(err => ProtoDeserializationError.ValueConversionError("connections", err)) + sequencerAlias <- SequencerAlias.fromProtoPrimitive(alias) + } yield GrpcSequencerConnection( + endpoints._1, + grpcP.transportSecurity, + grpcP.customTrustCertificates, + sequencerAlias, + ) + + def merge(connections: Seq[SequencerConnection]): Either[String, SequencerConnection] = + for { + connectionsNel <- NonEmpty + .from(connections) + .toRight("There must be at least one sequencer connection defined") + _ <- Either.cond( + connections.forall(_.sequencerAlias == connectionsNel.head1.sequencerAlias), + (), + "Sequencer connections can only be merged of the same alias", + ) + conn <- connectionsNel.head1 match { + case grpc @ GrpcSequencerConnection(endpoints, _, _, _) => + for { + allMergedEndpoints <- connectionsNel.tail1.flatTraverse { + case grpc: GrpcSequencerConnection => Right(grpc.endpoints.forgetNE) + case _ => Left("Cannot merge grpc and http sequencer connections") + } + } yield grpc.copy(endpoints = endpoints ++ allMergedEndpoints) + } + } yield 
conn +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnections.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnections.scala new file mode 100644 index 0000000000..541749aa2f --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnections.scala @@ -0,0 +1,193 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing + +import cats.syntax.either.* +import com.daml.nonempty.{NonEmpty, NonEmptyUtil} +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.domain.api.{v0, v1} +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.{ParsingResult, parseRequiredNonEmpty} +import com.digitalasset.canton.version.{ + HasVersionedMessageCompanion, + HasVersionedMessageCompanionCommon, + HasVersionedMessageCompanionDbHelpers, + HasVersionedWrapper, + ProtoVersion, + ProtocolVersion, +} +import com.digitalasset.canton.{ProtoDeserializationError, SequencerAlias} +import com.google.protobuf.ByteString + +import java.net.URI + +final case class SequencerConnections private ( + aliasToConnection: NonEmpty[Map[SequencerAlias, SequencerConnection]], + sequencerTrustThreshold: PositiveInt, +) extends HasVersionedWrapper[SequencerConnections] + with PrettyPrinting { + require( + aliasToConnection.sizeIs >= sequencerTrustThreshold.unwrap, + s"sequencerTrustThreshold cannot be greater than number of sequencer connections. 
Found threshold of $sequencerTrustThreshold and ${aliasToConnection.size} sequencer connections", + ) + + aliasToConnection.foreach { case (alias, connection) => + require( + alias == connection.sequencerAlias, + "SequencerAlias in the Map must match SequencerConnection.sequencerAlias", + ) + } + + def default: SequencerConnection = aliasToConnection.head1._2 + + /** In case of BFT domain - multiple sequencers are required for proper functioning. + * Some functionalities are only available in non-bft domain. + * When nonBftSetup is false, it means that more than one sequencer connection is provided which doesn't imply a bft domain. + */ + def nonBftSetup: Boolean = aliasToConnection.sizeIs == 1 + + def connections: NonEmpty[Seq[SequencerConnection]] = aliasToConnection.map(_._2).toSeq + + def modify( + sequencerAlias: SequencerAlias, + m: SequencerConnection => SequencerConnection, + ): SequencerConnections = + aliasToConnection + .get(sequencerAlias) + .map { connection => + SequencerConnections( + aliasToConnection.updated( + sequencerAlias, + m(connection), + ), + sequencerTrustThreshold, + ) + } + .getOrElse(this) + + def addEndpoints( + sequencerAlias: SequencerAlias, + connection: URI, + additionalConnections: URI* + ): SequencerConnections = + (Seq(connection) ++ additionalConnections).foldLeft(this) { case (acc, elem) => + acc.modify(sequencerAlias, _.addEndpoints(elem)) + } + + def addEndpoints( + sequencerAlias: SequencerAlias, + connection: SequencerConnection, + additionalConnections: SequencerConnection* + ): SequencerConnections = + (Seq(connection) ++ additionalConnections).foldLeft(this) { case (acc, elem) => + acc.modify(sequencerAlias, _.addEndpoints(elem)) + } + + def withCertificates( + sequencerAlias: SequencerAlias, + certificates: ByteString, + ): SequencerConnections = + modify(sequencerAlias, _.withCertificates(certificates)) + + override def pretty: Pretty[SequencerConnections] = + prettyOfParam(_.aliasToConnection.forgetNE) + + def 
toProtoV0: Seq[v0.SequencerConnection] = connections.map(_.toProtoV0) + + def toProtoV1: v1.SequencerConnections = + new v1.SequencerConnections(connections.map(_.toProtoV0), sequencerTrustThreshold.unwrap) + + override protected def companionObj: HasVersionedMessageCompanionCommon[SequencerConnections] = + SequencerConnections +} + +object SequencerConnections + extends HasVersionedMessageCompanion[SequencerConnections] + with HasVersionedMessageCompanionDbHelpers[SequencerConnections] { + + def single(connection: SequencerConnection): SequencerConnections = + new SequencerConnections( + NonEmpty.mk(Seq, (connection.sequencerAlias, connection)).toMap, + PositiveInt.tryCreate(1), + ) + + def many( + connections: NonEmpty[Seq[SequencerConnection]], + sequencerTrustThreshold: PositiveInt, + ): Either[String, SequencerConnections] = + if (connections.sizeIs == 1) { + Right(SequencerConnections.single(connections.head1)) + } else if (connections.map(_.sequencerAlias).toSet.sizeCompare(connections) < 0) { + val duplicatesAliases = connections.map(_.sequencerAlias).groupBy(identity).collect { + case (alias, aliases) if aliases.lengthCompare(1) > 0 => alias + } + Left(s"Non-unique sequencer aliases detected: $duplicatesAliases") + } else + Either + .catchOnly[IllegalArgumentException]( + new SequencerConnections( + connections.map(conn => (conn.sequencerAlias, conn)).toMap, + sequencerTrustThreshold, + ) + ) + .leftMap(_.getMessage) + + def tryMany( + connections: Seq[SequencerConnection], + sequencerTrustThreshold: PositiveInt, + ): SequencerConnections = { + many(NonEmptyUtil.fromUnsafe(connections), sequencerTrustThreshold).valueOr(err => + throw new IllegalArgumentException(err) + ) + } + + private def fromProtoV0( + fieldName: String, + connections: Seq[v0.SequencerConnection], + sequencerTrustThreshold: PositiveInt, + ): ParsingResult[SequencerConnections] = for { + sequencerConnectionsNes <- parseRequiredNonEmpty( + SequencerConnection.fromProtoV0, + fieldName, + 
connections, + ) + _ <- Either.cond( + sequencerConnectionsNes.map(_.sequencerAlias).toSet.size == sequencerConnectionsNes.size, + (), + ProtoDeserializationError.ValueConversionError( + fieldName, + "Every sequencer connection must have a unique sequencer alias", + ), + ) + sequencerConnections <- many(sequencerConnectionsNes, sequencerTrustThreshold).leftMap( + ProtoDeserializationError.InvariantViolation(_) + ) + } yield sequencerConnections + + def fromProtoV0( + sequencerConnection: Seq[v0.SequencerConnection], + sequencerTrustThreshold: Int, + ): ParsingResult[SequencerConnections] = + ProtoConverter + .parsePositiveInt(sequencerTrustThreshold) + .flatMap(fromProtoV0("sequencer_connections", sequencerConnection, _)) + + def fromProtoV1( + sequencerConnections: v1.SequencerConnections + ): ParsingResult[SequencerConnections] = + ProtoConverter + .parsePositiveInt(sequencerConnections.sequencerTrustThreshold) + .flatMap(fromProtoV0("sequencer_connections", sequencerConnections.sequencerConnections, _)) + + override def name: String = "sequencer connections" + + val supportedProtoVersions: SupportedProtoVersions = SupportedProtoVersions( + ProtoVersion(1) -> ProtoCodec( + ProtocolVersion.v30, + supportedProtoVersion(v1.SequencerConnections)(fromProtoV1), + _.toProtoV1.toByteString, + ) + ) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/TrafficControlParameters.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/TrafficControlParameters.scala new file mode 100644 index 0000000000..eff0241dcb --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/TrafficControlParameters.scala @@ -0,0 +1,74 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing + +import com.digitalasset.canton.config.RequireTypes.{NonNegativeLong, PositiveInt} +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.protocol.v0 as protoV0 +import com.digitalasset.canton.sequencing.TrafficControlParameters.{ + DefaultBaseTrafficAmount, + DefaultMaxBaseTrafficAccumulationDuration, + DefaultReadVsWriteScalingFactor, +} +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.time + +/** Traffic control configuration values - stored as dynamic domain parameters + * + * @param maxBaseTrafficAmount maximum amount of bytes per maxBaseTrafficAccumulationDuration acquired as "free" traffic per member + * @param readVsWriteScalingFactor multiplier used to compute cost of an event. In per ten-mil (1 / 10 000). Defaults to 200 (=2%). + * A multiplier of 2% means the base cost will be increased by 2% to produce the effective cost. 
+ * @param maxBaseTrafficAccumulationDuration maximum amount of time the base rate traffic will accumulate before being capped + */ +final case class TrafficControlParameters( + maxBaseTrafficAmount: NonNegativeLong = DefaultBaseTrafficAmount, + readVsWriteScalingFactor: PositiveInt = DefaultReadVsWriteScalingFactor, + maxBaseTrafficAccumulationDuration: time.NonNegativeFiniteDuration = + DefaultMaxBaseTrafficAccumulationDuration, +) extends PrettyPrinting { + lazy val baseRate: NonNegativeLong = + NonNegativeLong.tryCreate( + maxBaseTrafficAmount.value / maxBaseTrafficAccumulationDuration.unwrap.toSeconds + ) + + def toProtoV0: protoV0.TrafficControlParameters = protoV0.TrafficControlParameters( + maxBaseTrafficAmount.value, + Some(maxBaseTrafficAccumulationDuration.toProtoPrimitive), + readVsWriteScalingFactor.value, + ) + + override def pretty: Pretty[TrafficControlParameters] = prettyOfClass( + param("max base traffic amount", _.maxBaseTrafficAmount), + param("read vs write scaling factor", _.readVsWriteScalingFactor), + param("max base traffic accumulation duration", _.maxBaseTrafficAccumulationDuration), + ) +} + +object TrafficControlParameters { + // Default is computed such that 10 txs of 20KB can be sequenced during the max traffic accumulation window + val DefaultBaseTrafficAmount: NonNegativeLong = NonNegativeLong.tryCreate(10 * 20 * 1024) + val DefaultReadVsWriteScalingFactor: PositiveInt = + PositiveInt.tryCreate(200) + val DefaultMaxBaseTrafficAccumulationDuration: time.NonNegativeFiniteDuration = + time.NonNegativeFiniteDuration.apply(time.PositiveSeconds.tryOfMinutes(10L)) + + def fromProtoV0( + proto: protoV0.TrafficControlParameters + ): ParsingResult[TrafficControlParameters] = { + for { + maxBaseTrafficAmount <- ProtoConverter.parseNonNegativeLong(proto.maxBaseTrafficAmount) + maxBaseTrafficAccumulationDuration <- ProtoConverter.parseRequired( + time.NonNegativeFiniteDuration.fromProtoPrimitive("max_base_traffic_accumulation_duration"), + 
"max_base_traffic_accumulation_duration", + proto.maxBaseTrafficAccumulationDuration, + ) + scalingFactor <- ProtoConverter.parsePositiveInt(proto.readVsWriteScalingFactor) + } yield TrafficControlParameters( + maxBaseTrafficAmount, + scalingFactor, + maxBaseTrafficAccumulationDuration, + ) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/authentication/AuthenticationToken.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/authentication/AuthenticationToken.scala new file mode 100644 index 0000000000..3ad0310f6a --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/authentication/AuthenticationToken.scala @@ -0,0 +1,73 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.authentication + +import cats.syntax.either.* +import com.digitalasset.canton.checked +import com.digitalasset.canton.config.CantonRequireTypes.String300 +import com.digitalasset.canton.crypto.RandomOps +import com.digitalasset.canton.serialization.{ + DefaultDeserializationError, + DeserializationError, + HasCryptographicEvidence, +} +import com.digitalasset.canton.store.db.{DbDeserializationException, DbSerializationException} +import com.digitalasset.canton.util.HexString +import com.google.protobuf.ByteString +import slick.jdbc.{GetResult, SetParameter} + +final case class AuthenticationToken private (private val bytes: ByteString) + extends HasCryptographicEvidence { + def toProtoPrimitive: ByteString = bytes + + def toLengthLimitedHexString: String300 = + // Authentication tokens have at most 150 bytes + checked(String300.tryCreate(HexString.toHexString(this.toProtoPrimitive))) + + override def getCryptographicEvidence: ByteString = bytes +} + +object AuthenticationToken { + + /** As of now, the database schemas can only handle authentication 
tokens up to a length of 150 bytes. Thus the length of an [[AuthenticationToken]] should never exceed that. + * If we ever want to create an [[AuthenticationToken]] larger than that, we can increase it up to 500 bytes after which we are limited by Oracle length limits. + * See the documentation at [[com.digitalasset.canton.config.CantonRequireTypes.LengthLimitedString]] for more details. + */ + val length: Int = 20 + + def generate(randomOps: RandomOps): AuthenticationToken = { + new AuthenticationToken(randomOps.generateRandomByteString(length)) + } + + def fromProtoPrimitive( + bytes: ByteString + ): Either[DeserializationError, AuthenticationToken] = + Either.cond( + bytes.size() == length, + new AuthenticationToken(bytes), + DefaultDeserializationError(s"Authentication token of wrong size: ${bytes.size()}"), + ) + + def tryFromProtoPrimitive(bytes: ByteString): AuthenticationToken = + fromProtoPrimitive(bytes).valueOr(err => + throw new IllegalArgumentException(s"Invalid authentication token: $err") + ) + + implicit val setAuthenticationTokenParameter: SetParameter[AuthenticationToken] = + (token, pp) => pp >> token.toLengthLimitedHexString + + implicit val getAuthenticationTokenResult: GetResult[AuthenticationToken] = GetResult { r => + val hexString = r.nextString() + if (hexString.length > String300.maxLength) + throw new DbDeserializationException( + s"Base16-encoded authentication token of length ${hexString.length} exceeds allowed limit of ${String300.maxLength}." 
+ ) + HexString + .parseToByteString(hexString) + .map(new AuthenticationToken(_)) + .getOrElse( + throw new DbSerializationException(s"Could not deserialize authentication token from db") + ) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/authentication/AuthenticationTokenProvider.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/authentication/AuthenticationTokenProvider.scala new file mode 100644 index 0000000000..94203b292d --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/authentication/AuthenticationTokenProvider.scala @@ -0,0 +1,177 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.authentication + +import cats.data.EitherT +import cats.syntax.either.* +import cats.syntax.option.* +import com.daml.nameof.NameOf.functionFullName +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.common.domain.ServiceAgreementId +import com.digitalasset.canton.config.RequireTypes.NonNegativeInt +import com.digitalasset.canton.config.{NonNegativeFiniteDuration, ProcessingTimeout} +import com.digitalasset.canton.crypto.{Crypto, Fingerprint, Nonce} +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.domain.api.v0.Authentication.Response.Value +import com.digitalasset.canton.domain.api.v0.SequencerAuthenticationServiceGrpc.SequencerAuthenticationServiceStub +import com.digitalasset.canton.domain.api.v0.{Authentication, Challenge} +import com.digitalasset.canton.lifecycle.{FlagCloseable, FutureUnlessShutdown} +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.sequencing.authentication.grpc.AuthenticationTokenWithExpiry +import com.digitalasset.canton.serialization.ProtoConverter +import 
com.digitalasset.canton.topology.{DomainId, Member} +import com.digitalasset.canton.tracing.{TraceContext, TraceContextGrpc} +import com.digitalasset.canton.util.retry.Pause +import com.digitalasset.canton.util.retry.RetryUtil.NoExnRetryable +import com.digitalasset.canton.version.ProtocolVersion +import io.grpc.Status + +import scala.concurrent.{ExecutionContext, Future} + +/** Configures authentication token fetching + * + * @param refreshAuthTokenBeforeExpiry how much time before the auth token expires should we fetch a new one? + */ +final case class AuthenticationTokenManagerConfig( + refreshAuthTokenBeforeExpiry: NonNegativeFiniteDuration = + AuthenticationTokenManagerConfig.defaultRefreshAuthTokenBeforeExpiry, + retries: NonNegativeInt = AuthenticationTokenManagerConfig.defaultRetries, + pauseRetries: NonNegativeFiniteDuration = AuthenticationTokenManagerConfig.defaultPauseRetries, +) +object AuthenticationTokenManagerConfig { + val defaultRefreshAuthTokenBeforeExpiry: NonNegativeFiniteDuration = + NonNegativeFiniteDuration.ofSeconds(20) + val defaultRetries: NonNegativeInt = NonNegativeInt.tryCreate(20) + val defaultPauseRetries: NonNegativeFiniteDuration = NonNegativeFiniteDuration.ofMillis(500) +} + +/** Fetch an authentication token from the sequencer by using the sequencer authentication service */ +class AuthenticationTokenProvider( + domainId: DomainId, + member: Member, + agreementId: Option[ServiceAgreementId], + crypto: Crypto, + supportedProtocolVersions: Seq[ProtocolVersion], + config: AuthenticationTokenManagerConfig, + override protected val timeouts: ProcessingTimeout, + protected val loggerFactory: NamedLoggerFactory, +)(implicit ec: ExecutionContext) + extends NamedLogging + with FlagCloseable { + + private def shutdownStatus = + Status.CANCELLED.withDescription("Aborted fetching token due to my node shutdown") + + def generateToken( + authenticationClient: SequencerAuthenticationServiceStub + ): EitherT[Future, Status, 
AuthenticationTokenWithExpiry] = { + // this should be called by a grpc client interceptor + implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext + performUnlessClosingEitherT(functionFullName, shutdownStatus) { + def generateTokenET: Future[Either[Status, AuthenticationTokenWithExpiry]] = + (for { + challenge <- getChallenge(authenticationClient) + nonce <- Nonce + .fromProtoPrimitive(challenge.nonce) + .leftMap(err => Status.INVALID_ARGUMENT.withDescription(s"Invalid nonce: $err")) + .toEitherT[Future] + token <- authenticate(authenticationClient, nonce, challenge.fingerprints) + } yield token).value + + EitherT { + Pause( + logger, + this, + maxRetries = config.retries.value, + delay = config.pauseRetries.underlying, + operationName = "generate sequencer authentication token", + ).unlessShutdown(FutureUnlessShutdown.outcomeF(generateTokenET), NoExnRetryable) + .onShutdown(Left(shutdownStatus)) + } + } + } + + private def getChallenge( + authenticationClient: SequencerAuthenticationServiceStub + ): EitherT[Future, Status, Challenge.Success] = EitherT { + import com.digitalasset.canton.domain.api.v0.Challenge.Response.Value.{Empty, Failure, Success} + authenticationClient + .challenge( + Challenge + .Request(member.toProtoPrimitive, supportedProtocolVersions.map(_.toProtoPrimitiveS)) + ) + .map(response => response.value) + .map { + case Success(success) => Right(success) + case Failure(Challenge.Failure(code, reason)) => + Left(Status.fromCodeValue(code).withDescription(reason)) + case Empty => + Left( + Status.INTERNAL.withDescription( + "Problem with domain handshake with challenge. Received empty response from domain." 
+ ) + ) + } + } + import cats.syntax.traverse.* + private def authenticate( + authenticationClient: SequencerAuthenticationServiceStub, + nonce: Nonce, + fingerprintsP: Seq[String], + )(implicit tc: TraceContext): EitherT[Future, Status, AuthenticationTokenWithExpiry] = + for { + fingerprintsValid <- fingerprintsP + .traverse(Fingerprint.fromProtoPrimitive) + .leftMap(err => Status.INVALID_ARGUMENT.withDescription(err.toString)) + .toEitherT[Future] + fingerprintsNel <- NonEmpty + .from(fingerprintsValid) + .toRight( + Status.INVALID_ARGUMENT + .withDescription(s"Failed to deserialize fingerprints $fingerprintsP") + ) + .toEitherT[Future] + signature <- ParticipantAuthentication + .signDomainNonce( + member, + nonce, + domainId, + fingerprintsNel, + agreementId, + crypto, + ) + .leftMap(err => Status.INTERNAL.withDescription(err.toString)) + token <- EitherT { + authenticationClient + .authenticate( + Authentication.Request( + member = member.toProtoPrimitive, + signature = signature.toProtoV0.some, + nonce = nonce.toProtoPrimitive, + ) + ) + .map(response => response.value) + .map { + case Value.Success(Authentication.Success(tokenP, expiryOP)) => + (for { + token <- AuthenticationToken.fromProtoPrimitive(tokenP).leftMap(_.toString) + expiresAtP <- ProtoConverter.required("expires_at", expiryOP).leftMap(_.toString) + expiresAt <- CantonTimestamp.fromProtoPrimitive(expiresAtP).leftMap(_.toString) + } yield AuthenticationTokenWithExpiry(token, expiresAt)) + .leftMap(err => + Status.INTERNAL.withDescription(s"Received invalid authentication token: $err") + ) + case Value.Failure(Authentication.Failure(code, reason)) => + Left(Status.fromCodeValue(code).withDescription(reason)) + case Value.Empty => + Left( + Status.INTERNAL.withDescription( + "Problem authenticating participant. Received empty response from domain." 
+ ) + ) + } + } + } yield token + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/authentication/MemberAuthentication.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/authentication/MemberAuthentication.scala new file mode 100644 index 0000000000..45f85e3d76 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/authentication/MemberAuthentication.scala @@ -0,0 +1,157 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.authentication + +import cats.data.EitherT +import cats.syntax.parallel.* +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.common.domain.ServiceAgreementId +import com.digitalasset.canton.crypto.* +import com.digitalasset.canton.sequencing.authentication.MemberAuthentication.{ + AuthenticationError, + FailedToSign, + NoKeysRegistered, +} +import com.digitalasset.canton.topology.{DomainId, *} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.FutureInstances.* + +import scala.concurrent.{ExecutionContext, Future} + +sealed trait MemberAuthentication { + + def hashDomainNonce( + nonce: Nonce, + domainId: DomainId, + agreementId: Option[ServiceAgreementId], + pureCrypto: CryptoPureApi, + ): Hash + + /** Participant concatenates the nonce with the domain's id and signs it (step 3) + */ + def signDomainNonce( + member: Member, + nonce: Nonce, + domainId: DomainId, + possibleSigningKeys: NonEmpty[Seq[Fingerprint]], + agreementId: Option[ServiceAgreementId], + crypto: Crypto, + )(implicit + ec: ExecutionContext, + tc: TraceContext, + ): EitherT[Future, AuthenticationError, Signature] = { + val hash = hashDomainNonce(nonce, domainId, agreementId, crypto.pureCrypto) + + for { + // see if we have any of the possible keys that could be used to sign + 
availableSigningKey <- possibleSigningKeys.forgetNE + .parFilterA(key => crypto.cryptoPrivateStore.existsSigningKey(key)) + .map(_.headOption) // the first we find is as good as any + .leftMap(_ => NoKeysRegistered(member)) + .subflatMap(_.toRight[AuthenticationError](NoKeysRegistered(member))) + sig <- crypto.privateCrypto + .sign(hash, availableSigningKey) + .leftMap[AuthenticationError](FailedToSign(member, _)) + } yield sig + } + + /** Hash the common fields of the nonce. + * Implementations of MemberAuthentication can then add their own fields as appropriate. + */ + protected def commonNonce(pureApi: CryptoPureApi, nonce: Nonce, domainId: DomainId): HashBuilder = + pureApi + .build(HashPurpose.AuthenticationToken) + .addWithoutLengthPrefix( + nonce.getCryptographicEvidence + ) // Nonces have a fixed length so it's fine to not add a length prefix + .add(domainId.toProtoPrimitive) +} + +object ParticipantAuthentication extends MemberAuthentication { + def hashDomainNonce( + nonce: Nonce, + domainId: DomainId, + agreementId: Option[ServiceAgreementId], + pureApi: CryptoPureApi, + ): Hash = { + val builder = commonNonce(pureApi, nonce, domainId) + agreementId.foreach(ag => builder.add(ag.unwrap)) + builder.finish() + } +} + +object DomainEntityAuthentication extends MemberAuthentication { + override def hashDomainNonce( + nonce: Nonce, + domainId: DomainId, + agreementId: Option[ServiceAgreementId], + pureApi: CryptoPureApi, + ): Hash = + // we don't expect domain entities to use the agreement-id, so just exclude it + commonNonce(pureApi, nonce, domainId).finish() +} + +object MemberAuthentication { + + import com.digitalasset.canton.util.ShowUtil.* + + def apply(member: Member): Either[AuthenticationError, MemberAuthentication] = member match { + case _: ParticipantId => Right(ParticipantAuthentication) + case _: MediatorId => Right(DomainEntityAuthentication) + case _: DomainTopologyManagerId => Right(DomainEntityAuthentication) + case _: SequencerId => 
Left(AuthenticationNotSupportedForMember(member)) + case _: UnauthenticatedMemberId => Left(AuthenticationNotSupportedForMember(member)) + } + + sealed abstract class AuthenticationError(val reason: String, val code: String) + final case class NoKeysRegistered(member: Member) + extends AuthenticationError(s"Member $member has no keys registered", "NoKeysRegistered") + final case class FailedToSign(member: Member, error: SigningError) + extends AuthenticationError("Failed to sign nonce", "FailedToSign") + final case class MissingNonce(member: Member) + extends AuthenticationError( + s"Member $member has not been previously assigned a handshake nonce", + "MissingNonce", + ) + final case class InvalidSignature(member: Member) + extends AuthenticationError( + s"Given signature for member $member is invalid", + "InvalidSignature", + ) + final case class MissingToken(member: Member) + extends AuthenticationError( + s"Authentication token for member $member has expired. Please reauthenticate.", + "MissingToken", + ) + final case class NonMatchingDomainId(member: Member, domainId: DomainId) + extends AuthenticationError( + show"Domain id $domainId provided by member $member does not match the domain id of the domain the participant is trying to connect to", + "NonMatchingDomainId", + ) + final case class ParticipantDisabled(participantId: ParticipantId) + extends AuthenticationError(s"Participant $participantId is disabled", "ParticipantDisabled") + + final case class MediatorDisabled(mediator: MediatorId) + extends AuthenticationError(s"Mediator $mediator is disabled", "MediatorDisabled") + + final case class TokenVerificationException(member: String) + extends AuthenticationError( + s"Due to an internal error, the server side token lookup for member $member failed", + "VerifyTokenTimeout", + ) + final case class ServiceAgreementAcceptanceError(member: Member, error: String) + extends AuthenticationError(reason = error, code = "ServiceAgreementAcceptanceError") + final 
case class AuthenticationNotSupportedForMember(member: Member) + extends AuthenticationError( + reason = s"Authentication for member type is not supported: $member", + code = "UnsupportedMember", + ) + final object PassiveSequencer + extends AuthenticationError( + reason = + "Sequencer is currently passive. Connect to a different sequencer and retry the request or wait for the sequencer to become active again.", + code = "PassiveSequencer", + ) + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/authentication/grpc/AuthenticationTokenManager.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/authentication/grpc/AuthenticationTokenManager.scala new file mode 100644 index 0000000000..63454cf642 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/authentication/grpc/AuthenticationTokenManager.scala @@ -0,0 +1,150 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.sequencing.authentication.grpc

import cats.data.EitherT
import cats.implicits.*
import com.digitalasset.canton.DiscardOps
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.sequencing.authentication.{
  AuthenticationToken,
  AuthenticationTokenManagerConfig,
}
import com.digitalasset.canton.time.Clock
import com.digitalasset.canton.tracing.{TraceContext, TraceContextGrpc}
import com.digitalasset.canton.util.Thereafter.syntax.*
import io.grpc.Status

import java.util.concurrent.atomic.AtomicReference
import scala.concurrent.{ExecutionContext, Future, Promise, blocking}
import scala.util.{Failure, Success}

/** An authentication token paired with the timestamp at which it expires. */
final case class AuthenticationTokenWithExpiry(
    token: AuthenticationToken,
    expiresAt: CantonTimestamp,
)

/** Attempts to hold a valid authentication token.
  * The first token will not be fetched until `getToken` is called for the first time.
  * Subsequent calls to `getToken` before the token is obtained will be resolved for the first token.
  * `getToken` always returns a `EitherT[Future, ...]` but if a token is already available will be completed immediately with that token.
  *
  * @param obtainToken effectful call fetching a fresh token (and its expiry) from the sequencer
  * @param isClosed    consulted before scheduling/starting background refreshes so no work is
  *                    kicked off once the owning component is shutting down
  * @param config      controls how far before expiry a proactive refresh is scheduled
  * @param clock       used to schedule the proactive background refresh
  */
class AuthenticationTokenManager(
    obtainToken: TraceContext => EitherT[Future, Status, AuthenticationTokenWithExpiry],
    isClosed: => Boolean,
    config: AuthenticationTokenManagerConfig,
    clock: Clock,
    protected val loggerFactory: NamedLoggerFactory,
)(implicit executionContext: ExecutionContext)
    extends NamedLogging {

  // Three-state machine: no token yet / a refresh in flight / a token held.
  sealed trait State
  case object NoToken extends State
  case class Refreshing(pending: EitherT[Future, Status, AuthenticationTokenWithExpiry])
      extends State
  case class HaveToken(token: AuthenticationToken) extends State

  private val state = new AtomicReference[State](NoToken)

  /** Request a token.
    * If a token is immediately available the returned future will be immediately completed.
    * If there is no token it will cause a token refresh to start and be completed once obtained.
    * If there is a refresh already in progress it will be completed with this refresh.
    */
  def getToken: EitherT[Future, Status, AuthenticationToken] = blocking {
    // updates must be synchronized, as we are triggering refreshes from here
    // and the AtomicReference.updateAndGet requires the update to be side-effect free
    synchronized {
      state.get() match {
        // we are already refreshing, so pass future result
        case Refreshing(pending) => pending.map(_.token)
        // we have a token, so share it
        case HaveToken(token) => EitherT.rightT[Future, Status](token)
        // there is no token yet, so start refreshing and return pending result
        case NoToken =>
          createRefreshTokenFuture()
      }
    }
  }

  /** Invalid the current token if it matches the provided value.
    * Although unlikely, the token must be provided here in case a response terminates after a new token has already been generated.
    */
  def invalidateToken(invalidToken: AuthenticationToken): Unit = {
    val _ = state.updateAndGet {
      case HaveToken(token) if invalidToken == token => NoToken
      case other => other
    }
  }

  private def createRefreshTokenFuture(): EitherT[Future, Status, AuthenticationToken] = {
    implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext
    // syncP gates obtainToken until the Refreshing state has been published below,
    // so completeRefresh can reliably recognize this refresh as the current one.
    val syncP = Promise[Unit]()
    val refresh = EitherT.right(syncP.future).flatMap(_ => obtainToken(traceContext))

    logger.debug("Refreshing authentication token")

    // Transition out of Refreshing only if our refresh is still the current one
    // (a newer refresh may have superseded it in the meantime).
    def completeRefresh(result: State): Unit = {
      state.updateAndGet {
        case Refreshing(pending) if pending == refresh => result
        case other => other
      }.discard
    }

    // asynchronously update the state once completed, one way or another
    val refreshTransformed = refresh.value.thereafter {
      case Failure(exception) =>
        exception match {
          case ex: io.grpc.StatusRuntimeException
              if ex.getStatus.getCode == io.grpc.Status.Code.CANCELLED =>
            logger.info("Token refresh cancelled", ex)
          case ex: io.grpc.StatusRuntimeException
              if ex.getStatus.getCode == io.grpc.Status.Code.UNAVAILABLE &&
                ex.getMessage.contains("Channel shutdown invoked") =>
            logger.info("Token refresh aborted due to shutdown", ex)
          case ex: io.grpc.StatusRuntimeException =>
            // flatten the exception's cause chain into one log line
            def collectCause(ex: Throwable): Seq[String] = {
              Seq(ex.getMessage) ++ Option(ex.getCause).toList.flatMap(collectCause)
            }
            val causes = collectCause(ex).mkString(", ")
            logger.warn(s"Token refresh failed with ${ex.getStatus} / $causes")
          case _ => logger.warn("Token refresh failed", exception)
        }
        completeRefresh(NoToken)
      case Success(Left(error)) =>
        if (error.getCode == Status.Code.CANCELLED)
          logger.debug("Token refresh cancelled due to shutdown")
        else
          logger.warn(s"Token refresh encountered error: $error")
        completeRefresh(NoToken)
      case Success(Right(AuthenticationTokenWithExpiry(newToken, expiresAt))) =>
        logger.debug("Token refresh complete")
        // proactively refresh the token before it expires
        scheduleRefreshBefore(expiresAt)
        completeRefresh(HaveToken(newToken))
    }

    val res = Refreshing(refresh)
    state.set(res)
    // only kick off computation once the state is set
    syncP.success(())
    EitherT(refreshTransformed).map(_.token)
  }

  // Schedule a background refresh shortly before the token expires, unless closing.
  private def scheduleRefreshBefore(expiresAt: CantonTimestamp): Unit = {
    if (!isClosed) {
      clock
        .scheduleAt(
          backgroundRefreshToken,
          expiresAt.minus(config.refreshAuthTokenBeforeExpiry.asJava),
        )
        .discard
    }
  }

  // Scheduled callback: start a refresh unless the owner has been closed in the meantime.
  private def backgroundRefreshToken(_now: CantonTimestamp): Unit = if (!isClosed) {
    createRefreshTokenFuture().discard
  }

}
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.sequencing.authentication.grpc

import com.digitalasset.canton.config.RequireTypes.Port
import com.digitalasset.canton.networking.Endpoint
import com.digitalasset.canton.sequencing.authentication.AuthenticationToken
import com.google.protobuf.ByteString
import io.grpc.Metadata
import io.grpc.Metadata.{ASCII_STRING_MARSHALLER, AsciiMarshaller, BinaryMarshaller}

/** gRPC metadata keys and marshallers used by the sequencer authentication protocol. */
object Constant {
  // Serializes authentication tokens to/from their cryptographic byte representation.
  val AUTH_TOKEN_MARSHALLER: BinaryMarshaller[AuthenticationToken] =
    new BinaryMarshaller[AuthenticationToken] {
      override def toBytes(value: AuthenticationToken): Array[Byte] =
        value.getCryptographicEvidence.toByteArray
      override def parseBytes(serialized: Array[Byte]): AuthenticationToken =
        AuthenticationToken.tryFromProtoPrimitive(ByteString.copyFrom(serialized))
    }
  // Serializes endpoints as "<host>:<port>" (assumes Endpoint.toString has that shape — the
  // previous split-based parser relied on the same assumption).
  val ENDPOINT_MARSHALLER: AsciiMarshaller[Endpoint] = new AsciiMarshaller[Endpoint] {
    override def toAsciiString(value: Endpoint): String = value.toString
    override def parseAsciiString(serialized: String): Endpoint = {
      // Split on the LAST ':' so a host that itself contains ':' still parses, and fail with a
      // descriptive error instead of an ArrayIndexOutOfBoundsException when no port is present
      // (the previous `serialized.split(":")` + `l(1)` did both wrong).
      val sep = serialized.lastIndexOf(':')
      if (sep < 0)
        throw new IllegalArgumentException(
          s"Invalid endpoint, expected <host>:<port> but got: $serialized"
        )
      Endpoint(serialized.substring(0, sep), Port.tryCreate(serialized.substring(sep + 1).toInt))
    }
  }

  /** Carries the authentication token on each call ("-bin" suffix mandates a binary marshaller). */
  val AUTH_TOKEN_METADATA_KEY: Metadata.Key[AuthenticationToken] =
    Metadata.Key.of("authToken-bin", AUTH_TOKEN_MARSHALLER)

  /** Identifies which sequencer endpoint the token was issued for. */
  val ENDPOINT_METADATA_KEY: Metadata.Key[Endpoint] =
    Metadata.Key.of("endpoint", ENDPOINT_MARSHALLER)

  /** The calling member's id, in proto-primitive string form. */
  val MEMBER_ID_METADATA_KEY: Metadata.Key[String] =
    Metadata.Key.of("memberId", ASCII_STRING_MARSHALLER)

  /** The target domain id, in proto-primitive string form. */
  val DOMAIN_ID_METADATA_KEY: Metadata.Key[String] =
    Metadata.Key.of("domainId", ASCII_STRING_MARSHALLER)

  /** Error-code trailer set by the server on authentication failures. */
  val AUTHENTICATION_ERROR_CODE: Metadata.Key[String] =
    Metadata.Key.of("authErrorCode", ASCII_STRING_MARSHALLER)
}
b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/authentication/grpc/SequencerClientAuthentication.scala @@ -0,0 +1,215 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.authentication.grpc + +import cats.data.EitherT +import cats.implicits.* +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.networking.Endpoint +import com.digitalasset.canton.sequencing.authentication.{ + AuthenticationToken, + AuthenticationTokenManagerConfig, +} +import com.digitalasset.canton.time.Clock +import com.digitalasset.canton.topology.{AuthenticatedMember, DomainId, UnauthenticatedMemberId} +import com.digitalasset.canton.tracing.TraceContext +import com.google.common.annotations.VisibleForTesting +import io.grpc.ForwardingClientCall.SimpleForwardingClientCall +import io.grpc.ForwardingClientCallListener.SimpleForwardingClientCallListener +import io.grpc.* +import io.grpc.internal.GrpcAttributes +import io.grpc.stub.AbstractStub + +import java.util.concurrent.Executor +import scala.concurrent.{ExecutionContext, Future} +import scala.util.control.NonFatal + +/** Provides call credentials and an interceptor to generate a token for outgoing requests and add the token to the call + * metadata, then cause the token to be invalidated if an UNAUTHORIZED response is returned. 
  */
private[grpc] class SequencerClientTokenAuthentication(
    domainId: DomainId,
    member: AuthenticatedMember,
    tokenManagerPerEndpoint: NonEmpty[Map[Endpoint, AuthenticationTokenManager]],
    protected val loggerFactory: NamedLoggerFactory,
)(implicit executionContext: ExecutionContext)
    extends SequencerClientAuthentication
    with NamedLogging {

  /** Apply the sequencer authentication components to a grpc client stub */
  def apply[S <: AbstractStub[S]](client: S): S =
    client.withCallCredentials(callCredentials).withInterceptors(reauthorizationInterceptor)

  // Resolve the token manager for the endpoint a call targets; when the endpoint is unknown
  // (or not in the map) fall back to the first configured endpoint's manager.
  private def getTokenManager(maybeEndpoint: Option[Endpoint]) = (for {
    endpoint <- maybeEndpoint
    tokenManager <- tokenManagerPerEndpoint.get(endpoint)
  } yield tokenManager).getOrElse(tokenManagerPerEndpoint.head1._2)

  /** Asks token manager for the current auth token and applies it to outgoing requests */
  @VisibleForTesting
  private[grpc] val callCredentials: CallCredentials = new CallCredentials {
    override def applyRequestMetadata(
        requestInfo: CallCredentials.RequestInfo,
        appExecutor: Executor,
        applier: CallCredentials.MetadataApplier,
    ): Unit = {
      // The target endpoint travels on the channel's transport attributes, when available.
      val maybeEndpoint = for {
        clientEagAttrs <- Option(
          requestInfo.getTransportAttrs.get(GrpcAttributes.ATTR_CLIENT_EAG_ATTRS)
        )
        endpoint <- Option(clientEagAttrs.get(Endpoint.ATTR_ENDPOINT))
      } yield endpoint
      val tokenManager = getTokenManager(maybeEndpoint)

      tokenManager.getToken
        .leftMap(err =>
          Status.PERMISSION_DENIED.withDescription(s"Authentication token refresh error: $err")
        )
        .value
        .recover {
          case grpcError: StatusRuntimeException =>
            // if auth token refresh fails with a grpc error, pass along that status so that the grpc subscription retry
            // mechanism can base the retry decision on it.
            Left(
              grpcError.getStatus
                .withDescription("Authentication token refresh failed with grpc error")
            )
          case NonFatal(ex) =>
            // otherwise indicate internal error
            Left(
              Status.INTERNAL
                .withDescription("Authentication token refresh failed with exception")
                .withCause(ex)
            )
        }
        .foreach {
          // Either fail the call with the derived status, or attach the auth metadata.
          case Left(errorStatus) => applier.fail(errorStatus)
          case Right(token) => applier.apply(generateMetadata(token, maybeEndpoint))
        }
    }

    override def thisUsesUnstableApi(): Unit = {
      // yes, we know - cheers grpc
    }
  }

  /** Will invalidate the current token if an UNAUTHORIZED response is observed.
    * This will typically happen after a token has expired.
    * Note the caller will still receive the UNAUTHORIZED response,
    * although there are approaches for buffering and retrying the request this would not
    * work for all cases (such as a streamed response).
    * Instead the caller is expected to retry the request which will attempt to fetch
    * a new authorization token as the prior response invalidated the previous token.
    */
  @VisibleForTesting
  private[grpc] val reauthorizationInterceptor = new ClientInterceptor {
    override def interceptCall[ReqT, RespT](
        method: MethodDescriptor[ReqT, RespT],
        callOptions: CallOptions,
        next: Channel,
    ): ClientCall[ReqT, RespT] = {
      new ReauthorizeClientCall(next.newCall(method, callOptions))
    }

    private class ReauthorizeClientCall[ReqT, RespT](call: ClientCall[ReqT, RespT])
        extends SimpleForwardingClientCall[ReqT, RespT](call) {

      override def start(responseListener: ClientCall.Listener[RespT], headers: Metadata): Unit = {
        super.start(new ReauthorizeClientCallListener(responseListener), headers)
      }

      private class ReauthorizeClientCallListener(responseListener: ClientCall.Listener[RespT])
          extends SimpleForwardingClientCallListener[RespT](responseListener) {
        override def onClose(status: Status, trailers: Metadata): Unit = {
          if (status.getCode == Status.UNAUTHENTICATED.getCode) {
            // Use the endpoint from the trailers (if present) to pick the right token manager,
            // and invalidate the exact token the server rejected.
            val tokenManager = Option(trailers.get(Constant.ENDPOINT_METADATA_KEY))
              .flatMap(tokenManagerPerEndpoint.get)
              .getOrElse(tokenManagerPerEndpoint.head1._2)
            Option(trailers.get(Constant.AUTH_TOKEN_METADATA_KEY))
              .foreach(tokenManager.invalidateToken)
          }

          super.onClose(status, trailers)
        }
      }

    }
  }

  // Build the per-call auth metadata: member, token, domain and (when known) endpoint.
  private def generateMetadata(
      token: AuthenticationToken,
      maybeEndpoint: Option[Endpoint],
  ): Metadata = {
    val metadata = new Metadata()
    metadata.put(Constant.MEMBER_ID_METADATA_KEY, member.toProtoPrimitive)
    metadata.put(Constant.AUTH_TOKEN_METADATA_KEY, token)
    metadata.put(Constant.DOMAIN_ID_METADATA_KEY, domainId.toProtoPrimitive)
    maybeEndpoint.foreach(endpoint => metadata.put(Constant.ENDPOINT_METADATA_KEY, endpoint))
    metadata
  }
}

object SequencerClientTokenAuthentication {

  /** Build the authentication for an authenticated member, creating one
    * [[AuthenticationTokenManager]] per configured sequencer endpoint.
    */
  def apply(
      domainId: DomainId,
      authenticatedMember: AuthenticatedMember,
      obtainTokenPerEndpoint: NonEmpty[
        Map[Endpoint, TraceContext => EitherT[Future, Status, AuthenticationTokenWithExpiry]]
      ],
      isClosed: => Boolean,
      tokenManagerConfig: AuthenticationTokenManagerConfig,
      clock: Clock,
      loggerFactory: NamedLoggerFactory,
  )(implicit executionContext: ExecutionContext): SequencerClientAuthentication = {
    val tokenManagerPerEndpoint = obtainTokenPerEndpoint.transform { case (_, obtainToken) =>
      new AuthenticationTokenManager(
        obtainToken,
        isClosed,
        tokenManagerConfig,
        clock,
        loggerFactory,
      )
    }
    new SequencerClientTokenAuthentication(
      domainId,
      authenticatedMember,
      tokenManagerPerEndpoint,
      loggerFactory,
    )
  }

}

/** Authentication for unauthenticated members: only member and domain ids are attached,
  * no token is ever requested and no reauthorization interceptor is installed.
  */
class SequencerClientNoAuthentication(domainId: DomainId, member: UnauthenticatedMemberId)
    extends SequencerClientAuthentication {

  // Static metadata — identical for every call, so built once.
  private val metadata: Metadata = {
    val metadata = new Metadata()
    metadata.put(Constant.MEMBER_ID_METADATA_KEY, member.toProtoPrimitive)
    metadata.put(Constant.DOMAIN_ID_METADATA_KEY, domainId.toProtoPrimitive)
    metadata
  }

  override def apply[S <: AbstractStub[S]](client: S): S =
    client.withCallCredentials(callCredentials)

  @VisibleForTesting
  private[grpc] val callCredentials: CallCredentials = new CallCredentials {
    override def applyRequestMetadata(
        requestInfo: CallCredentials.RequestInfo,
        appExecutor: Executor,
        applier: CallCredentials.MetadataApplier,
    ): Unit = applier.apply(metadata)
    override def thisUsesUnstableApi(): Unit = {
      // yes, we know - cheers grpc
    }
  }
}

trait SequencerClientAuthentication {

  /** Apply the sequencer authentication components to a grpc client stub */
  def apply[S <: AbstractStub[S]](client: S): S
}
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.sequencing.client

import com.digitalasset.canton.sequencing.OrdinarySerializedEvent
import com.digitalasset.canton.sequencing.client.DelayedSequencerClient.{
  Immediate,
  SequencedEventDelayPolicy,
}
import com.digitalasset.canton.topology.DomainId

import java.util.concurrent.atomic.AtomicReference
import scala.collection.concurrent
import scala.collection.concurrent.TrieMap
import scala.concurrent.Future

/** Hook allowing the delivery of a sequenced event to be delayed. */
trait DelaySequencedEvent {
  def delay(event: OrdinarySerializedEvent): Future[Unit]
}

/** No-op policy: events are never delayed. */
case object NoDelay extends DelaySequencedEvent {
  override def delay(event: OrdinarySerializedEvent): Future[Unit] = Future.unit
}

/** Delays sequenced events according to a swappable [[DelayedSequencerClient.SequencedEventDelayPolicy]].
  * Presumably a test hook — instances are looked up via the global registry in the companion.
  */
final case class DelayedSequencerClient(domain: DomainId, member: String)
    extends DelaySequencedEvent {

  // Current delay policy; defaults to delaying nothing.
  private[this] val onPublish: AtomicReference[SequencedEventDelayPolicy] =
    new AtomicReference[SequencedEventDelayPolicy](_ => Immediate)

  def setDelayPolicy(publishPolicy: SequencedEventDelayPolicy): Unit = {
    onPublish.set(publishPolicy)
  }

  override def delay(event: OrdinarySerializedEvent): Future[Unit] = {
    val policy = onPublish.get()
    policy(event).until
  }
}

object DelayedSequencerClient {

  // Global registry keyed by (environmentId, domainId, member) so the delayed client
  // can be located from elsewhere (e.g. test code).
  private val clients: concurrent.Map[(String, DomainId, String), DelayedSequencerClient] =
    new TrieMap[(String, DomainId, String), DelayedSequencerClient]

  /** Look up a previously registered delayed client, if any. */
  def delayedSequencerClient(
      environmentId: String,
      domainId: DomainId,
      member: String,
  ): Option[DelayedSequencerClient] = {
    clients.get((environmentId, domainId, member))
  }

  /** Register a delayed client for the given key, or return the one already registered.
    * Fix: previously the freshly created instance was returned even when `putIfAbsent`
    * found an existing registration, so a caller racing on registration could end up
    * holding a duplicate that `delayedSequencerClient` would never return.
    */
  def registerAndCreate(
      environmentId: String,
      domainId: DomainId,
      member: String,
  ): DelayedSequencerClient = {
    val delayedLog = new DelayedSequencerClient(domainId, member)
    clients.putIfAbsent((environmentId, domainId, member), delayedLog).getOrElse(delayedLog)
  }

  /** Maps an event to the delay that should be applied to it. */
  trait SequencedEventDelayPolicy extends (OrdinarySerializedEvent => DelaySequencerClient)

  /** Result of a delay policy: a future that completes when the event may be delivered. */
  sealed trait DelaySequencerClient {
    val until: Future[Unit]
  }

  /** Deliver immediately. */
  case object Immediate extends DelaySequencerClient {
    override val until: Future[Unit] = Future.unit
  }

  /** Hold delivery until the given future completes. */
  final case class DelayUntil(override val until: Future[Unit]) extends DelaySequencerClient

}
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.sequencing.client

import com.daml.nameof.NameOf.functionFullName
import com.digitalasset.canton.config.ProcessingTimeout
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.lifecycle.{FlagCloseable, FutureUnlessShutdown}
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.store.SequencerCounterTrackerStore
import com.digitalasset.canton.time.Clock
import com.digitalasset.canton.tracing.TraceContext.withNewTraceContext
import com.digitalasset.canton.tracing.{TraceContext, Traced}
import com.digitalasset.canton.util.HasFlushFuture
import com.google.common.annotations.VisibleForTesting

import java.util.concurrent.atomic.AtomicReference
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}
import scala.jdk.DurationConverters.*

/** Periodically pull the latest clean timestamp and if it has changed acknowledge it with the sequencer.
  * This indicates that we have successfully processed all events up to and including this event.
  * We always acknowledge the current clean timestamp on startup if available to indicate to the sequencer that we are
  * running. The periodic interval is based on the host clock not in sequencer time, however any drift is likely
  * insignificant for the purpose of the sequencer acknowledgements (pruning hourly/daily).
  * Errors are logged at error level - periodic failures are likely not problematic however continuous errors
  * could eventually be problematic for the sequencer operator.
  */
class PeriodicAcknowledgements(
    isHealthy: => Boolean,
    interval: FiniteDuration,
    fetchLatestCleanTimestamp: TraceContext => Future[Option[CantonTimestamp]],
    acknowledge: Traced[CantonTimestamp] => Future[Unit],
    clock: Clock,
    override protected val timeouts: ProcessingTimeout,
    protected val loggerFactory: NamedLoggerFactory,
)(implicit executionContext: ExecutionContext)
    extends NamedLogging
    with FlagCloseable
    with HasFlushFuture {

  // Last timestamp we acknowledged; avoids re-sending an unchanged acknowledgement.
  private val priorAckRef = new AtomicReference[Option[CantonTimestamp]](None)

  private def update(): Unit =
    withNewTraceContext { implicit traceContext =>
      // Acknowledge only when the clean timestamp moved since the previous acknowledgement.
      def ackIfChanged(timestamp: CantonTimestamp): Future[Unit] = {
        val priorAck = priorAckRef.getAndSet(Some(timestamp))
        val changed = !priorAck.contains(timestamp)
        if (changed) {
          logger.debug(s"Acknowledging clean timestamp: $timestamp")
          acknowledge(Traced(timestamp))
        } else Future.unit
      }

      // Skip the whole round while unhealthy; the next scheduled round will try again.
      if (isHealthy) {
        val updateF = performUnlessClosingF(functionFullName) {
          for {
            latestClean <- fetchLatestCleanTimestamp(traceContext)
            _ <- latestClean.fold(Future.unit)(ackIfChanged)
          } yield ()
        }.onShutdown(
          logger.debug("Acknowledging sequencer timestamp skipped due to shutdown")
        )
        // Track the in-flight round so flush()/close can wait for it and failures are logged.
        addToFlushAndLogError("periodic acknowledgement")(updateF)
      }
    }

  // Re-arms itself after every tick; presumably the FutureUnlessShutdown returned by
  // scheduleAfter short-circuits on shutdown, ending the re-arming chain — TODO confirm.
  private def scheduleNextUpdate(): Unit = {
    clock
      .scheduleAfter(_ => update(), interval.toJava)
      .map(_ => scheduleNextUpdate())
      .discard[FutureUnlessShutdown[Unit]]
  }

  @VisibleForTesting
  def flush(): Future[Unit] = doFlush()

  // perform one update immediate and then schedule the next
  update()
  scheduleNextUpdate()
}

object PeriodicAcknowledgements {
  type FetchCleanTimestamp = TraceContext => Future[Option[CantonTimestamp]]

  // Use when acknowledgements should never be sent: always reports "no clean timestamp".
  val noAcknowledgements: FetchCleanTimestamp = _ => Future.successful(None)

  /** Wire a [[PeriodicAcknowledgements]] to a [[SequencerClient]], signing acknowledgements
    * and swallowing failures that occur while the client is already closing.
    */
  def create(
      interval: FiniteDuration,
      isHealthy: => Boolean,
      client: SequencerClient,
      fetchCleanTimestamp: FetchCleanTimestamp,
      clock: Clock,
      timeouts: ProcessingTimeout,
      loggerFactory: NamedLoggerFactory,
  )(implicit executionContext: ExecutionContext): PeriodicAcknowledgements = {
    new PeriodicAcknowledgements(
      isHealthy,
      interval,
      fetchCleanTimestamp,
      Traced.lift((ts, tc) =>
        client
          .acknowledgeSigned(ts)(tc)
          .foldF(
            // during shutdown a failed acknowledgement is expected and not an error
            e => if (client.isClosing) Future.unit else Future.failed(new RuntimeException(e)),
            _ => Future.unit,
          )
      ),
      clock,
      timeouts,
      loggerFactory,
    )
  }

  /** Fetch the clean timestamp from the prehead of a sequencer counter tracker store. */
  def fetchCleanCounterFromStore(
      counterTrackerStore: SequencerCounterTrackerStore
  )(implicit executionContext: ExecutionContext): FetchCleanTimestamp =
    traceContext =>
      for {
        cursorO <- counterTrackerStore.preheadSequencerCounter(traceContext)
        timestampO = cursorO.map(_.timestamp)
      } yield timestampO
}
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.sequencing.client

import com.digitalasset.canton.sequencing.client.transports.replay.ReplayingSendsSequencerClientTransport
import com.digitalasset.canton.time.NonNegativeFiniteDuration

import java.nio.file.Path
import scala.concurrent.{Future, Promise}

/** Configuration for where to record sequencer sends and events to.
  * @param directory Root directory for holding all recording files
  * @param filename Filename that is initially empty and updated to a name based on the member-id at runtime.
  *                 Use [[setFilename]] to ensure this can only be set once.
  */
final case class RecordingConfig(directory: Path, filename: Option[String] = None) {

  /** Assign the filename exactly once; aborts if it has already been assigned. */
  def setFilename(value: String): RecordingConfig = filename match {
    case Some(existingFilename) =>
      sys.error(s"Recording filename has already been set: $existingFilename")
    case None => copy(filename = Some(value))
  }

  /** Gets the full filepath and throws if the filepath has not yet been set. */
  lazy val fullFilePath: Path = filename match {
    case None => sys.error("filename has not been set")
    case Some(name) => directory.resolve(name)
  }
}

/** Configuration for setting up a sequencer client to replay requests or received events.
  * @param recordingConfig The path to where all recorded content is stored
  * @param action What type of replay we'll be performing
  */
final case class ReplayConfig(recordingConfig: RecordingConfig, action: ReplayAction)

object ReplayConfig {

  /** Convenience constructor taking the recording base directory directly. */
  def apply(recordingBasePath: Path, action: ReplayAction): ReplayConfig =
    new ReplayConfig(RecordingConfig(recordingBasePath), action)
}

/** The kind of replay a sequencer client should perform. */
sealed trait ReplayAction

object ReplayAction {

  /** Replay events received from the sequencer */
  case object SequencerEvents extends ReplayAction

  /** Replay sends that were made to the sequencer.
    * Tests can control the [[transports.replay.ReplayingSendsSequencerClientTransport]] once constructed
    * by waiting for the `transport` future to be completed with the transport instance.
    */
  final case class SequencerSends(
      sendTimeout: NonNegativeFiniteDuration = NonNegativeFiniteDuration.tryOfSeconds(20),
      private val transportP: Promise[ReplayingSendsSequencerClientTransport] =
        Promise[ReplayingSendsSequencerClientTransport](),
      usePekko: Boolean = false,
  ) extends ReplayAction {

    /** Used by the transport to notify a test that the transport is ready */
    private[client] def publishTransport(transport: ReplayingSendsSequencerClientTransport): Unit =
      transportP.success(transport)

    /** Completed once the replaying transport has been constructed and published. */
    val transport: Future[ReplayingSendsSequencerClientTransport] = transportP.future
  }
}
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.sequencing.client

import cats.data.EitherT
import com.digitalasset.canton.crypto.{DomainSyncCryptoClient, HashPurpose}
import com.digitalasset.canton.sequencing.protocol.SignedContent
import com.digitalasset.canton.serialization.HasCryptographicEvidence
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.version.ProtocolVersion

import scala.concurrent.{ExecutionContext, Future}

/** Signs requests before they are submitted to the sequencer.
  * Implementations return the request wrapped as a [[SignedContent]], or a string error.
  */
trait RequestSigner {
  def signRequest[A <: HasCryptographicEvidence](
      request: A,
      hashPurpose: HashPurpose,
  )(implicit
      ec: ExecutionContext,
      traceContext: TraceContext,
  ): EitherT[Future, String, SignedContent[A]]
}

object RequestSigner {

  /** Request signer backed by the domain's topology/crypto client: signs with the key valid at
    * the head topology snapshot and embeds that snapshot's timestamp into the signed content.
    */
  def apply(
      topologyClient: DomainSyncCryptoClient,
      protocolVersion: ProtocolVersion,
  ): RequestSigner = new RequestSigner {
    override def signRequest[A <: HasCryptographicEvidence](
        request: A,
        hashPurpose: HashPurpose,
    )(implicit
        ec: ExecutionContext,
        traceContext: TraceContext,
    ): EitherT[Future, String, SignedContent[A]] = {
      val snapshot = topologyClient.headSnapshot
      SignedContent
        .create(
          topologyClient.pureCrypto,
          snapshot,
          request,
          Some(snapshot.ipsSnapshot.timestamp),
          hashPurpose,
          protocolVersion,
        )
        // callers only need a displayable error, so collapse the typed error to a string
        .leftMap(_.toString)
    }
  }

  /** Request signer for unauthenticated members: never signs anything */
  object UnauthenticatedRequestSigner extends RequestSigner {
    override def signRequest[A <: HasCryptographicEvidence](request: A, hashPurpose: HashPurpose)(
        implicit
        ec: ExecutionContext,
        traceContext: TraceContext,
    ): EitherT[Future, String, SignedContent[A]] =
      EitherT.leftT("Unauthenticated members do not sign submission requests")
  }
}
b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/ResilientSequencerSubscriberPekko.scala new file mode 100644 index 0000000000..ba62c47e2d --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/ResilientSequencerSubscriberPekko.scala @@ -0,0 +1,311 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.client + +import cats.syntax.either.* +import com.digitalasset.canton.SequencerCounter +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.health.{AtomicHealthComponent, ComponentHealthState} +import com.digitalasset.canton.lifecycle.{FlagCloseable, OnShutdownRunner} +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging, TracedLogger} +import com.digitalasset.canton.sequencing.OrdinarySerializedEvent +import com.digitalasset.canton.sequencing.SequencerAggregatorPekko.HasSequencerSubscriptionFactoryPekko +import com.digitalasset.canton.sequencing.client.ResilientSequencerSubscription.LostSequencerSubscription +import com.digitalasset.canton.sequencing.client.transports.SequencerClientTransportPekko +import com.digitalasset.canton.sequencing.protocol.SubscriptionRequest +import com.digitalasset.canton.topology.{Member, SequencerId} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.PekkoUtil.{RetrySourcePolicy, WithKillSwitch} +import com.digitalasset.canton.util.Thereafter.syntax.* +import com.digitalasset.canton.util.{LoggerUtil, PekkoUtil} +import com.digitalasset.canton.version.ProtocolVersion +import org.apache.pekko.Done +import org.apache.pekko.stream.scaladsl.Source +import org.apache.pekko.stream.{AbruptStageTerminationException, KillSwitch, Materializer} + +import 
scala.concurrent.duration.FiniteDuration +import scala.concurrent.{ExecutionContext, Future} + +/** Attempts to create a resilient [[SequencerSubscriptionPekko]] for the [[SequencerClient]] by + * creating underlying subscriptions using the [[SequencerSubscriptionFactoryPekko]] + * and then recreating them if they fail with a reason that is deemed retryable. + * If a subscription is closed or fails with a reason that is not retryable the failure will be passed downstream + * from this subscription. + * We determine whether an error is retryable by calling the [[SubscriptionErrorRetryPolicy]] + * of the supplied [[SequencerSubscriptionFactoryPekko]]. + * We also will delay recreating subscriptions by an interval determined by the + * [[com.digitalasset.canton.sequencing.client.SubscriptionRetryDelayRule]]. + * The recreated subscription starts at the last event received, + * or at the starting counter that was given initially if no event was received at all. + * + * The emitted events stutter whenever the subscription is recreated. 
  */
class ResilientSequencerSubscriberPekko[E](
    retryDelayRule: SubscriptionRetryDelayRule,
    subscriptionFactory: SequencerSubscriptionFactoryPekko[E],
    protected override val timeouts: ProcessingTimeout,
    protected override val loggerFactory: NamedLoggerFactory,
)(implicit materializer: Materializer)
    extends FlagCloseable
    with NamedLogging {
  import ResilientSequencerSubscriberPekko.*

  /** Start running the resilient sequencer subscription from the given counter */
  def subscribeFrom(startingCounter: SequencerCounter)(implicit
      traceContext: TraceContext
  ): SequencerSubscriptionPekko[E] = {

    logger.debug(s"Starting resilient sequencer subscription from counter $startingCounter")
    val onShutdownRunner = new OnShutdownRunner.PureOnShutdownRunner(logger)
    val sequencerId = subscriptionFactory.sequencerId
    val health = new ResilientSequencerSubscriptionHealth(
      s"sequencer-subscription-for-$sequencerId-starting-at-$startingCounter",
      sequencerId,
      onShutdownRunner,
      logger,
    )
    val initial =
      RestartSourceConfig(startingCounter, retryDelayRule.initialDelay, health)(traceContext)
    val source = PekkoUtil
      .restartSource("resilient-sequencer-subscription", initial, mkSource, policy)
      // Filter out retried errors
      .filter {
        case WithKillSwitch(Left(triaged)) => !triaged.retryable
        case WithKillSwitch(Right(_)) => true
      }
      .map(_.map(_.leftMap(_.error)))
      .mapMaterializedValue { case (killSwitch, doneF) =>
        implicit val ec: ExecutionContext = materializer.executionContext
        val closedHealthF = doneF.thereafter { _ =>
          // A `restartSource` may be materialized at most once anyway,
          // so it's OK to use a shared OnShutdownRunner and HealthComponent here
          onShutdownRunner.close()
        }
        (killSwitch, closedHealthF)
      }
    SequencerSubscriptionPekko(source, health)
  }

  // Decides, after each subscription terminates, whether (and after what delay, from which
  // counter) a new underlying subscription should be started.
  private val policy: RetrySourcePolicy[
    RestartSourceConfig,
    Either[TriagedError[E], OrdinarySerializedEvent],
  ] = new RetrySourcePolicy[RestartSourceConfig, Either[TriagedError[E], OrdinarySerializedEvent]] {
    override def shouldRetry(
        lastState: RestartSourceConfig,
        lastEmittedElement: Option[Either[TriagedError[E], OrdinarySerializedEvent]],
        lastFailure: Option[Throwable],
    ): Option[(FiniteDuration, RestartSourceConfig)] = {
      implicit val traceContext: TraceContext = lastState.traceContext
      val retryPolicy = subscriptionFactory.retryPolicy
      // whether any event arrived on the last subscription — feeds the backoff rule below
      val hasReceivedEvent = lastEmittedElement.exists {
        case Left(err) => err.hasReceivedElements
        case Right(_) => true
      }
      val canRetry = lastFailure match {
        case None =>
          lastEmittedElement match {
            case Some(Right(_)) => false
            case Some(Left(err)) =>
              val canRetry = err.retryable
              if (!canRetry)
                logger.warn(s"Closing resilient sequencer subscription due to error: ${err.error}")
              canRetry
            case None =>
              logger.info("The sequencer subscription has been terminated by the server.")
              false
          }
        case Some(ex: AbruptStageTerminationException) =>
          logger.debug("Giving up on resilient sequencer subscription due to shutdown", ex)
          false
        case Some(ex) =>
          val canRetry = retryPolicy.retryOnException(ex)
          if (canRetry) {
            logger.warn(
              s"The sequencer subscription encountered an exception and will be restarted",
              ex,
            )
            true
          } else {
            logger.error(
              "Closing resilient sequencer subscription due to exception",
              ex,
            )
            false
          }
      }
      Option.when(canRetry) {
        val newDelay = retryDelayRule.nextDelay(lastState.delay, hasReceivedEvent)
        val logMessage =
          s"Waiting ${LoggerUtil.roundDurationForHumans(newDelay)} before reconnecting"
        // Escalate logging once the delay passes the warn threshold: mark the health
        // component failed the first time, then log at info while it remains failed.
        if (newDelay < retryDelayRule.warnDelayDuration) {
          logger.debug(logMessage)
        } else if (lastState.health.isFailed) {
          logger.info(logMessage)
        } else {
          val error =
            LostSequencerSubscription.Warn(subscriptionFactory.sequencerId, _logOnCreation = true)
          lastState.health.failureOccurred(error)
        }

        // Resume from the last counter seen (from the error's bookkeeping or the last event),
        // falling back to the previous starting counter when nothing was emitted.
        val nextCounter = lastEmittedElement.fold(lastState.startingCounter)(
          _.fold(_.lastSequencerCounter, _.counter)
        )
        lastState.delay -> lastState.copy(startingCounter = nextCounter, delay = newDelay)
      }
    }
  }

  // Create one underlying subscription starting at the configured counter, triaging each
  // element so the retry policy can distinguish retryable from fatal errors.
  private def mkSource(
      config: RestartSourceConfig
  ): Source[Either[TriagedError[E], OrdinarySerializedEvent], (KillSwitch, Future[Done])] = {
    implicit val traceContext: TraceContext = config.traceContext
    val nextCounter = config.startingCounter
    logger.debug(s"Starting new sequencer subscription from $nextCounter")
    subscriptionFactory
      .create(nextCounter)
      .source
      // initial triage state: no events received yet, last counter = starting counter
      .statefulMap(() => TriageState(false, nextCounter))(triageError(config.health), _ => None)
  }

  // Classify an element: on error, record whether it is retryable and the last counter seen;
  // on the first event after an unhealthy period, resolve the health component.
  private def triageError(health: ResilientSequencerSubscriptionHealth)(
      state: TriageState,
      elementWithKillSwitch: WithKillSwitch[Either[E, OrdinarySerializedEvent]],
  )(implicit
      traceContext: TraceContext
  ): (TriageState, Either[TriagedError[E], OrdinarySerializedEvent]) = {
    val element = elementWithKillSwitch.unwrap
    val TriageState(hasPreviouslyReceivedEvents, lastSequencerCounter) = state
    val hasReceivedEvents = hasPreviouslyReceivedEvents || element.isRight
    // Resolve to healthy when we get a new element again
    if (!hasPreviouslyReceivedEvents && element.isRight) {
      health.resolveUnhealthy()
    }
    val triaged = element.leftMap { err =>
      val canRetry = subscriptionFactory.retryPolicy.retryOnError(err, hasReceivedEvents)
      TriagedError(canRetry, hasReceivedEvents, lastSequencerCounter, err)
    }
    val currentSequencerCounter = element.fold(_ => lastSequencerCounter, _.counter)
    val newState = TriageState(hasReceivedEvents, currentSequencerCounter)
    (newState, triaged)
  }
}

object ResilientSequencerSubscriberPekko {

  /** @param startingCounter The counter to start the next subscription from
    * @param delay If the next subscription fails with a retryable error,
    *              how long should we wait before starting a new subscription?
+ */ + private[ResilientSequencerSubscriberPekko] final case class RestartSourceConfig( + startingCounter: SequencerCounter, + delay: FiniteDuration, + health: ResilientSequencerSubscriptionHealth, + )(val traceContext: TraceContext) + extends PrettyPrinting { + override def pretty: Pretty[RestartSourceConfig.this.type] = prettyOfClass( + param("starting counter", _.startingCounter) + ) + + def copy( + startingCounter: SequencerCounter = this.startingCounter, + delay: FiniteDuration = this.delay, + health: ResilientSequencerSubscriptionHealth = this.health, + ): RestartSourceConfig = RestartSourceConfig(startingCounter, delay, health)(traceContext) + } + + private final case class TriagedError[+E]( + retryable: Boolean, + hasReceivedElements: Boolean, + lastSequencerCounter: SequencerCounter, + error: E, + ) + + def factory[E]( + sequencerID: SequencerId, + retryDelayRule: SubscriptionRetryDelayRule, + subscriptionFactory: SequencerSubscriptionFactoryPekko[E], + timeouts: ProcessingTimeout, + loggerFactory: NamedLoggerFactory, + )(implicit + materializer: Materializer + ): SequencerSubscriptionFactoryPekko[E] = { + val subscriber = new ResilientSequencerSubscriberPekko[E]( + retryDelayRule, + subscriptionFactory, + timeouts, + loggerFactory, + ) + new SequencerSubscriptionFactoryPekko[E] { + override def sequencerId: SequencerId = sequencerID + + override def create(startingCounter: SequencerCounter)(implicit + traceContext: TraceContext + ): SequencerSubscriptionPekko[E] = subscriber.subscribeFrom(startingCounter) + + override val retryPolicy: SubscriptionErrorRetryPolicyPekko[E] = + SubscriptionErrorRetryPolicyPekko.never + } + } + + private final case class TriageState( + hasPreviouslyReceivedEvents: Boolean, + lastSequencerCounter: SequencerCounter, + ) + + private class ResilientSequencerSubscriptionHealth( + override val name: String, + sequencerId: SequencerId, + override protected val associatedOnShutdownRunner: OnShutdownRunner, + override protected val 
logger: TracedLogger, + ) extends AtomicHealthComponent { + override protected def initialHealthState: ComponentHealthState = ComponentHealthState.Ok() + override def closingState: ComponentHealthState = + ComponentHealthState.failed(s"Disconnected from sequencer $sequencerId") + } +} + +trait SequencerSubscriptionFactoryPekko[E] extends HasSequencerSubscriptionFactoryPekko[E] { + + /** The ID of the sequencer this factory creates subscriptions to */ + def sequencerId: SequencerId + + def create( + startingCounter: SequencerCounter + )(implicit traceContext: TraceContext): SequencerSubscriptionPekko[E] + + def retryPolicy: SubscriptionErrorRetryPolicyPekko[E] + + override def subscriptionFactory: this.type = this +} + +object SequencerSubscriptionFactoryPekko { + + /** Creates a [[SequencerSubscriptionFactoryPekko]] for a [[ResilientSequencerSubscriberPekko]] + * that uses an underlying gRPC transport. + * Changes to the underlying gRPC transport are not supported by the [[ResilientSequencerSubscriberPekko]]; + * these can be done via the sequencer aggregator. 
+ */ + def fromTransport( + sequencerID: SequencerId, + transport: SequencerClientTransportPekko, + requiresAuthentication: Boolean, + member: Member, + protocolVersion: ProtocolVersion, + ): SequencerSubscriptionFactoryPekko[transport.SubscriptionError] = + new SequencerSubscriptionFactoryPekko[transport.SubscriptionError] { + override def sequencerId: SequencerId = sequencerID + + override def create(startingCounter: SequencerCounter)(implicit + traceContext: TraceContext + ): SequencerSubscriptionPekko[transport.SubscriptionError] = { + val request = SubscriptionRequest(member, startingCounter, protocolVersion) + if (requiresAuthentication) transport.subscribe(request) + else transport.subscribeUnauthenticated(request) + } + + override val retryPolicy: SubscriptionErrorRetryPolicyPekko[transport.SubscriptionError] = + transport.subscriptionRetryPolicyPekko + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/ResilientSequencerSubscription.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/ResilientSequencerSubscription.scala new file mode 100644 index 0000000000..6eff3863eb --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/ResilientSequencerSubscription.scala @@ -0,0 +1,407 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.sequencing.client
+
+import cats.syntax.functor.*
+import cats.syntax.option.*
+import com.daml.error.{ErrorCategory, ErrorCode, Explanation, Resolution}
+import com.daml.nameof.NameOf.functionFullName
+import com.digitalasset.canton.config.ProcessingTimeout
+import com.digitalasset.canton.error.CantonError
+import com.digitalasset.canton.error.CantonErrorGroups.SequencerSubscriptionErrorGroup
+import com.digitalasset.canton.health.{CloseableAtomicHealthComponent, ComponentHealthState}
+import com.digitalasset.canton.lifecycle.*
+import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, NamedLogging}
+import com.digitalasset.canton.sequencing.SerializedEventHandler
+import com.digitalasset.canton.sequencing.client.ResilientSequencerSubscription.LostSequencerSubscription
+import com.digitalasset.canton.sequencing.client.SequencerClientSubscriptionError.{
+  ApplicationHandlerPassive,
+  ApplicationHandlerShutdown,
+}
+import com.digitalasset.canton.sequencing.client.transports.SequencerClientTransport
+import com.digitalasset.canton.sequencing.handlers.{CounterCapture, HasReceivedEvent}
+import com.digitalasset.canton.sequencing.protocol.SubscriptionRequest
+import com.digitalasset.canton.topology.{DomainId, Member, SequencerId}
+import com.digitalasset.canton.tracing.TraceContext
+import com.digitalasset.canton.tracing.TraceContext.withNewTraceContext
+import com.digitalasset.canton.util.{DelayUtil, FutureUtil, LoggerUtil}
+import com.digitalasset.canton.version.ProtocolVersion
+import com.digitalasset.canton.{DiscardOps, SequencerCounter}
+import org.apache.pekko.stream.AbruptStageTerminationException
+
+import java.util.concurrent.atomic.AtomicReference
+import scala.concurrent.duration.*
+import scala.concurrent.{ExecutionContext, Future}
+import scala.util.{Failure, Success, Try}
+
+/** Attempts to create a resilient [[SequencerSubscription]] for the [[SequencerClient]] by
+ * creating underlying subscriptions using the [[com.digitalasset.canton.sequencing.client.transports.SequencerClientTransport]]
+ * and then recreating them if they fail with a reason that is deemed retryable.
+ * If a subscription is closed or fails with a reason that is not retryable the failure will be passed upstream
+ * from this subscription.
+ * We determine whether an error is retryable by calling the supplied [[SubscriptionErrorRetryPolicy]].
+ * We also will delay recreating subscriptions by an interval determined by the
+ * [[com.digitalasset.canton.sequencing.client.SubscriptionRetryDelayRule]].
+ * As we have to know where to restart a subscription from when it is recreated
+ * we use a [[com.digitalasset.canton.sequencing.handlers.CounterCapture]] handler
+ * wrapper to keep track of the last event that was successfully provided by the provided handler, and use this value
+ * to restart new subscriptions from.
+ * For this subscription [[ResilientSequencerSubscription.start]] must be called for the underlying subscriptions to begin.
+ */
+class ResilientSequencerSubscription[HandlerError](
+    domainId: DomainId,
+    startingFrom: SequencerCounter,
+    handler: SerializedEventHandler[HandlerError],
+    subscriptionFactory: SequencerSubscriptionFactory[HandlerError],
+    retryDelayRule: SubscriptionRetryDelayRule,
+    override protected val timeouts: ProcessingTimeout,
+    protected val loggerFactory: NamedLoggerFactory,
+)(implicit executionContext: ExecutionContext)
+    extends SequencerSubscription[HandlerError]
+    with NamedLogging
+    with CloseableAtomicHealthComponent
+    with FlagCloseableAsync {
+  override val name: String = SequencerClient.healthName
+  override val initialHealthState: ComponentHealthState = ComponentHealthState.Ok()
+  override def closingState: ComponentHealthState =
+    ComponentHealthState.failed("Disconnected from domain")
+  // Currently active underlying subscription (if any); consulted when closing and on transport changes.
+  private val nextSubscriptionRef =
+    new AtomicReference[Option[SequencerSubscription[HandlerError]]](None)
+  // Records the last counter successfully handed to `handler`, so recreated subscriptions resume from it.
+  private val counterCapture = new CounterCapture(startingFrom, loggerFactory)
+
+  /** Start running the resilient sequencer subscription */
+  def start(implicit traceContext: TraceContext): Unit = setupNewSubscription()
+
+  /** Start a new subscription to the sequencer.
+    * @param delayOnRestart If this subscription fails with an error that can be retried, how long should we wait before starting a new subscription?
+    */
+  private def setupNewSubscription(
+      delayOnRestart: FiniteDuration = retryDelayRule.initialDelay
+  )(implicit traceContext: TraceContext): Unit =
+    performUnlessClosing(functionFullName) {
+      // Invoked once the underlying subscription has been created; wires up health resolution
+      // and the close-reason handling that drives retries / give-up decisions.
+      def started(
+          hasReceivedEvent: HasReceivedEvent,
+          newSubscription: SequencerSubscription[HandlerError],
+          retryPolicy: SubscriptionErrorRetryPolicy,
+      ): Unit = {
+        logger.debug(
+          s"The sequencer subscription has been successfully started"
+        )
+
+        // register resolution
+        FutureUtil.doNotAwait(
+          hasReceivedEvent.awaitEvent.map { _ =>
+            resolveUnhealthy()
+          },
+          "has received event failed",
+        )
+
+        // setup handling when it is complete
+        newSubscription.closeReason onComplete {
+          case Success(SubscriptionCloseReason.TransportChange) =>
+            // Create a new subscription and reset the retry delay
+            // It is the responsibility of the subscription factory to use the changed transport
+            setupNewSubscription(retryDelayRule.initialDelay)
+
+          case Success(_: SubscriptionCloseReason.SubscriptionError) if isClosing =>
+            giveUp(Success(SubscriptionCloseReason.Shutdown))
+
+          case error @ Success(subscriptionError: SubscriptionCloseReason.SubscriptionError) =>
+            val canRetry =
+              retryPolicy.retryOnError(subscriptionError, hasReceivedEvent.hasReceivedEvent)
+            if (canRetry) {
+              // retry subscription. the retry rule logs at an appropriate level for the given error so we just note
+              // that we are retrying at debug level here.
+              logger.debug(
+                s"The sequencer subscription encountered an error and will be restarted: $subscriptionError"
+              )
+              delayAndRestartSubscription(hasReceivedEvent.hasReceivedEvent, delayOnRestart)
+            } else {
+              // we decided we shouldn't attempt to restart a subscription after this error
+              giveUp(error)
+            }
+
+          case Failure(_: AbruptStageTerminationException) if isClosing =>
+            giveUp(Success(SubscriptionCloseReason.Shutdown))
+
+          case Failure(exn) =>
+            val canRetry = retryPolicy.retryOnException(exn, logger)
+
+            if (canRetry) {
+              // retry subscription
+              logger.warn(
+                s"The sequencer subscription encountered an exception and will be restarted: $exn",
+                exn,
+              )
+              delayAndRestartSubscription(hasReceivedEvent.hasReceivedEvent, delayOnRestart)
+            } else {
+              // we decided we shouldn't attempt to restart a subscription after this error
+              giveUp(Failure(exn))
+            }
+
+          case unrecoverableReason =>
+            // for all other reasons assume we can't retry and shut ourselves down
+            giveUp(unrecoverableReason)
+
+        }
+      }
+
+      createSubscription.map((started _).tupled)
+    }.map(
+      // the inner UnlessShutdown is the signal from the SequencerSubscriptionFactory that it is being shut down.
+      // (really it is about the SequencerTransportState being shutdown)
+      // if we call this inside the performUnlessClosing block above, it will call close on this ResilientSequencerSubscription,
+      // which in turn will not be able to proceed, because it will wait for that same performUnlessClosing task to complete, which can't happen,
+      // because it contains the call to close
+      _.onShutdown(giveUp(Success(SubscriptionCloseReason.Shutdown)))
+    )
+    // the outer UnlessShutdown is about detecting that ResilientSequencerSubscription is being closed
+    .onShutdown(())
+
+  // Schedules the next subscription attempt after `delay`, escalating health state to failed
+  // once the delay reaches the warn threshold (unless already failed or closing).
+  private def delayAndRestartSubscription(hasReceivedEvent: Boolean, delay: FiniteDuration)(implicit
+      traceContext: TraceContext
+  ): Unit = {
+    val logMessage = s"Waiting ${LoggerUtil.roundDurationForHumans(delay)} before reconnecting"
+    if (delay < retryDelayRule.warnDelayDuration) {
+      logger.debug(logMessage)
+    } else if (isFailed) {
+      logger.info(logMessage)
+    } else if (!isClosing) {
+      TraceContext.withNewTraceContext { tx =>
+        this.failureOccurred(
+          LostSequencerSubscription.Warn(SequencerId(domainId))(this.errorLoggingContext(tx))
+        )
+      }
+    }
+
+    // delay and then restart a subscription with an updated delay duration
+    // we are effectively throwing away the future here so add some logging in case it fails
+    FutureUtil.doNotAwait(
+      DelayUtil.delay(functionFullName, delay, this) map { _ =>
+        val newDelay = retryDelayRule.nextDelay(delay, hasReceivedEvent)
+        setupNewSubscription(newDelay)
+      },
+      "Delaying setup of new sequencer subscription failed",
+    )
+  }
+
+  private def createSubscription(implicit traceContext: TraceContext): UnlessShutdown[
+    (HasReceivedEvent, SequencerSubscription[HandlerError], SubscriptionErrorRetryPolicy)
+  ] = {
+    // we are subscribing from the last event we've already received (this way we are sure that we
+    // successfully resubscribed). the event will subsequently be ignored by the sequencer client.
+    // even more, the event will be compared with the previous event received and we'll complain
+    // if we observed a fork
+    val nextCounter = counterCapture.counter
+    val (hasReceivedEvent, wrappedHandler) = HasReceivedEvent(counterCapture(handler))
+    logger.debug(s"Starting new sequencer subscription from $nextCounter")
+
+    val subscriptionE = subscriptionFactory.create(nextCounter, wrappedHandler)(traceContext)
+    nextSubscriptionRef.set(subscriptionE.map(_._1.some).onShutdown(None))
+
+    subscriptionE.map { case (subscription, retryPolicy) =>
+      (hasReceivedEvent, subscription, retryPolicy)
+    }
+  }
+
+  // stop the current subscription, do not retry, and propagate the failure upstream
+  private def giveUp(
+      reason: Try[SubscriptionCloseReason[HandlerError]]
+  )(implicit tc: TraceContext): Unit = {
+    // Log at a severity matching the close reason, then complete our own close reason and close.
+    reason match {
+      case Success(SubscriptionCloseReason.Closed) =>
+        logger.trace("Sequencer subscription is being closed")
+      case Success(SubscriptionCloseReason.Shutdown) =>
+        logger.info("Sequencer subscription is being closed due to an ongoing shutdown")
+      case Success(SubscriptionCloseReason.HandlerError(_: ApplicationHandlerShutdown.type)) =>
+        logger.info("Sequencer subscription is being closed due to handler shutdown")
+      case Success(SubscriptionCloseReason.HandlerError(ApplicationHandlerPassive(reason))) =>
+        logger.warn(
+          s"Closing resilient sequencer subscription because instance became passive: $reason"
+        )
+      case Success(Fatal(reason)) if isClosing =>
+        logger.info(
+          s"Closing resilient sequencer subscription after an error due to an ongoing shutdown: $reason"
+        )
+      case Success(error) =>
+        logger.warn(s"Closing resilient sequencer subscription due to error: $error")
+      case Failure(exception) =>
+        logger.error(s"Closing resilient sequencer subscription due to exception", exception)
+    }
+    closeReasonPromise.tryComplete(reason).discard
+    close()
+  }
+
+  /** Closes the current subscription with [[SubscriptionCloseReason.TransportChange]] and resubscribes
+    * using the `subscriptionFactory`, provided that there is currently a subscription.
+    *
+    * @return The future completes after the old subscription has been closed.
+    */
+  def resubscribeOnTransportChange()(implicit traceContext: TraceContext): Future[Unit] = {
+    nextSubscriptionRef.get() match {
+      case None => Future.unit
+      case Some(subscription) =>
+        subscription.complete(SubscriptionCloseReason.TransportChange)
+        subscription.closeReason.void
+    }
+  }
+
+  override private[canton] def complete(
+      reason: SubscriptionCloseReason[HandlerError]
+  )(implicit traceContext: TraceContext): Unit =
+    giveUp(Success(reason))
+
+  // Closes the underlying subscription and waits (bounded by the shutdownNetwork timeout)
+  // for its close reason, propagating the outcome to our own close-reason promise.
+  private def closeSubscription(
+      subscription: SequencerSubscription[HandlerError]
+  )(implicit traceContext: TraceContext): Unit = {
+    logger.debug(s"Closing subscription")
+    subscription.close()
+
+    val reason = Try(
+      timeouts.shutdownNetwork.await("wait for the running sequencer subscription to close")(
+        subscription.closeReason
+      )
+    )
+
+    reason match {
+      case Success(reason) =>
+        logger.debug(s"Underlying subscription closed with reason: $reason")
+      case Failure(ex) =>
+        logger.warn(s"Underlying subscription failed to close", ex)
+    }
+
+    val _ = closeReasonPromise.tryComplete(reason)
+  }
+
+  override protected def closeAsync(): Seq[AsyncOrSyncCloseable] = withNewTraceContext {
+    implicit traceContext =>
+      Seq(
+        SyncCloseable(
+          "underlying-subscription",
+          nextSubscriptionRef.get().foreach(closeSubscription),
+        ),
+        SyncCloseable(
+          "close-reason", {
+            // ensure that it is always completed even if there is no running subscription
+            closeReasonPromise.tryComplete(Success(SubscriptionCloseReason.Closed)).discard[Boolean]
+          },
+        ),
+      )
+  }
+}
+
+object ResilientSequencerSubscription extends SequencerSubscriptionErrorGroup {
+  def apply[E](
+      domainId: DomainId,
+      protocolVersion: ProtocolVersion,
+      member: Member,
+      getTransport: => UnlessShutdown[SequencerClientTransport],
+      handler: SerializedEventHandler[E],
+      startingFrom: SequencerCounter,
+ initialDelay: FiniteDuration, + warnDelay: FiniteDuration, + maxRetryDelay: FiniteDuration, + timeouts: ProcessingTimeout, + requiresAuthentication: Boolean, + loggerFactory: NamedLoggerFactory, + )(implicit executionContext: ExecutionContext): ResilientSequencerSubscription[E] = { + new ResilientSequencerSubscription[E]( + domainId, + startingFrom, + handler, + createSubscription(member, getTransport, requiresAuthentication, protocolVersion), + SubscriptionRetryDelayRule( + initialDelay, + warnDelay, + maxRetryDelay, + ), + timeouts, + loggerFactory, + ) + } + + /** Creates a simpler handler subscription function for the underlying class */ + private def createSubscription[E]( + member: Member, + getTransport: => UnlessShutdown[SequencerClientTransport], + requiresAuthentication: Boolean, + protocolVersion: ProtocolVersion, + ): SequencerSubscriptionFactory[E] = + new SequencerSubscriptionFactory[E] { + override def create(startingCounter: SequencerCounter, handler: SerializedEventHandler[E])( + implicit traceContext: TraceContext + ): UnlessShutdown[(SequencerSubscription[E], SubscriptionErrorRetryPolicy)] = { + val request = SubscriptionRequest(member, startingCounter, protocolVersion) + getTransport + .map { transport => + val subscription = + if (requiresAuthentication) transport.subscribe(request, handler)(traceContext) + else transport.subscribeUnauthenticated(request, handler)(traceContext) + (subscription, transport.subscriptionRetryPolicy) + } + } + } + + @Explanation( + """This warning is logged when a sequencer subscription is interrupted. The system will keep on retrying to reconnect indefinitely.""" + ) + @Resolution( + "Monitor the situation and contact the server operator if the issues does not resolve itself automatically." 
+ ) + object LostSequencerSubscription + extends ErrorCode( + "SEQUENCER_SUBSCRIPTION_LOST", + ErrorCategory.BackgroundProcessDegradationWarning, + ) { + + final case class Warn(sequencer: SequencerId, _logOnCreation: Boolean = true)(implicit + val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = + s"Lost subscription to sequencer ${sequencer.toString}. Will try to recover automatically." + ) { + override def logOnCreation: Boolean = _logOnCreation + } + } + + @Explanation( + """This error is logged when a sequencer client determined a ledger fork, where a sequencer node + |responded with different events for the same timestamp / counter. + | + |Whenever a client reconnects to a domain, it will start with the last message received and compare + |whether that last message matches the one it received previously. If not, it will report with this error. + | + |A ledger fork should not happen in normal operation. It can happen if the backups have been taken + |in a wrong order and e.g. the participant was more advanced than the sequencer. + |""" + ) + @Resolution( + """You can recover by restoring the system with a correctly ordered backup. Please consult the + |respective sections in the manual.""" + ) + object ForkHappened + extends ErrorCode( + "SEQUENCER_FORK_DETECTED", + ErrorCategory.SystemInternalAssumptionViolated, + ) + +} + +/** Errors that may occur on the creation of a sequencer subscription + */ +sealed trait SequencerSubscriptionCreationError extends SubscriptionCloseReason.SubscriptionError + +/** When a fatal error occurs on the creation of a sequencer subscription, the [[com.digitalasset.canton.sequencing.client.ResilientSequencerSubscription]] + * will not retry the subscription creation. Instead, the subscription will fail. 
+ */ +final case class Fatal(msg: String) extends SequencerSubscriptionCreationError + +trait SequencerSubscriptionFactory[HandlerError] { + def create( + startingCounter: SequencerCounter, + handler: SerializedEventHandler[HandlerError], + )(implicit + traceContext: TraceContext + ): UnlessShutdown[(SequencerSubscription[HandlerError], SubscriptionErrorRetryPolicy)] +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SendAsyncClientError.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SendAsyncClientError.scala new file mode 100644 index 0000000000..b9b5389e76 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SendAsyncClientError.scala @@ -0,0 +1,38 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.client + +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.sequencing.protocol.SendAsyncError + +/** Errors returned from the AsyncSend where we are sure the request has not potentially been accepted by the server + * so may be retried using a new message id (as a tracked message id for the failed request may remain in the pending + * send set). + * If a technical error is encountered by the sequencer client where there is a chance that the send will be sequenced + * it should not be returned to the caller through this error. 
+ */ +sealed trait SendAsyncClientError extends Product with Serializable with PrettyPrinting + +object SendAsyncClientError { + + /** The [[SequencerClient]] decided that the request is invalid so did not attempt to send it to the sequencer */ + final case class RequestInvalid(message: String) extends SendAsyncClientError { + override def pretty: Pretty[RequestInvalid] = prettyOfClass(unnamedParam(_.message.unquoted)) + } + + /** A send with the supplied message id is already being tracked */ + case object DuplicateMessageId extends SendAsyncClientError { + override def pretty: Pretty[DuplicateMessageId.type] = prettyOfObject[DuplicateMessageId.type] + } + + /** We were unable to make the request for a technical reason */ + final case class RequestFailed(message: String) extends SendAsyncClientError { + override def pretty: Pretty[RequestFailed] = prettyOfClass(unnamedParam(_.message.unquoted)) + } + + /** We were able to contact the server but the request was declined */ + final case class RequestRefused(error: SendAsyncError) extends SendAsyncClientError { + override def pretty: Pretty[RequestRefused] = prettyOfClass(unnamedParam(_.error)) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SendCallback.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SendCallback.scala new file mode 100644 index 0000000000..905731ce2b --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SendCallback.scala @@ -0,0 +1,44 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.client + +import com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, UnlessShutdown} +import com.digitalasset.canton.logging.TracedLogger +import com.digitalasset.canton.tracing.TraceContext + +import scala.concurrent.Promise + +/** Utilities for a SendCallback passed to the send method of the [[SequencerClient]] */ +object SendCallback { + + /** Do nothing when send result is observed */ + val empty: SendCallback = _ => () + + /** Callback that just logs the eventual result with provided logger and traceContext available at the callsite. + * @param sendDescription Description of the send appropriate for a log message. Will have the outcome appended to it. + */ + def log(sendDescription: String, logger: TracedLogger)(implicit + traceContext: TraceContext + ): SendCallback = + SendResult.log(sendDescription, logger)(_) + + /** Provides an easy mechanism for waiting for the send result. + * Should likely not be used within event handlers as this could prevent reading further events that may complete this callback, + * and cause a deadlock. 
+ */ + class CallbackFuture extends SendCallback { + import com.digitalasset.canton.DiscardOps + + private val promise = Promise[UnlessShutdown[SendResult]]() + + val future: FutureUnlessShutdown[SendResult] = { + FutureUnlessShutdown(promise.future) + } + + override def apply(result: UnlessShutdown[SendResult]): Unit = + promise.trySuccess(result).discard + } + + def future: CallbackFuture = new CallbackFuture +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SendResult.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SendResult.scala new file mode 100644 index 0000000000..d671e8adf3 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SendResult.scala @@ -0,0 +1,79 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.client + +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, UnlessShutdown} +import com.digitalasset.canton.logging.TracedLogger +import com.digitalasset.canton.sequencing.protocol.{ + Deliver, + DeliverError, + Envelope, + SequencerErrors, +} +import com.digitalasset.canton.tracing.TraceContext + +/** Possible outcomes for a send operation can be observed by a SequencerClient */ +sealed trait SendResult extends Product with Serializable + +object SendResult { + + /** Send caused a deliver event to be successfully sequenced. + * For aggregatable submission requests, success means that the aggregatable submission was assigned a timestamp. + * It does not mean that the [[com.digitalasset.canton.sequencing.protocol.AggregationRule.threshold]] + * was reached and the envelopes are delivered. + * Accordingly, the [[com.digitalasset.canton.sequencing.protocol.Deliver]] event may contain an empty batch. 
+ */ + final case class Success(deliver: Deliver[Envelope[_]]) extends SendResult + + /** Send caused an event that indicates that the submission was not and never will be sequenced */ + sealed trait NotSequenced extends SendResult + + /** Send caused a deliver error to be sequenced */ + final case class Error(error: DeliverError) extends NotSequenced + + /** No event was sequenced for the send up until the provided max sequencing time. + * A correct sequencer implementation will no longer sequence any events from the send past this point. + */ + final case class Timeout(sequencerTime: CantonTimestamp) extends NotSequenced + + /** Log the value of this result to the given logger at an appropriate level and given description */ + def log(sendDescription: String, logger: TracedLogger)( + result: UnlessShutdown[SendResult] + )(implicit traceContext: TraceContext): Unit = result match { + case UnlessShutdown.Outcome(SendResult.Success(deliver)) => + logger.trace(s"$sendDescription was sequenced at ${deliver.timestamp}") + case UnlessShutdown.Outcome(SendResult.Error(error)) => + logger.warn( + s"$sendDescription was rejected by the sequencer at ${error.timestamp} because [${error.reason}]" + ) + case UnlessShutdown.Outcome(SendResult.Timeout(sequencerTime)) => + logger.warn(s"$sendDescription timed out at $sequencerTime") + case UnlessShutdown.AbortedDueToShutdown => + logger.debug(s"$sendDescription aborted due to shutdown") + } + + def toFutureUnlessShutdown( + sendDescription: String + )(result: SendResult): FutureUnlessShutdown[Unit] = + result match { + case SendResult.Success(_) => + FutureUnlessShutdown.pure(()) + case SendResult.Error( + DeliverError(_, _, _, _, SequencerErrors.AggregateSubmissionAlreadySent(_)) + ) => + // Stop retrying + FutureUnlessShutdown.unit + case SendResult.Error(error) => + FutureUnlessShutdown.failed( + new RuntimeException( + s"$sendDescription was rejected by the sequencer at ${error.timestamp} because [${error.reason}]" + ) + ) + 
case SendResult.Timeout(sequencerTime) => + FutureUnlessShutdown.failed( + new RuntimeException(s"$sendDescription timed out at $sequencerTime") + ) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SendTracker.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SendTracker.scala new file mode 100644 index 0000000000..9660a2a05a --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SendTracker.scala @@ -0,0 +1,286 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.client + +import cats.data.EitherT +import cats.syntax.foldable.* +import cats.syntax.option.* +import com.daml.metrics.api.MetricsContext.withEmptyMetricsContext +import com.digitalasset.canton.concurrent.DirectExecutionContext +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.lifecycle.{ + AsyncCloseable, + AsyncOrSyncCloseable, + FlagCloseableAsync, + SyncCloseable, + UnlessShutdown, +} +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.metrics.SequencerClientMetrics +import com.digitalasset.canton.sequencing.protocol.{ + Deliver, + DeliverError, + MessageId, + SequencedEvent, +} +import com.digitalasset.canton.store.SequencedEventStore.OrdinarySequencedEvent +import com.digitalasset.canton.store.{SavePendingSendError, SendTrackerStore} +import com.digitalasset.canton.tracing.{TraceContext, Traced} +import com.digitalasset.canton.util.MonadUtil +import com.digitalasset.canton.util.MonadUtil.sequentialTraverse_ +import com.google.common.annotations.VisibleForTesting + +import java.time.Instant +import scala.collection.concurrent.TrieMap +import scala.concurrent.Future + +/** When we 
make a send request to the sequencer it will not be sequenced until some point in the future and may not + * be sequenced at all. To track a request call `send` with the messageId and max-sequencing-time of the request, + * the tracker then observes sequenced events and will notify the provided handler whether the send times out. + * For aggregatable submission requests, the send tracker notifies the handler of successful sequencing of the submission request, + * not of successful delivery of the envelopes when the + * [[com.digitalasset.canton.sequencing.protocol.AggregationRule.threshold]] has been reached. + * In fact, there is no notification of whether the threshold was reached before the max sequencing time. + */ +class SendTracker( + initialPendingSends: Map[MessageId, CantonTimestamp], + store: SendTrackerStore, + metrics: SequencerClientMetrics, + protected val loggerFactory: NamedLoggerFactory, + override val timeouts: ProcessingTimeout, +) extends NamedLogging + with FlagCloseableAsync + with AutoCloseable { + + private implicit val directExecutionContext: DirectExecutionContext = DirectExecutionContext( + noTracingLogger + ) + + /** Details of sends in flight + * @param startedAt The time the request was made for calculating the elapsed duration for metrics. + * We use the host clock time for this value and it is only tracked ephemerally + * as the elapsed value will not be useful if the local process restarts during sequencing. 
+ */ + private case class PendingSend( + maxSequencingTime: CantonTimestamp, + callback: SendCallback, + startedAt: Option[Instant], + traceContext: TraceContext, + ) + + private val pendingSends: TrieMap[MessageId, PendingSend] = + (TrieMap.newBuilder ++= initialPendingSends map { + // callbacks and startedAt times will be lost between restarts of the sequencer client + case (messageId, maxSequencingTime) => + messageId -> PendingSend( + maxSequencingTime, + SendCallback.empty, + startedAt = None, + TraceContext.empty, + ) + }).result() + + def track( + messageId: MessageId, + maxSequencingTime: CantonTimestamp, + callback: SendCallback = SendCallback.empty, + )(implicit traceContext: TraceContext): EitherT[Future, SavePendingSendError, Unit] = { + performUnlessClosing(s"track $messageId") { + for { + _ <- store + .savePendingSend(messageId, maxSequencingTime) + _ = pendingSends.put( + messageId, + PendingSend(maxSequencingTime, callback, startedAt = Some(Instant.now()), traceContext), + ) match { + case Some(previousMaxSequencingTime) => + // if we were able to persist the new message id without issue but found the message id in our in-memory + // pending set it suggests either: + // - the database has been modified by a writer other than this sequencer client (so its pending set is not in sync) + // - there is a bug :-| + sys.error( + s"""The SequencerClient pending set of sends is out of sync from the database. + |The database reported no send for $messageId but our pending set includes a prior send with mst of $previousMaxSequencingTime.""".stripMargin + ) + case _none => // we're good + } + _ = metrics.submissions.inFlight.inc() + } yield () + }.onShutdown { + callback(UnlessShutdown.AbortedDueToShutdown) + EitherT.pure(()) + } + } + + /** Cancels a pending send without notifying any callers of the result. + * Should only be used if the send operation itself fails and the transport returns an error + * indicating that the send will never be sequenced. 
The SequencerClient should then call cancel + * to immediately allow retries with the same message-id and then propagate the send error + * to the caller. + */ + def cancelPendingSend(messageId: MessageId)(implicit traceContext: TraceContext): Future[Unit] = + removePendingSendUnlessTimeout(messageId, resultO = None, sequencedTimeO = None) + + /** Provide the latest sequenced events to update the send tracker + * + * Callers must not call this concurrently and it is assumed that it is called with sequenced events in order of sequencing. + * On receiving an event it will perform the following steps in order: + * 1. If the event is a Deliver or DeliverError from a send that is being tracked it will stop tracking this message id. + * This allows using the message-id for new sends. + * 2. Checks for any pending sends that have a max-sequencing-time that is less than the timestamp of this event. + * These events have timed out and a correct sequencer implementation will no longer sequence any events for this send. + * The callback of the pending event will be called with the outcome result. + * + * The operations performed by update are not atomic, if an error is encountered midway through processing an event + * then a subsequent replay will cause operations that still have pending sends stored to be retried. 
+ */ + def update( + events: Seq[OrdinarySequencedEvent[_]] + ): Future[Unit] = if (events.isEmpty) Future.unit + else { + for { + maxTimestamp <- events.foldM(CantonTimestamp.MinValue) { case (maxTs, event) => + removePendingSend(event.signedEvent.content)(event.traceContext).map { _ => + maxTs.max(event.timestamp) + } + } + _ <- processTimeouts(maxTimestamp) + } yield () + } + + private def processTimeouts( + timestamp: CantonTimestamp + ): Future[Unit] = { + val timedOut = pendingSends.collect { + case (messageId, PendingSend(maxSequencingTime, _, _, traceContext)) + if maxSequencingTime < timestamp => + Traced(messageId)(traceContext) + }.toList + // parallel would be okay + sequentialTraverse_(timedOut)(_.withTraceContext { implicit traceContext => + handleTimeout(timestamp) + }) + } + + @VisibleForTesting + protected def handleTimeout(timestamp: CantonTimestamp)( + messageId: MessageId + )(implicit traceContext: TraceContext): Future[Unit] = { + logger.debug(s"Sequencer send [$messageId] has timed out at $timestamp") + for { + _ <- removePendingSendUnlessTimeout( + messageId, + UnlessShutdown.Outcome(SendResult.Timeout(timestamp)).some, + None, // none because the message really timed out + ) + } yield () + } + + private def removePendingSend( + event: SequencedEvent[_] + )(implicit traceContext: TraceContext): Future[Unit] = + extractSendResult(event) + .fold(Future.unit) { case (messageId, sendResult) => + removePendingSendUnlessTimeout( + messageId, + UnlessShutdown.Outcome(sendResult).some, + Some(event.timestamp), + ) + } + + private def updateSequencedMetrics(pendingSend: PendingSend, result: SendResult): Unit = { + def recordSequencingTime(): Unit = { + withEmptyMetricsContext { implicit metricsContext => + pendingSend.startedAt foreach { startedAt => + val elapsed = java.time.Duration.between(startedAt, Instant.now()) + metrics.submissions.sequencingTime.update(elapsed) + } + } + } + + result match { + case SendResult.Success(_) => 
recordSequencingTime() + case SendResult.Error(_) => + // even though it's an error the sequencer still sequenced our request + recordSequencingTime() + case SendResult.Timeout(_) => + // intentionally not updating sequencing time as this implies no event was sequenced from our request + metrics.submissions.dropped.inc() + } + } + + /** Removes the pending send. + * If a send result is supplied the callback will be called with it. + * If the sequencedTime is supplied and it is more recent than the max-sequencing time of the + * event, then we will not remove the pending send. + */ + private def removePendingSendUnlessTimeout( + messageId: MessageId, + resultO: Option[UnlessShutdown[SendResult]], + sequencedTimeO: Option[CantonTimestamp], + )(implicit + traceContext: TraceContext + ): Future[Unit] = { + // note: this should be okay from a concurrency perspective as there should be only one active + // send with this message-id at a time (track would fail otherwise) + val current = pendingSends.get(messageId) + val removeUnlessTimedOut = pendingSends.updateWith(messageId) { + case Some(pending) if sequencedTimeO.exists(_ > pending.maxSequencingTime) => Some(pending) + case other => + // this shouldn't happen (as per above), but let's leave a note in the logs if it does + if (other != current) + logger.error(s"Concurrent modification of pending sends $other / $current") + None + } + (removeUnlessTimedOut, current) match { + // if the sequencedTime is passed and it is more recent than the max-sequencing time of the + // event, then we will not remove the pending send (it will be picked up later by the handleTimeout method) + case (None, Some(pending)) => + resultO.foreach { result => + result.foreach(updateSequencedMetrics(pending, _)) + pending.callback(result) + } + for { + _ <- store.removePendingSend(messageId) + } yield { + metrics.submissions.inFlight.dec() + } + case (Some(_), _) => + // We observed the command being sequenced but it arrived too late to be 
processed. + Future.unit + case _ => + logger.debug(s"Removing unknown pending command ${messageId}") + store.removePendingSend(messageId) + } + } + + private def extractSendResult( + event: SequencedEvent[_] + )(implicit traceContext: TraceContext): Option[(MessageId, SendResult)] = { + Option(event) collect { + case deliver @ Deliver(_, _, _, Some(messageId), _) => + logger.trace(s"Send [$messageId] was successful") + (messageId, SendResult.Success(deliver)) + + case error @ DeliverError(_, _, _, messageId, reason) => + logger.debug(s"Send [$messageId] failed: $reason") + (messageId, SendResult.Error(error)) + } + } + + override def closeAsync(): Seq[AsyncOrSyncCloseable] = { + import TraceContext.Implicits.Empty.emptyTraceContext + Seq( + AsyncCloseable( + "complete-pending-sends", + MonadUtil.sequentialTraverse_(pendingSends.keys)( + removePendingSendUnlessTimeout(_, Some(UnlessShutdown.AbortedDueToShutdown), None) + ), + timeouts.shutdownProcessing, + ), + SyncCloseable("send-tracker-store", store.close()), + ) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SendType.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SendType.scala new file mode 100644 index 0000000000..a3f515eeef --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SendType.scala @@ -0,0 +1,24 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.client + +/** What type of message is being sent. + * Used by the domain and surrounding infrastructure for prioritizing send requests. + */ +sealed trait SendType { + private[client] val isRequest: Boolean +} + +object SendType { + + /** An initial confirmation request. This is subject to throttling at the domain if resource constrained. 
*/ + case object ConfirmationRequest extends SendType { + override private[client] val isRequest = true + } + + /** There's currently no requirement to distinguish other types of request */ + case object Other extends SendType { + override private[client] val isRequest = false + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencedEventValidator.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencedEventValidator.scala new file mode 100644 index 0000000000..fcdddedbc7 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencedEventValidator.scala @@ -0,0 +1,747 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.client + +import cats.Monad +import cats.data.EitherT +import cats.syntax.either.* +import cats.syntax.flatMap.* +import cats.syntax.foldable.* +import cats.syntax.functor.* +import cats.syntax.traverse.* +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.SequencerCounter +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.config.RequireTypes.NonNegativeInt +import com.digitalasset.canton.crypto.{ + HashPurpose, + SignatureCheckError, + SyncCryptoApi, + SyncCryptoClient, +} +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.error.CantonError +import com.digitalasset.canton.lifecycle.{ + CloseContext, + FlagCloseable, + FutureUnlessShutdown, + HasCloseContext, + UnlessShutdown, +} +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.logging.{ + ErrorLoggingContext, + HasLoggerName, + NamedLoggerFactory, + NamedLogging, + NamedLoggingContext, +} +import com.digitalasset.canton.protocol.DynamicDomainParametersWithValidity +import 
com.digitalasset.canton.sequencing.client.SequencedEventValidationError.UpstreamSubscriptionError +import com.digitalasset.canton.sequencing.protocol.{ClosedEnvelope, SequencedEvent} +import com.digitalasset.canton.sequencing.{OrdinarySerializedEvent, PossiblyIgnoredSerializedEvent} +import com.digitalasset.canton.store.SequencedEventStore.IgnoredSequencedEvent +import com.digitalasset.canton.time.NonNegativeFiniteDuration +import com.digitalasset.canton.topology.client.TopologySnapshot +import com.digitalasset.canton.topology.{DomainId, SequencerId} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.ErrorUtil +import com.digitalasset.canton.util.PekkoUtil.WithKillSwitch +import com.digitalasset.canton.util.PekkoUtil.syntax.* +import com.digitalasset.canton.version.ProtocolVersion + +import scala.concurrent.{ExecutionContext, Future} + +sealed trait SequencedEventValidationError[+E] extends Product with Serializable with PrettyPrinting +object SequencedEventValidationError { + final case class UpstreamSubscriptionError[+E: Pretty](error: E) + extends SequencedEventValidationError[E] { + override def pretty: Pretty[this.type] = prettyOfParam(_.error) + } + final case class BadDomainId(expected: DomainId, received: DomainId) + extends SequencedEventValidationError[Nothing] { + override def pretty: Pretty[BadDomainId] = prettyOfClass( + param("expected", _.expected), + param("received", _.received), + ) + } + final case class DecreasingSequencerCounter( + newCounter: SequencerCounter, + oldCounter: SequencerCounter, + ) extends SequencedEventValidationError[Nothing] { + override def pretty: Pretty[DecreasingSequencerCounter] = prettyOfClass( + param("new counter", _.newCounter), + param("old counter", _.oldCounter), + ) + } + final case class GapInSequencerCounter(newCounter: SequencerCounter, oldCounter: SequencerCounter) + extends SequencedEventValidationError[Nothing] { + override def pretty: Pretty[GapInSequencerCounter] = 
prettyOfClass( + param("new counter", _.newCounter), + param("old counter", _.oldCounter), + ) + } + final case class NonIncreasingTimestamp( + newTimestamp: CantonTimestamp, + newCounter: SequencerCounter, + oldTimestamp: CantonTimestamp, + oldCounter: SequencerCounter, + ) extends SequencedEventValidationError[Nothing] { + override def pretty: Pretty[NonIncreasingTimestamp] = prettyOfClass( + param("new timestamp", _.newTimestamp), + param("new counter", _.newCounter), + param("old timestamp", _.oldTimestamp), + param("old counter", _.oldCounter), + ) + } + final case class ForkHappened( + counter: SequencerCounter, + suppliedEvent: SequencedEvent[ClosedEnvelope], + expectedEvent: Option[SequencedEvent[ClosedEnvelope]], + )(implicit + val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = + "The sequencer responded with a different message for the same counter / timestamp, which means the sequencer forked." + )(ResilientSequencerSubscription.ForkHappened) + with SequencedEventValidationError[Nothing] + with PrettyPrinting { + override def pretty: Pretty[ForkHappened] = prettyOfClass( + param("counter", _.counter), + param("supplied event", _.suppliedEvent), + paramIfDefined("expected event", _.expectedEvent), + ) + } + final case class SignatureInvalid( + sequencedTimestamp: CantonTimestamp, + usedTimestamp: CantonTimestamp, + error: SignatureCheckError, + ) extends SequencedEventValidationError[Nothing] { + override def pretty: Pretty[SignatureInvalid] = prettyOfClass( + unnamedParam(_.error), + param("sequenced timestamp", _.sequencedTimestamp), + param("used timestamp", _.usedTimestamp), + ) + } + final case class InvalidTimestampOfSigningKey( + sequencedTimestamp: CantonTimestamp, + declaredSigningKeyTimestamp: CantonTimestamp, + reason: SequencedEventValidator.SigningTimestampVerificationError, + ) extends SequencedEventValidationError[Nothing] { + override def pretty: Pretty[InvalidTimestampOfSigningKey] = prettyOfClass( + 
param("sequenced timestamp", _.sequencedTimestamp), + param("declared signing key timestamp", _.declaredSigningKeyTimestamp), + param("reason", _.reason), + ) + } + final case class TimestampOfSigningKeyNotAllowed( + sequencedTimestamp: CantonTimestamp, + declaredSigningKeyTimestamp: CantonTimestamp, + ) extends SequencedEventValidationError[Nothing] { + override def pretty: Pretty[TimestampOfSigningKeyNotAllowed] = prettyOfClass( + param("sequenced timestamp", _.sequencedTimestamp), + param("declared signing key timestamp", _.declaredSigningKeyTimestamp), + ) + } +} + +/** Validate whether a received event is valid for processing. */ +trait SequencedEventValidator extends AutoCloseable { + + /** Validates that the supplied event is suitable for processing from the prior event. + * If the event is successfully validated it becomes the event that the event + * in a following call will be validated against. We currently assume this is safe to do as if the event fails to be + * handled by the application then the sequencer client will halt and will need recreating to restart event processing. + */ + def validate( + priorEvent: Option[PossiblyIgnoredSerializedEvent], + event: OrdinarySerializedEvent, + sequencerId: SequencerId, + ): EitherT[FutureUnlessShutdown, SequencedEventValidationError[Nothing], Unit] + + /** Validates a sequenced event when we reconnect against the prior event supplied to [[SequencedEventValidatorFactory.create]] */ + def validateOnReconnect( + priorEvent: Option[PossiblyIgnoredSerializedEvent], + reconnectEvent: OrdinarySerializedEvent, + sequencerId: SequencerId, + ): EitherT[FutureUnlessShutdown, SequencedEventValidationError[Nothing], Unit] + + /** Add event validation to the given [[com.digitalasset.canton.sequencing.client.SequencerSubscriptionPekko]]. + * Stuttering is interpreted as reconnection and validated accordingly. 
+ * + * The returned [[com.digitalasset.canton.sequencing.client.SequencerSubscriptionPekko]] completes after the first + * event validation failure or the first subscription error. It does not stutter any more. + * + * @param priorReconnectEvent The sequenced event at which the reconnection happens. + * If [[scala.Some$]], the first received event must be the same + */ + def validatePekko[E: Pretty]( + subscription: SequencerSubscriptionPekko[E], + priorReconnectEvent: Option[OrdinarySerializedEvent], + sequencerId: SequencerId, + )(implicit + traceContext: TraceContext + ): SequencerSubscriptionPekko[SequencedEventValidationError[E]] +} + +object SequencedEventValidator extends HasLoggerName { + + /** Do not validate sequenced events */ + private case object NoValidation extends SequencedEventValidator { + override def validate( + priorEvent: Option[PossiblyIgnoredSerializedEvent], + event: OrdinarySerializedEvent, + sequencerId: SequencerId, + ): EitherT[FutureUnlessShutdown, SequencedEventValidationError[Nothing], Unit] = + EitherT(FutureUnlessShutdown.pure(Either.right(()))) + override def validateOnReconnect( + priorEvent: Option[PossiblyIgnoredSerializedEvent], + reconnectEvent: OrdinarySerializedEvent, + sequencerId: SequencerId, + ): EitherT[FutureUnlessShutdown, SequencedEventValidationError[Nothing], Unit] = + validate(priorEvent, reconnectEvent, sequencerId) + + override def validatePekko[E: Pretty]( + subscription: SequencerSubscriptionPekko[E], + priorReconnectEvent: Option[OrdinarySerializedEvent], + sequencerId: SequencerId, + )(implicit + traceContext: TraceContext + ): SequencerSubscriptionPekko[SequencedEventValidationError[E]] = + SequencerSubscriptionPekko( + subscription.source.map(_.map(_.leftMap(UpstreamSubscriptionError(_)))), + subscription.health, + ) + + override def close(): Unit = () + } + + /** Do not validate sequenced events. 
+ * Only use it in case of a programming error and the need to unblock a deployment or + * if you blindly trust the sequencer. + * + * @param warn whether to log a warning when used + */ + def noValidation( + domainId: DomainId, + warn: Boolean = true, + )(implicit + loggingContext: NamedLoggingContext + ): SequencedEventValidator = { + if (warn) { + loggingContext.warn( + s"You have opted to skip event validation for domain $domainId. You should not do this unless you know what you are doing." + ) + } + NoValidation + } + + /** Validates the requested signing timestamp against the sequencing timestamp and the + * [[com.digitalasset.canton.protocol.DynamicDomainParameters.sequencerSigningTolerance]] + * of the domain parameters valid at the requested signing timestamp. + * + * @param latestTopologyClientTimestamp The timestamp of an earlier event sent to the topology client + * such that no topology update has happened + * between this timestamp (exclusive) and the sequencing timestamp (exclusive). + * @param warnIfApproximate Whether to emit a warning if an approximate topology snapshot is used + * @param optimistic if true, we'll try to be optimistic and validate the event with the current snapshot approximation + * instead of the proper snapshot for the signing timestamp. + * During sequencer key rolling or while updating the dynamic domain parameters, + * an event might have been signed by a key that was just revoked or with a signing key timestamp + * that exceeds the [[com.digitalasset.canton.protocol.DynamicDomainParameters.sequencerSigningTolerance]]. + * Optimistic validation may not catch such problems. + * @return [[scala.Left$]] if the signing timestamp is after the sequencing timestamp or the sequencing timestamp + * is after the signing timestamp by more than the + * [[com.digitalasset.canton.protocol.DynamicDomainParameters.sequencerSigningTolerance]] valid at the signing timestamp. 
+ * [[scala.Right$]] the topology snapshot that can be used for signing the event + * and verifying the signature on the event; + */ + // TODO(#10040) remove optimistic validation + def validateSigningTimestamp( + syncCryptoApi: SyncCryptoClient[SyncCryptoApi], + signingTimestamp: CantonTimestamp, + sequencingTimestamp: CantonTimestamp, + latestTopologyClientTimestamp: Option[CantonTimestamp], + protocolVersion: ProtocolVersion, + warnIfApproximate: Boolean, + optimistic: Boolean = false, + )(implicit + loggingContext: NamedLoggingContext, + executionContext: ExecutionContext, + ): EitherT[Future, SigningTimestampVerificationError, SyncCryptoApi] = { + + validateSigningTimestampInternal( + syncCryptoApi, + signingTimestamp, + sequencingTimestamp, + latestTopologyClientTimestamp, + protocolVersion, + warnIfApproximate, + optimistic, + )( + SyncCryptoClient.getSnapshotForTimestamp _, + (topology, traceContext) => topology.findDynamicDomainParameters()(traceContext), + ) + } + + def validateSigningTimestampUS( + syncCryptoApi: SyncCryptoClient[SyncCryptoApi], + signingTimestamp: CantonTimestamp, + sequencingTimestamp: CantonTimestamp, + latestTopologyClientTimestamp: Option[CantonTimestamp], + protocolVersion: ProtocolVersion, + warnIfApproximate: Boolean, + optimistic: Boolean = false, + )(implicit + loggingContext: NamedLoggingContext, + executionContext: ExecutionContext, + closeContext: CloseContext, + ): EitherT[FutureUnlessShutdown, SigningTimestampVerificationError, SyncCryptoApi] = { + validateSigningTimestampInternal( + syncCryptoApi, + signingTimestamp, + sequencingTimestamp, + latestTopologyClientTimestamp, + protocolVersion, + warnIfApproximate, + optimistic, + )( + SyncCryptoClient.getSnapshotForTimestampUS _, + (topology, traceContext) => + closeContext.context.performUnlessClosingF("get-dynamic-parameters")( + topology.findDynamicDomainParameters()(traceContext) + )(executionContext, traceContext), + ) + } + + // Base version of validateSigningTimestamp 
abstracting over the effect type to allow for + // a `Future` and `FutureUnlessShutdown` version. Once we migrate all usages to the US version, this abstraction + // should not be needed anymore + private def validateSigningTimestampInternal[F[_]: Monad]( + syncCryptoApi: SyncCryptoClient[SyncCryptoApi], + signingTimestamp: CantonTimestamp, + sequencingTimestamp: CantonTimestamp, + latestTopologyClientTimestamp: Option[CantonTimestamp], + protocolVersion: ProtocolVersion, + warnIfApproximate: Boolean, + optimistic: Boolean = false, + )( + getSnapshotF: ( + SyncCryptoClient[SyncCryptoApi], + CantonTimestamp, + Option[CantonTimestamp], + ProtocolVersion, + Boolean, + ) => F[SyncCryptoApi], + getDynamicDomainParameters: ( + TopologySnapshot, + TraceContext, + ) => F[Either[String, DynamicDomainParametersWithValidity]], + )(implicit + loggingContext: NamedLoggingContext + ): EitherT[F, SigningTimestampVerificationError, SyncCryptoApi] = { + implicit val traceContext: TraceContext = loggingContext.traceContext + + def snapshotF: F[SyncCryptoApi] = getSnapshotF( + syncCryptoApi, + signingTimestamp, + latestTopologyClientTimestamp, + protocolVersion, + warnIfApproximate, + ) + + def validateWithSnapshot( + snapshot: SyncCryptoApi + ): F[Either[SigningTimestampVerificationError, SyncCryptoApi]] = { + getDynamicDomainParameters(snapshot.ipsSnapshot, traceContext) + .map { dynamicDomainParametersE => + for { + dynamicDomainParameters <- dynamicDomainParametersE.leftMap(NoDynamicDomainParameters) + tolerance = dynamicDomainParameters.sequencerSigningTolerance + withinSigningTolerance = { + import scala.Ordered.orderingToOrdered + tolerance.unwrap >= sequencingTimestamp - signingTimestamp + } + _ <- Either.cond(withinSigningTolerance, (), SigningTimestampTooOld(tolerance)) + } yield snapshot + } + } + + if (signingTimestamp > sequencingTimestamp) { + EitherT.leftT[F, SyncCryptoApi](SigningTimestampAfterSequencingTime) + } else if (optimistic) { + val approximateSnapshot = 
syncCryptoApi.currentSnapshotApproximation + val approximateSnapshotTime = approximateSnapshot.ipsSnapshot.timestamp + // If the topology client has caught up to the signing timestamp, + // use the right snapshot + if (signingTimestamp <= approximateSnapshotTime) { + EitherT(snapshotF.flatMap(validateWithSnapshot)) + } else { + loggingContext.debug( + s"Validating event at $sequencingTimestamp optimistically with snapshot taken at $approximateSnapshotTime" + ) + EitherT(validateWithSnapshot(approximateSnapshot)) + } + } else if (signingTimestamp == sequencingTimestamp) { + // If the signing timestamp is the same as the sequencing timestamp, + // we don't need to check the tolerance because it is always non-negative. + EitherT.right[SigningTimestampVerificationError](snapshotF) + } else { + EitherT(snapshotF.flatMap(validateWithSnapshot)) + } + } + + sealed trait SigningTimestampVerificationError + extends Product + with Serializable + with PrettyPrinting + case object SigningTimestampAfterSequencingTime extends SigningTimestampVerificationError { + override def pretty: Pretty[SigningTimestampAfterSequencingTime] = + prettyOfObject[SigningTimestampAfterSequencingTime] + } + type SigningTimestampAfterSequencingTime = SigningTimestampAfterSequencingTime.type + + final case class SigningTimestampTooOld(tolerance: NonNegativeFiniteDuration) + extends SigningTimestampVerificationError { + override def pretty: Pretty[SigningTimestampTooOld] = prettyOfClass( + param("tolerance", _.tolerance) + ) + } + + final case class NoDynamicDomainParameters(error: String) + extends SigningTimestampVerificationError { + override def pretty: Pretty[NoDynamicDomainParameters] = prettyOfClass( + param("error", _.error.unquoted) + ) + } +} + +trait SequencedEventValidatorFactory { + + /** Creates a new [[SequencedEventValidator]] to be used for a subscription with the given parameters. 
+ * + * @param initialLastEventProcessedO + * The last event that the sequencer client had validated (and persisted) in case of a resubscription. + * The [[com.digitalasset.canton.sequencing.client.SequencerSubscription]] requests this event again. + * @param unauthenticated Whether the subscription is unauthenticated + */ + def create( + unauthenticated: Boolean + )(implicit loggingContext: NamedLoggingContext): SequencedEventValidator +} + +object SequencedEventValidatorFactory { + + /** Do not validate sequenced events. + * Only use it in case of a programming error and the need to unblock a deployment or + * if you blindly trust the sequencer. + * + * @param warn whether to log a warning + */ + def noValidation( + domainId: DomainId, + warn: Boolean = true, + ): SequencedEventValidatorFactory = new SequencedEventValidatorFactory { + override def create( + unauthenticated: Boolean + )(implicit loggingContext: NamedLoggingContext): SequencedEventValidator = + SequencedEventValidator.noValidation(domainId, warn) + } +} + +/** Validate whether a received event is valid for processing. + * + * @param unauthenticated if true, then the connection is unauthenticated. in such cases, we have to skip some validations. + * @param optimistic if true, we'll try to be optimistic and validate the event possibly with some stale data. this + * means that during sequencer key rolling, a message might have been signed by a key that was just revoked. + * the security impact is very marginal (and an adverse scenario only possible in the async ms of + * this node validating a few inflight transactions). therefore, this parameter should be set to + * true due to performance reasons. 
+ */ +// TODO(#10040) remove optimistic validation +class SequencedEventValidatorImpl( + unauthenticated: Boolean, + optimistic: Boolean, + domainId: DomainId, + protocolVersion: ProtocolVersion, + syncCryptoApi: SyncCryptoClient[SyncCryptoApi], + protected val loggerFactory: NamedLoggerFactory, + override val timeouts: ProcessingTimeout, +)(implicit executionContext: ExecutionContext) + extends SequencedEventValidator + with FlagCloseable + with HasCloseContext + with NamedLogging { + + import SequencedEventValidationError.* + import SequencedEventValidatorImpl.* + + /** Validates that the supplied event is suitable for processing from the prior event. + * Currently the signature not being valid is not considered an error but its validity is returned to the caller + * to allow them to choose what to do with the event. + * If the event is successfully validated (regardless of the signature check) it becomes the event that the event + * in a following call will be validated against. We currently assume this is safe to do as if the event fails to be + * handled by the application then the sequencer client will halt and will need recreating to restart event processing. + * This method must not be called concurrently as it will corrupt the prior event state. 
+ */ + override def validate( + priorEventO: Option[PossiblyIgnoredSerializedEvent], + event: OrdinarySerializedEvent, + sequencerId: SequencerId, + ): EitherT[FutureUnlessShutdown, SequencedEventValidationError[Nothing], Unit] = { + val oldCounter = priorEventO.fold(SequencerCounter.Genesis - 1L)(_.counter) + val newCounter = event.counter + + def checkCounterIncreases: ValidationResult = + Either.cond( + newCounter == oldCounter + 1, + (), + if (newCounter < oldCounter) DecreasingSequencerCounter(newCounter, oldCounter) + else GapInSequencerCounter(newCounter, oldCounter), + ) + + def checkTimestampIncreases: ValidationResult = + priorEventO.traverse_ { prior => + val oldTimestamp = prior.timestamp + val newTimestamp = event.timestamp + Either.cond( + newTimestamp > oldTimestamp, + (), + NonIncreasingTimestamp(newTimestamp, newCounter, oldTimestamp, oldCounter), + ) + } + + // TODO(M99): dishonest sequencer: Check that the node is listed as a recipient on all envelopes in the batch + + for { + _ <- EitherT.fromEither[FutureUnlessShutdown]( + Seq( + checkCounterIncreases, + checkDomainId(event), + checkTimestampIncreases, + ).sequence_ + ) + // Verify the signature only if we know of a prior event. 
+ // Otherwise, this is a fresh subscription and we will get the topology state with the first transaction + // TODO(#4933) Upon a fresh subscription, retrieve the keys via the topology API and validate immediately or + // validate the signature after processing the initial event + _ <- verifySignature(priorEventO, event, sequencerId, protocolVersion) + } yield () + } + + override def validateOnReconnect( + priorEvent0: Option[PossiblyIgnoredSerializedEvent], + reconnectEvent: OrdinarySerializedEvent, + sequencerId: SequencerId, + ): EitherT[FutureUnlessShutdown, SequencedEventValidationError[Nothing], Unit] = { + implicit val traceContext: TraceContext = reconnectEvent.traceContext + val priorEvent = priorEvent0.getOrElse( + ErrorUtil.internalError( + new IllegalStateException( + s"No prior event known even though the sequencer client resubscribes to $sequencerId at sequencer counter ${reconnectEvent.counter}" + ) + ) + ) + val checkFork: Either[SequencedEventValidationError[Nothing], Unit] = priorEvent match { + case ordinaryPrior: OrdinarySerializedEvent => + val oldSequencedEvent = ordinaryPrior.signedEvent.content + val newSequencedEvent = reconnectEvent.signedEvent.content + // We compare the contents of the `SequencedEvent` rather than their serialization + // because the SequencerReader serializes the `SequencedEvent` afresh upon each resubscription + // and the serialization may therefore differ from time to time. This is fine for auditability + // because the sequencer also delivers a new signature on the new serialization. 
+ Either.cond( + oldSequencedEvent == newSequencedEvent, + (), + ForkHappened(oldSequencedEvent.counter, newSequencedEvent, Some(oldSequencedEvent)), + ) + case ignored: IgnoredSequencedEvent[ClosedEnvelope] => + // If the event should be ignored, we nevertheless check the counter + // We merely check timestamp monotonicity, but not the exact timestamp + // because when we ignore unsequenced events, we assign them the least possible timestamp. + Either.cond( + ignored.counter == reconnectEvent.counter && ignored.timestamp <= reconnectEvent.timestamp, + (), + ForkHappened( + ignored.counter, + reconnectEvent.signedEvent.content, + ignored.underlying.map(_.content), + ), + ) + } + + for { + _ <- EitherT.fromEither[FutureUnlessShutdown]( + Seq( + checkDomainId(reconnectEvent), + checkFork, + ).sequence_ + ) + _ <- verifySignature(Some(priorEvent), reconnectEvent, sequencerId, protocolVersion) + } yield () + // do not update the priorEvent because if it was ignored, then it was ignored for a reason. 
+ } + + private def checkDomainId(event: OrdinarySerializedEvent): ValidationResult = { + val receivedDomainId = event.signedEvent.content.domainId + Either.cond(receivedDomainId == domainId, (), BadDomainId(domainId, receivedDomainId)) + } + + private def verifySignature( + priorEventO: Option[PossiblyIgnoredSerializedEvent], + event: OrdinarySerializedEvent, + sequencerId: SequencerId, + protocolVersion: ProtocolVersion, + ): EitherT[FutureUnlessShutdown, SequencedEventValidationError[Nothing], Unit] = { + implicit val traceContext: TraceContext = event.traceContext + if (unauthenticated) { + // TODO(i4933) once we have topology data on the sequencer api, we might fetch the domain keys + // and use the domain keys to validate anything here if we are unauthenticated + logger.debug( + s"Skipping sequenced event validation for counter ${event.counter} and timestamp ${event.timestamp} in unauthenticated subscription from $sequencerId" + ) + EitherT.fromEither[FutureUnlessShutdown](checkNoTimestampOfSigningKey(event)) + } else if (event.counter == SequencerCounter.Genesis) { + // TODO(#4933) This is a fresh subscription. Either fetch the domain keys via a future sequencer API and validate the signature + // or wait until the topology processor has processed the topology information in the first message and then validate the signature. + logger.info( + s"Skipping signature verification of the first sequenced event due to a fresh subscription from $sequencerId" + ) + // The first sequenced event addressed to a member must not specify a signing key timestamp because + // the member will only be able to compute snapshots for the current topology state and later. 
+ EitherT.fromEither[FutureUnlessShutdown](checkNoTimestampOfSigningKey(event)) + } else { + val signingTs = event.signedEvent.timestampOfSigningKey.getOrElse(event.timestamp) + + def doValidate( + optimistic: Boolean + ): EitherT[FutureUnlessShutdown, SequencedEventValidationError[Nothing], Unit] = + for { + snapshot <- SequencedEventValidator + .validateSigningTimestampUS( + syncCryptoApi, + signingTs, + event.timestamp, + lastTopologyClientTimestamp(priorEventO), + protocolVersion, + warnIfApproximate = true, + optimistic, + ) + .leftMap(InvalidTimestampOfSigningKey(event.timestamp, signingTs, _)) + _ <- event.signedEvent + .verifySignature(snapshot, sequencerId, HashPurpose.SequencedEventSignature) + .leftMap[SequencedEventValidationError[Nothing]]( + SignatureInvalid(event.timestamp, signingTs, _) + ) + .mapK(FutureUnlessShutdown.outcomeK) + } yield () + + doValidate(optimistic).leftFlatMap { err => + // When optimistic validation fails, retry with the right snapshot + if (optimistic) { + logger.debug( + s"Optimistic event validation failed with $err. Falling back to validation with the proper topology state." + ) + doValidate(optimistic = false) + } else EitherT.leftT(err) + } + } + } + + private def checkNoTimestampOfSigningKey(event: OrdinarySerializedEvent): ValidationResult = { + event.signedEvent.timestampOfSigningKey.traverse_(tsOfSigningKey => + // Batches addressed to unauthenticated members must not specify a signing key timestamp. + // As some sequencer implementations in some protocol versions set the timestampOfSigningKey field + // always to the sequencing timestamp if no timestamp was requested, + // we tolerate equality. 
+ Either.cond( + tsOfSigningKey == event.timestamp, + (), + TimestampOfSigningKeyNotAllowed(event.timestamp, tsOfSigningKey), + ) + ) + } + + override def validatePekko[E: Pretty]( + subscription: SequencerSubscriptionPekko[E], + priorReconnectEvent: Option[OrdinarySerializedEvent], + sequencerId: SequencerId, + )(implicit + traceContext: TraceContext + ): SequencerSubscriptionPekko[SequencedEventValidationError[E]] = { + def performValidation( + rememberedAndCurrent: NonEmpty[Seq[WithKillSwitch[Either[E, OrdinarySerializedEvent]]]] + ): FutureUnlessShutdown[WithKillSwitch[ + // None if the element should not be emitted + Option[Either[SequencedEventValidationError[E], OrdinarySerializedEvent]] + ]] = + rememberedAndCurrent.last1.traverse { + case Left(err) => FutureUnlessShutdown.pure(Some(Left(UpstreamSubscriptionError(err)))) + case Right(current) => + val validationEF = + if (rememberedAndCurrent.sizeIs <= 1) + validateOnReconnect(priorReconnectEvent, current, sequencerId).value.map( + _.traverse((_: Unit) => None) + ) + else { + val previousEvent = rememberedAndCurrent.head1.unwrap.valueOr { previousErr => + implicit val traceContext: TraceContext = current.traceContext + ErrorUtil.invalidState( + s"Subscription for sequencer $sequencerId delivered an event at counter ${current.counter} after having previously signalled the error $previousErr" + ) + } + // SequencerSubscriptions may stutter on reconnect, e.g., inside a resilient sequencer subscription + val previousEventId = (previousEvent.counter, previousEvent.timestamp) + val currentEventId = (current.counter, current.timestamp) + val stutter = previousEventId == currentEventId + if (stutter) + validateOnReconnect(Some(previousEvent), current, sequencerId).value + .map(_.traverse((_: Unit) => None)) + else + validate(Some(previousEvent), current, sequencerId).value + .map(_.traverse((_: Unit) => Option(current))) + } + validationEF + } + + val validatedSource = subscription.source + 
.remember(NonNegativeInt.one) + .statefulMapAsyncUS(false) { (failedPreviously, event) => + // Do not start the validation of the next event if the previous one failed. + // Otherwise, we may deadlock on the topology snapshot because the event with the failed validation + // may never reach the topology processor. + if (failedPreviously) + FutureUnlessShutdown.pure(failedPreviously -> event.last1.map(_ => None)) + else + performValidation(event).map { validation => + val failed = validation.unwrap.exists(_.isLeft) + failed -> validation + } + } + // Filter out the stuttering + .mapConcat { + case UnlessShutdown.AbortedDueToShutdown => + // TODO(#13789) should we pull a kill switch here? + None + case UnlessShutdown.Outcome(result) => result.sequence + } + .takeUntilThenDrain(_.isLeft) + SequencerSubscriptionPekko(validatedSource, subscription.health) + } +} + +object SequencedEventValidatorImpl { + private[SequencedEventValidatorImpl] type ValidationResult = + Either[SequencedEventValidationError[Nothing], Unit] + + /** The sequencer client assumes that the topology processor is ticked for every event processed, + * even if the event is a [[com.digitalasset.canton.store.SequencedEventStore.IgnoredSequencedEvent]]. + * This is why [[com.digitalasset.canton.sequencing.handlers.DiscardIgnoredEvents]] + * must not be used in application handlers on nodes that support ignoring events. 
+ */ + private[SequencedEventValidatorImpl] def lastTopologyClientTimestamp( + priorEvent: Option[PossiblyIgnoredSerializedEvent] + ): Option[CantonTimestamp] = + priorEvent.map(_.timestamp) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClient.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClient.scala new file mode 100644 index 0000000000..2581558eb7 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClient.scala @@ -0,0 +1,1352 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.client + +import cats.data.EitherT +import cats.implicits.catsSyntaxOptionId +import cats.syntax.either.* +import com.daml.metrics.Timed +import com.daml.nameof.NameOf.functionFullName +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.concurrent.FutureSupervisor +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.config.* +import com.digitalasset.canton.crypto.{CryptoPureApi, HashPurpose} +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.health.{ + CloseableHealthComponent, + ComponentHealthState, + DelegatingMutableHealthComponent, +} +import com.digitalasset.canton.lifecycle.Lifecycle.toCloseableOption +import com.digitalasset.canton.lifecycle.* +import com.digitalasset.canton.logging.pretty.CantonPrettyPrinter +import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.metrics.SequencerClientMetrics +import com.digitalasset.canton.protocol.DomainParameters.MaxRequestSize +import com.digitalasset.canton.protocol.DomainParametersLookup.SequencerDomainParameters +import 
com.digitalasset.canton.protocol.DynamicDomainParametersLookup +import com.digitalasset.canton.protocol.messages.DefaultOpenEnvelope +import com.digitalasset.canton.resource.DbStorage.PassiveInstanceException +import com.digitalasset.canton.sequencing.SequencerAggregator.MessageAggregationConfig +import com.digitalasset.canton.sequencing.* +import com.digitalasset.canton.sequencing.client.SendCallback.CallbackFuture +import com.digitalasset.canton.sequencing.client.SequencerClient.SequencerTransports +import com.digitalasset.canton.sequencing.client.SequencerClientSubscriptionError.* +import com.digitalasset.canton.sequencing.client.transports.{ + SequencerClientTransport, + SequencerClientTransportPekko, +} +import com.digitalasset.canton.sequencing.handlers.{ + CleanSequencerCounterTracker, + StoreSequencedEvent, + ThrottlingApplicationEventHandler, +} +import com.digitalasset.canton.sequencing.protocol.* +import com.digitalasset.canton.store.CursorPrehead.SequencerCounterCursorPrehead +import com.digitalasset.canton.store.SequencedEventStore.PossiblyIgnoredSequencedEvent +import com.digitalasset.canton.store.* +import com.digitalasset.canton.time.{Clock, DomainTimeTracker} +import com.digitalasset.canton.topology.* +import com.digitalasset.canton.tracing.{Spanning, TraceContext, Traced} +import com.digitalasset.canton.util.FutureUtil.defaultStackTraceFilter +import com.digitalasset.canton.util.ShowUtil.* +import com.digitalasset.canton.util.* +import com.digitalasset.canton.util.retry.RetryUtil.AllExnRetryable +import com.digitalasset.canton.version.ProtocolVersion +import com.digitalasset.canton.{DiscardOps, SequencerAlias, SequencerCounter} +import com.google.common.annotations.VisibleForTesting +import io.opentelemetry.api.trace.Tracer +import org.slf4j.event.Level + +import java.nio.file.Path +import java.time.Duration as JDuration +import java.util.concurrent.atomic.AtomicReference +import java.util.concurrent.{BlockingQueue, LinkedBlockingQueue} +import 
scala.concurrent.* +import scala.concurrent.duration.* +import scala.util.{Failure, Success, Try} + +trait SequencerClient extends SequencerClientSend with FlagCloseable { + + /** Sends a request to sequence a deliver event to the sequencer. + * This method merely dispatches to one of the other methods (`sendAsync` or `sendAsyncUnauthenticated`) + * depending if member is Authenticated or Unauthenticated. + */ + def sendAsyncUnauthenticatedOrNot( + batch: Batch[DefaultOpenEnvelope], + sendType: SendType = SendType.Other, + timestampOfSigningKey: Option[CantonTimestamp] = None, + maxSequencingTime: CantonTimestamp = generateMaxSequencingTime, + messageId: MessageId = generateMessageId, + aggregationRule: Option[AggregationRule] = None, + callback: SendCallback = SendCallback.empty, + )(implicit traceContext: TraceContext): EitherT[Future, SendAsyncClientError, Unit] + + /** Does the same as [[sendAsync]], except that this method is supposed to be used + * only by unauthenticated members for very specific operations that do not require authentication + * such as requesting that a participant's topology data gets accepted by the topology manager + */ + def sendAsyncUnauthenticated( + batch: Batch[DefaultOpenEnvelope], + sendType: SendType = SendType.Other, + maxSequencingTime: CantonTimestamp = generateMaxSequencingTime, + messageId: MessageId = generateMessageId, + callback: SendCallback = SendCallback.empty, + )(implicit traceContext: TraceContext): EitherT[Future, SendAsyncClientError, Unit] + + /** Create a subscription for sequenced events for this member, + * starting after the prehead in the `sequencerCounterTrackerStore`. + * + * The `eventHandler` is monitored by [[com.digitalasset.canton.sequencing.handlers.CleanSequencerCounterTracker]] + * so that the `sequencerCounterTrackerStore` advances the prehead + * when (a batch of) events has been successfully processed by the `eventHandler` (synchronously and asynchronously). 
    *
    * @see subscribe for the description of the `eventHandler` and the `timeTracker`
    */
  def subscribeTracking(
      sequencerCounterTrackerStore: SequencerCounterTrackerStore,
      eventHandler: PossiblyIgnoredApplicationHandler[ClosedEnvelope],
      timeTracker: DomainTimeTracker,
      onCleanHandler: Traced[SequencerCounterCursorPrehead] => Unit = _ => (),
  )(implicit traceContext: TraceContext): Future[Unit]

  /** Create a subscription for sequenced events for this member,
    * starting after the last event in the [[com.digitalasset.canton.store.SequencedEventStore]] up to `priorTimestamp`.
    * A sequencer client can only have a single subscription - additional subscription attempts will throw an exception.
    * When an event is received, we will check the pending sends and invoke the provided call-backs with the send result
    * (which can be deliver or timeout) before invoking the `eventHandler`.
    *
    * If the [[com.digitalasset.canton.store.SequencedEventStore]] contains events after `priorTimestamp`,
    * the handler is first fed with these events before the subscription is established,
    * starting at the last event found in the [[com.digitalasset.canton.store.SequencedEventStore]].
    *
    * @param priorTimestamp The timestamp of the event prior to where the event processing starts.
    *                       If [[scala.None$]], the subscription starts at the [[initialCounterLowerBound]].
    * @param cleanPreheadTsO The timestamp of the clean prehead sequencer counter, if known.
    * @param eventHandler A function handling the events.
    * @param timeTracker Tracker for operations requiring the current domain time. Only updated with received events and not previously stored events.
    * @param fetchCleanTimestamp A function for retrieving the latest clean timestamp to use for periodic acknowledgements
    * @return The future completes after the subscription has been established or when an error occurs before that.
    *         In particular, synchronous processing of events from the [[com.digitalasset.canton.store.SequencedEventStore]]
    *         runs before the future completes.
    */
  // NOTE(review): the overriding doc on SequencerClientImpl.subscribeAfter instead says the subscription
  // starts at CounterCompanion.Genesis — confirm which wording is accurate and align the two scaladocs.
  def subscribeAfter(
      priorTimestamp: CantonTimestamp,
      cleanPreheadTsO: Option[CantonTimestamp],
      eventHandler: PossiblyIgnoredApplicationHandler[ClosedEnvelope],
      timeTracker: DomainTimeTracker,
      fetchCleanTimestamp: PeriodicAcknowledgements.FetchCleanTimestamp,
  )(implicit traceContext: TraceContext): Future[Unit]

  /** Does the same as [[subscribeAfter]], except that this method is supposed to be used
    * only by unauthenticated members
    */
  def subscribeAfterUnauthenticated(
      priorTimestamp: CantonTimestamp,
      eventHandler: PossiblyIgnoredApplicationHandler[ClosedEnvelope],
      timeTracker: DomainTimeTracker,
  )(implicit traceContext: TraceContext): Future[Unit]

  /** Future which is completed when the client is not functional any more and is ready to be closed.
    * The value with which the future is completed will indicate the reason for completion.
    */
  def completion: Future[SequencerClient.CloseReason]

  /** Replace the set of sequencer transports used by this client.
    * (Semantics inferred from the signature — implementation not visible here; verify against the impl.)
    */
  def changeTransport(
      sequencerTransports: SequencerTransports[?]
  )(implicit traceContext: TraceContext): Future[Unit]

  /** Returns a future that completes after asynchronous processing has completed for all events
    * whose synchronous processing has been completed prior to this call. May complete earlier if event processing
    * has failed.
    */
  @VisibleForTesting
  def flush(): Future[Unit]

  /** Health of this client's sequencer subscription(s). */
  def healthComponent: CloseableHealthComponent

  /** Acknowledge that we have successfully processed all events up to and including the given timestamp.
    * The client should then never subscribe for events from before this point.
    */
  private[client] def acknowledge(timestamp: CantonTimestamp)(implicit
      traceContext: TraceContext
  ): Future[Unit]

  /** Signed variant of [[acknowledge]]; returns an error message on the left on failure. */
  def acknowledgeSigned(timestamp: CantonTimestamp)(implicit
      traceContext: TraceContext
  ): EitherT[Future, String, Unit]

  /** The sequencer counter at which the first subscription starts */
  protected def initialCounterLowerBound: SequencerCounter
}

/** The sequencer client facilitates access to the individual domain sequencer. A client centralizes the
  * message signing operations, as well as the handling and storage of message receipts and delivery proofs,
  * such that this functionality does not have to be duplicated throughout the participant node.
  */
class SequencerClientImpl(
    val domainId: DomainId,
    val member: Member,
    sequencerTransports: SequencerTransports[?],
    val config: SequencerClientConfig,
    testingConfig: TestingConfigInternal,
    val protocolVersion: ProtocolVersion,
    domainParametersLookup: DynamicDomainParametersLookup[SequencerDomainParameters],
    override val timeouts: ProcessingTimeout,
    eventValidatorFactory: SequencedEventValidatorFactory,
    clock: Clock,
    val requestSigner: RequestSigner,
    private val sequencedEventStore: SequencedEventStore,
    sendTracker: SendTracker,
    metrics: SequencerClientMetrics,
    recorderO: Option[SequencerClientRecorder],
    replayEnabled: Boolean,
    cryptoPureApi: CryptoPureApi,
    loggingConfig: LoggingConfig,
    val loggerFactory: NamedLoggerFactory,
    futureSupervisor: FutureSupervisor,
    override protected val initialCounterLowerBound: SequencerCounter,
)(implicit executionContext: ExecutionContext, tracer: Tracer)
    extends SequencerClient
    with FlagCloseableAsync
    with NamedLogging
    with HasFlushFuture
    with Spanning
    with HasCloseContext {

  // Merges the event streams coming from the individual sequencer subscriptions into a single
  // stream, given the configured trust threshold.
  private val sequencerAggregator =
    new SequencerAggregator(
      cryptoPureApi,
      config.eventInboxSize,
      loggerFactory,
      MessageAggregationConfig(
        sequencerTransports.expectedSequencers,
        sequencerTransports.sequencerTrustThreshold,
      ),
      timeouts,
      futureSupervisor,
    )

  private val sequencersTransportState =
    new SequencersTransportState(
      sequencerTransports,
      timeouts,
      loggerFactory,
    )

  // Once all sequencer subscriptions have terminated, the client closes itself.
  sequencersTransportState.completion.onComplete { _ =>
    logger.debug(
      "The sequencer subscriptions have been closed. Closing sequencer client."
    )(TraceContext.empty)
    close()
  }

  // Aggregates the per-sequencer subscription health into a single component, honouring the
  // current trust threshold; reports failed ("Disconnected from domain") until populated.
  private lazy val deferredSubscriptionHealth =
    new DelegatingMutableHealthComponent[SequencerId](
      loggerFactory,
      SequencerClient.healthName,
      timeouts,
      states =>
        SequencerAggregator
          .aggregateHealthResult(states, sequencersTransportState.getSequencerTrustThreshold),
      ComponentHealthState.failed("Disconnected from domain"),
    )

  val healthComponent: CloseableHealthComponent = deferredSubscriptionHealth

  // Set once the (authenticated) subscription is established; see subscribeAfterInternal.
  private val periodicAcknowledgementsRef =
    new AtomicReference[Option[PeriodicAcknowledgements]](None)

  /** Stash for storing the failure that comes out of an application handler, either synchronously or asynchronously.
    * If non-empty, no further events should be sent to the application handler.
    */
  private val applicationHandlerFailure: SingleUseCell[ApplicationHandlerFailure] =
    new SingleUseCell[ApplicationHandlerFailure]

  /** Completed iff the handler is idle.
    */
  private val handlerIdle: AtomicReference[Promise[Unit]] = new AtomicReference(
    Promise.successful(())
  )

  private lazy val printer =
    new CantonPrettyPrinter(loggingConfig.api.maxStringLength, loggingConfig.api.maxMessageLines)

  override def sendAsyncUnauthenticatedOrNot(
      batch: Batch[DefaultOpenEnvelope],
      sendType: SendType = SendType.Other,
      timestampOfSigningKey: Option[CantonTimestamp] = None,
      maxSequencingTime: CantonTimestamp = generateMaxSequencingTime,
      messageId: MessageId = generateMessageId,
      aggregationRule: Option[AggregationRule] = None,
      callback: SendCallback = SendCallback.empty,
  )(implicit traceContext: TraceContext): EitherT[Future, SendAsyncClientError, Unit] = {
    // Dispatch on the runtime type of `member`: authenticated members take the signed path,
    // unauthenticated ones the unsigned path (which drops signing key and aggregation rule).
    member match {
      case _: AuthenticatedMember =>
        sendAsync(
          batch = batch,
          sendType = sendType,
          timestampOfSigningKey = timestampOfSigningKey,
          maxSequencingTime = maxSequencingTime,
          messageId = messageId,
          aggregationRule = aggregationRule,
          callback = callback,
        )
      case _: UnauthenticatedMemberId =>
        sendAsyncUnauthenticated(
          batch = batch,
          sendType = sendType,
          maxSequencingTime = maxSequencingTime,
          messageId = messageId,
          callback = callback,
        )
    }
  }

  override def sendAsync(
      batch: Batch[DefaultOpenEnvelope],
      sendType: SendType = SendType.Other,
      timestampOfSigningKey: Option[CantonTimestamp] = None,
      maxSequencingTime: CantonTimestamp = generateMaxSequencingTime,
      messageId: MessageId = generateMessageId,
      aggregationRule: Option[AggregationRule] = None,
      callback: SendCallback = SendCallback.empty,
  )(implicit traceContext: TraceContext): EitherT[Future, SendAsyncClientError, Unit] =
    for {
      // Guard: the authenticated send path is reserved for authenticated members.
      _ <- EitherT.cond[Future](
        member.isAuthenticated,
        (),
        SendAsyncClientError.RequestInvalid(
          "Only authenticated members can use the authenticated send operation"
        ): SendAsyncClientError,
      )
      // TODO(#12950): Validate that group addresses map to at least one member
      // A signing-key timestamp is incompatible with unauthenticated recipients.
      _ <- EitherT.cond[Future](
        timestampOfSigningKey.isEmpty || batch.envelopes.forall(
          _.recipients.allRecipients.forall {
            case MemberRecipient(m) => m.isAuthenticated
            case _ => true
          }
        ),
        (),
        SendAsyncClientError.RequestInvalid(
          "Requests addressed to unauthenticated members must not specify a timestamp for the signing key"
        ): SendAsyncClientError,
      )
      result <- sendAsyncInternal(
        batch,
        requiresAuthentication = true,
        sendType,
        timestampOfSigningKey,
        maxSequencingTime,
        messageId,
        aggregationRule,
        callback,
      )
    } yield result

  /** Does the same as [[sendAsync]], except that this method is supposed to be used
    * only by unauthenticated members for very specific operations that do not require authentication
    * such as requesting that a participant's topology data gets accepted by the topology manager
    */
  override def sendAsyncUnauthenticated(
      batch: Batch[DefaultOpenEnvelope],
      sendType: SendType = SendType.Other,
      maxSequencingTime: CantonTimestamp = generateMaxSequencingTime,
      messageId: MessageId = generateMessageId,
      callback: SendCallback = SendCallback.empty,
  )(implicit traceContext: TraceContext): EitherT[Future, SendAsyncClientError, Unit] =
    if (member.isAuthenticated)
      EitherT.leftT(
        SendAsyncClientError.RequestInvalid(
          "Only unauthenticated members can use the unauthenticated send operation"
        )
      )
    else
      sendAsyncInternal(
        batch,
        requiresAuthentication = false,
        sendType,
        // Requests involving unauthenticated members must not specify a signing key
        None,
        maxSequencingTime,
        messageId,
        None,
        callback,
      )

  /** Checks that the serialized request fits within the domain's maximum request size. */
  private def checkRequestSize(
      request: SubmissionRequest,
      maxRequestSize: MaxRequestSize,
  ): Either[SendAsyncClientError, Unit] = {
    // We're ignoring the size of the SignedContent wrapper here.
    // TODO(#12320) Look into what we really want to do here
    val serializedRequestSize = request.toProtoV1.serializedSize

    Either.cond(
      serializedRequestSize <= maxRequestSize.unwrap,
      (),
      SendAsyncClientError.RequestInvalid(
        s"Batch size ($serializedRequestSize bytes) is exceeding maximum size ($maxRequestSize bytes) for domain $domainId"
      ),
    )
  }

  /** Shared implementation behind [[sendAsync]] and [[sendAsyncUnauthenticated]]:
    * builds the submission request, validates its size, registers the send with the
    * send tracker and hands it to the transport (or, in replay mode, short-circuits
    * with a synthetic success).
    */
  private def sendAsyncInternal(
      batch: Batch[DefaultOpenEnvelope],
      requiresAuthentication: Boolean,
      sendType: SendType,
      timestampOfSigningKey: Option[CantonTimestamp],
      maxSequencingTime: CantonTimestamp,
      messageId: MessageId,
      aggregationRule: Option[AggregationRule],
      callback: SendCallback,
  )(implicit traceContext: TraceContext): EitherT[Future, SendAsyncClientError, Unit] =
    withSpan("SequencerClient.sendAsync") { implicit traceContext => span =>
      val requestE = SubmissionRequest
        .create(
          member,
          messageId,
          sendType.isRequest,
          Batch.closeEnvelopes(batch),
          maxSequencingTime,
          timestampOfSigningKey,
          aggregationRule,
          SubmissionRequest.protocolVersionRepresentativeFor(protocolVersion),
        )
        .leftMap(err =>
          SendAsyncClientError.RequestInvalid(s"Unable to get submission request: $err")
        )

      if (loggingConfig.eventDetails) {
        requestE match {
          case Left(err) =>
            logger.debug(
              s"Will not send async batch ${printer.printAdHoc(batch)} because of invalid request: $err"
            )
          case Right(request) =>
            logger.debug(
              s"About to send async batch ${printer.printAdHoc(batch)} as request ${printer.printAdHoc(request)}"
            )
        }
      }

      span.setAttribute("member", member.show)
      span.setAttribute("message_id", messageId.unwrap)

      // avoid emitting a warning during the first sequencing of the topology snapshot
      val warnOnUsingDefaults = member match {
        case _: ParticipantId => true
        case _ => false
      }
      val domainParamsF =
        EitherTUtil.fromFuture(
          domainParametersLookup.getApproximateOrDefaultValue(warnOnUsingDefaults),
          throwable =>
            SendAsyncClientError.RequestFailed(
              s"failed to retrieve maxRequestSize because ${throwable.getMessage}"
            ),
        )
      def trackSend: EitherT[Future, SendAsyncClientError, Unit] =
        sendTracker
          .track(messageId, maxSequencingTime, callback)
          .leftMap[SendAsyncClientError] { case SavePendingSendError.MessageIdAlreadyTracked =>
            // we're already tracking this message id
            SendAsyncClientError.DuplicateMessageId
          }

      if (replayEnabled) {
        for {
          request <- EitherT.fromEither[Future](requestE)
          domainParams <- domainParamsF
          _ <- EitherT.fromEither[Future](checkRequestSize(request, domainParams.maxRequestSize))
        } yield {
          // Invoke the callback immediately, because it will not be triggered by replayed messages,
          // as they will very likely have mismatching message ids.
          val dummySendResult =
            SendResult.Success(
              Deliver.create(
                SequencerCounter.Genesis,
                CantonTimestamp.now(),
                domainId,
                None,
                Batch(List.empty, protocolVersion),
                protocolVersion,
              )
            )
          callback(UnlessShutdown.Outcome(dummySendResult))
        }
      } else {
        for {
          request <- EitherT.fromEither[Future](requestE)
          domainParams <- domainParamsF
          _ <- EitherT.fromEither[Future](checkRequestSize(request, domainParams.maxRequestSize))
          _ <- trackSend
          _ = recorderO.foreach(_.recordSubmission(request))
          _ <- performSend(messageId, request, requiresAuthentication)
        } yield ()
      }
    }

  /** Perform the send, without any check.
    */
  private def performSend(
      messageId: MessageId,
      request: SubmissionRequest,
      requiresAuthentication: Boolean,
  )(implicit traceContext: TraceContext): EitherT[Future, SendAsyncClientError, Unit] = {
    EitherTUtil
      .timed(metrics.submissions.sends) {
        val timeout = timeouts.network.duration
        if (requiresAuthentication) {
          // Authenticated path: sign the submission request before handing it to the transport.
          for {
            signedContent <- requestSigner
              .signRequest(request, HashPurpose.SubmissionRequestSignature)
              .leftMap { err =>
                val message = s"Error signing submission request $err"
                logger.error(message)
                SendAsyncClientError.RequestRefused(SendAsyncError.RequestRefused(message))
              }
            _ <- sequencersTransportState.transport.sendAsyncSigned(signedContent, timeout)
          } yield ()

        } else
          sequencersTransportState.transport.sendAsyncUnauthenticatedVersioned(request, timeout)
      }
      .leftSemiflatMap { err =>
        // increment appropriate error metrics
        err match {
          case SendAsyncClientError.RequestRefused(SendAsyncError.Overloaded(_)) =>
            metrics.submissions.overloaded.inc()
          case _ =>
        }

        // cancel pending send now as we know the request will never cause a sequenced result
        logger.debug(s"Cancelling the pending send as the sequencer returned error: $err")
        sendTracker.cancelPendingSend(messageId).map(_ => err)
      }
  }

  override def generateMaxSequencingTime: CantonTimestamp =
    clock.now.add(config.defaultMaxSequencingTimeOffset.asJava)

  override def generateMessageId: MessageId = MessageId.randomMessageId()

  /** Create a subscription for sequenced events for this member,
    * starting after the prehead in the `sequencerCounterTrackerStore`.
    *
    * The `eventHandler` is monitored by [[com.digitalasset.canton.sequencing.handlers.CleanSequencerCounterTracker]]
    * so that the `sequencerCounterTrackerStore` advances the prehead
    * when (a batch of) events has been successfully processed by the `eventHandler` (synchronously and asynchronously).
    *
    * @see subscribe for the description of the `eventHandler` and the `timeTracker`
    */
  def subscribeTracking(
      sequencerCounterTrackerStore: SequencerCounterTrackerStore,
      eventHandler: PossiblyIgnoredApplicationHandler[ClosedEnvelope],
      timeTracker: DomainTimeTracker,
      onCleanHandler: Traced[SequencerCounterCursorPrehead] => Unit = _ => (),
  )(implicit traceContext: TraceContext): Future[Unit] = {
    sequencerCounterTrackerStore.preheadSequencerCounter.flatMap { cleanPrehead =>
      val priorTimestamp = cleanPrehead.fold(CantonTimestamp.MinValue)(
        _.timestamp
      ) // Sequencer client will feed events right after this ts to the handler.
      val cleanSequencerCounterTracker = new CleanSequencerCounterTracker(
        sequencerCounterTrackerStore,
        onCleanHandler,
        loggerFactory,
      )
      subscribeAfter(
        priorTimestamp,
        cleanPrehead.map(_.timestamp),
        cleanSequencerCounterTracker(eventHandler),
        timeTracker,
        PeriodicAcknowledgements.fetchCleanCounterFromStore(sequencerCounterTrackerStore),
      )
    }
  }

  /** Create a subscription for sequenced events for this member,
    * starting after the last event in the [[com.digitalasset.canton.store.SequencedEventStore]] up to `priorTimestamp`.
    * A sequencer client can only have a single subscription - additional subscription attempts will throw an exception.
    * When an event is received, we will check the pending sends and invoke the provided call-backs with the send result
    * (which can be deliver or timeout) before invoking the `eventHandler`.
    *
    * If the [[com.digitalasset.canton.store.SequencedEventStore]] contains events after `priorTimestamp`,
    * the handler is first fed with these events before the subscription is established,
    * starting at the last event found in the [[com.digitalasset.canton.store.SequencedEventStore]].
    *
    * @param priorTimestamp The timestamp of the event prior to where the event processing starts.
    *                       If [[scala.None$]], the subscription starts at the [[com.digitalasset.canton.data.CounterCompanion.Genesis]].
    * @param cleanPreheadTsO The timestamp of the clean prehead sequencer counter, if known.
    * @param eventHandler A function handling the events.
    * @param timeTracker Tracker for operations requiring the current domain time. Only updated with received events and not previously stored events.
    * @param fetchCleanTimestamp A function for retrieving the latest clean timestamp to use for periodic acknowledgements
    * @return The future completes after the subscription has been established or when an error occurs before that.
    *         In particular, synchronous processing of events from the [[com.digitalasset.canton.store.SequencedEventStore]]
    *         runs before the future completes.
    */
  def subscribeAfter(
      priorTimestamp: CantonTimestamp,
      cleanPreheadTsO: Option[CantonTimestamp],
      eventHandler: PossiblyIgnoredApplicationHandler[ClosedEnvelope],
      timeTracker: DomainTimeTracker,
      fetchCleanTimestamp: PeriodicAcknowledgements.FetchCleanTimestamp,
  )(implicit traceContext: TraceContext): Future[Unit] =
    subscribeAfterInternal(
      priorTimestamp,
      cleanPreheadTsO,
      eventHandler,
      timeTracker,
      fetchCleanTimestamp,
      requiresAuthentication = true,
    )

  /** Does the same as [[subscribeAfter]], except that this method is supposed to be used
    * only by unauthenticated members
    *
    * The method does not verify the signature of the server.
    */
  def subscribeAfterUnauthenticated(
      priorTimestamp: CantonTimestamp,
      eventHandler: PossiblyIgnoredApplicationHandler[ClosedEnvelope],
      timeTracker: DomainTimeTracker,
  )(implicit traceContext: TraceContext): Future[Unit] =
    subscribeAfterInternal(
      priorTimestamp,
      // We do not track cleanliness for unauthenticated subscriptions
      cleanPreheadTsO = None,
      eventHandler,
      timeTracker,
      PeriodicAcknowledgements.noAcknowledgements,
      requiresAuthentication = false,
    )

  /** Shared implementation behind [[subscribeAfter]] and [[subscribeAfterUnauthenticated]]:
    * replays stored events through the (throttled) handler, then opens one resilient
    * subscription per configured sequencer and, for authenticated members, sets up
    * periodic acknowledgements.
    */
  private def subscribeAfterInternal(
      priorTimestamp: CantonTimestamp,
      cleanPreheadTsO: Option[CantonTimestamp],
      nonThrottledEventHandler: PossiblyIgnoredApplicationHandler[ClosedEnvelope],
      timeTracker: DomainTimeTracker,
      fetchCleanTimestamp: PeriodicAcknowledgements.FetchCleanTimestamp,
      requiresAuthentication: Boolean,
  )(implicit traceContext: TraceContext): Future[Unit] = {
    val throttledEventHandler = ThrottlingApplicationEventHandler.throttle(
      config.maximumInFlightEventBatches,
      nonThrottledEventHandler,
      metrics,
    )
    val subscriptionF = performUnlessClosingUSF(functionFullName) {
      for {
        initialPriorEventO <- FutureUnlessShutdown.outcomeF(
          sequencedEventStore
            .find(SequencedEventStore.LatestUpto(priorTimestamp))
            .toOption
            .value
        )
        _ = if (initialPriorEventO.isEmpty) {
          logger.info(s"No event found up to $priorTimestamp. Resubscribing from the beginning.")
        }
        // Sanity check: a known clean prehead must not lie before the prior event we resume from.
        _ = cleanPreheadTsO.zip(initialPriorEventO).fold(()) {
          case (cleanPreheadTs, initialPriorEvent) =>
            ErrorUtil.requireArgument(
              initialPriorEvent.timestamp <= cleanPreheadTs,
              s"The initial prior event's timestamp ${initialPriorEvent.timestamp} is after the clean prehead at $cleanPreheadTs.",
            )
        }

        // bulk-feed the event handler with everything that we already have in the SequencedEventStore
        replayStartTimeInclusive = initialPriorEventO
          .fold(CantonTimestamp.MinValue)(_.timestamp)
          .immediateSuccessor
        _ = logger.info(
          s"Processing events from the SequencedEventStore from $replayStartTimeInclusive on"
        )

        replayEvents <- FutureUnlessShutdown.outcomeF(
          sequencedEventStore
            .findRange(
              SequencedEventStore
                .ByTimestampRange(replayStartTimeInclusive, CantonTimestamp.MaxValue),
              limit = None,
            )
            .valueOr { overlap =>
              ErrorUtil.internalError(
                new IllegalStateException(
                  s"Sequenced event store's pruning at ${overlap.pruningStatus.timestamp} is at or after the resubscription at $replayStartTimeInclusive."
                )
              )
            }
        )
        subscriptionStartsAt = replayEvents.headOption.fold(
          cleanPreheadTsO.fold(SubscriptionStart.FreshSubscription: SubscriptionStart)(
            SubscriptionStart.CleanHeadResubscriptionStart
          )
        )(replayEv =>
          SubscriptionStart.ReplayResubscriptionStart(replayEv.timestamp, cleanPreheadTsO)
        )
        _ = replayEvents.lastOption
          .orElse(initialPriorEventO)
          .foreach(event => timeTracker.subscriptionResumesAfter(event.timestamp))
        _ <- throttledEventHandler.subscriptionStartsAt(subscriptionStartsAt, timeTracker)

        eventBatches = replayEvents.grouped(config.eventInboxSize.unwrap)
        _ <- FutureUnlessShutdown.outcomeF(
          MonadUtil
            .sequentialTraverse_(eventBatches)(processEventBatch(throttledEventHandler, _))
            .valueOr(err => throw SequencerClientSubscriptionException(err))
        )
      } yield {
        val preSubscriptionEvent = replayEvents.lastOption.orElse(initialPriorEventO)
        // previously seen counter takes precedence over the lower bound
        val firstCounter = preSubscriptionEvent.fold(initialCounterLowerBound)(_.counter + 1)
        val monotonicityChecker = new SequencedEventMonotonicityChecker(
          firstCounter,
          preSubscriptionEvent.fold(CantonTimestamp.MinValue)(_.timestamp),
          loggerFactory,
        )
        val eventHandler = monotonicityChecker.handler(
          StoreSequencedEvent(sequencedEventStore, domainId, loggerFactory).apply(
            timeTracker.wrapHandler(throttledEventHandler)
          )
        )
        sequencerTransports.sequencerIdToTransportMap.keySet.foreach { sequencerId =>
          createSubscription(
            sequencerId,
            preSubscriptionEvent,
            requiresAuthentication,
            eventHandler,
          ).discard
        }

        // periodically acknowledge that we've successfully processed up to the clean counter
        // We only need to set it up once; the sequencer client will direct the acknowledgements to the
        // right transport.
        if (requiresAuthentication) { // unauthenticated members don't need to ack
          periodicAcknowledgementsRef.compareAndSet(
            None,
            PeriodicAcknowledgements
              .create(
                config.acknowledgementInterval.underlying,
                deferredSubscriptionHealth.getState.isOk,
                SequencerClientImpl.this,
                fetchCleanTimestamp,
                clock,
                timeouts,
                loggerFactory,
              )
              .some,
          )
        } else None
      }
    }

    // we may have actually not created a subscription if we have been closed
    val loggedAbortF = subscriptionF.unwrap.map {
      case UnlessShutdown.AbortedDueToShutdown =>
        logger.info("Ignoring the sequencer subscription request as the client is being closed")
      case UnlessShutdown.Outcome(_subscription) =>
        // Everything is fine, so no need to log anything.
        ()
    }
    FutureUtil.logOnFailure(loggedAbortF, "Sequencer subscription failed")
  }

  /** Creates and starts one resilient subscription towards the given sequencer,
    * wiring its health into [[deferredSubscriptionHealth]].
    */
  private def createSubscription(
      sequencerId: SequencerId,
      preSubscriptionEvent: Option[PossiblyIgnoredSerializedEvent],
      requiresAuthentication: Boolean,
      eventHandler: OrdinaryApplicationHandler[ClosedEnvelope],
  )(implicit
      traceContext: TraceContext
  ): ResilientSequencerSubscription[SequencerClientSubscriptionError] = {
    // previously seen counter takes precedence over the lower bound
    val nextCounter = preSubscriptionEvent.fold(initialCounterLowerBound)(_.counter)
    val eventValidator = eventValidatorFactory.create(unauthenticated = !requiresAuthentication)
    logger.info(
      s"Starting subscription for alias=$sequencerId at timestamp ${preSubscriptionEvent
          .map(_.timestamp)}; next counter $nextCounter"
    )

    // Optionally inject a test-only delay for this (member, domain) pair.
    val eventDelay: DelaySequencedEvent = {
      val first = testingConfig.testSequencerClientFor.find(elem =>
        elem.memberName == member.uid.id.unwrap &&
          elem.domainName == domainId.unwrap.id.unwrap
      )

      first match {
        case Some(value) =>
          DelayedSequencerClient.registerAndCreate(
            value.environmentId,
            domainId,
            member.uid.toString,
          )
        case None => NoDelay
      }
    }

    val subscriptionHandler = new
      SubscriptionHandler(
        eventHandler,
        eventValidator,
        eventDelay,
        preSubscriptionEvent,
        sequencerId,
      )

    val subscription = ResilientSequencerSubscription[SequencerClientSubscriptionError](
      domainId,
      protocolVersion,
      member,
      sequencersTransportState.transport(sequencerId),
      subscriptionHandler.handleEvent,
      nextCounter,
      config.initialConnectionRetryDelay.underlying,
      config.warnDisconnectDelay.underlying,
      config.maxConnectionRetryDelay.underlying,
      timeouts,
      requiresAuthentication,
      loggerFactory,
    )

    deferredSubscriptionHealth.set(sequencerId, subscription)

    sequencersTransportState
      .addSubscription(
        sequencerId,
        subscription,
        eventValidator,
      )

    // now start the subscription
    subscription.start

    subscription
  }

  /** Per-sequencer event handling: validates incoming events, feeds them into the
    * [[sequencerAggregator]] and signals the application handler when a merged event
    * is ready for processing.
    */
  private class SubscriptionHandler(
      applicationHandler: OrdinaryApplicationHandler[ClosedEnvelope],
      eventValidator: SequencedEventValidator,
      processingDelay: DelaySequencedEvent,
      initialPriorEvent: Option[PossiblyIgnoredSerializedEvent],
      sequencerId: SequencerId,
  ) {

    // keep track of the last event that we processed. In the event the SequencerClient is recreated or that our [[ResilientSequencerSubscription]] reconnects
    // we'll restart from the last successfully processed event counter and we'll validate it is still the last event we processed and that we're not seeing
    // a sequencer fork.
    private val priorEvent =
      new AtomicReference[Option[PossiblyIgnoredSerializedEvent]](initialPriorEvent)

    def handleEvent(
        serializedEvent: OrdinarySerializedEvent
    ): Future[Either[SequencerClientSubscriptionError, Unit]] = {
      implicit val traceContext: TraceContext = serializedEvent.traceContext
      // Process the event only if no failure has been detected
      val futureUS = applicationHandlerFailure.get.fold {
        recorderO.foreach(_.recordEvent(serializedEvent))

        // to ensure that we haven't forked since we last connected, we actually subscribe from the event we last
        // successfully processed and do another round of validations on it to ensure it's the same event we really
        // did last process. However if successful, there's no need to give it to the application handler or to store
        // it as we're really sure we've already processed it.
        // we'll also see the last event replayed if the resilient sequencer subscription reconnects.
        val isReplayOfPriorEvent = priorEvent.get().map(_.counter).contains(serializedEvent.counter)

        if (isReplayOfPriorEvent) {
          // just validate
          logger.debug(
            s"Do not handle event with sequencerCounter ${serializedEvent.counter}, as it is replayed and has already been handled."
          )
          eventValidator
            .validateOnReconnect(priorEvent.get(), serializedEvent, sequencerId)
            .leftMap[SequencerClientSubscriptionError](EventValidationError)
            .value
        } else {
          logger.debug(
            s"Validating sequenced event coming from $sequencerId with counter ${serializedEvent.counter} and timestamp ${serializedEvent.timestamp}"
          )
          (for {
            _ <- EitherT.liftF(
              performUnlessClosingF("processing-delay")(processingDelay.delay(serializedEvent))
            )
            _ <- eventValidator
              .validate(priorEvent.get(), serializedEvent, sequencerId)
              .leftMap[SequencerClientSubscriptionError](EventValidationError)
            _ = priorEvent.set(Some(serializedEvent))

            // `combineAndMergeEvent` tells us whether this event completed a merged event
            // that should now be handed to the application handler.
            toSignalHandler <- EitherT(
              sequencerAggregator
                .combineAndMergeEvent(
                  sequencerId,
                  serializedEvent,
                )
            )
              .leftMap[SequencerClientSubscriptionError](EventAggregationError)
          } yield
            if (toSignalHandler) {
              signalHandler(applicationHandler)
            }).value
        }
      }(err => FutureUnlessShutdown.pure(Left(err)))

      futureUS.onShutdown(Left(SequencerClientSubscriptionError.ApplicationHandlerShutdown))
    }

    // Here is how shutdown works:
    //   1. we stop injecting new events even if the handler is idle using the performUnlessClosing,
    //   2. the synchronous processing will mark handlerIdle as not completed, and once started, will be added to the flush
    //      the performUnlessClosing will guard us from entering the close method (and starting to flush) before we've successfully
    //      registered with the flush future
    //   3. once the synchronous processing finishes, it will mark the `handlerIdle` as completed and complete the flush future
    //   4. before the synchronous processing terminates and before it marks the handler to be idle again,
    //      it will add the async processing to the flush future.
    //   Consequently, on shutdown, we first have to wait on the flush future.
    //     a. No synchronous event will be added to the flush future anymore by the signalHandler
    //        due to the performUnlessClosing.
    //        Therefore, we can be sure that once the flush future terminates
    //        during shutdown, that the synchronous processing has completed and nothing new has been added.
    //     b. However, the synchronous event processing will be adding async processing to the flush future in the
    //        meantime. This means that the flush future we are waiting on might be outdated.
    //        Therefore, we have to wait on the flush future again. We can then be sure that all asynchronous
    //        futures have been added in the meantime as the synchronous flush future finished.
    //     c. I (rv) think that waiting on the `handlerIdle` is unnecessary for shutdown as it does the
    //        same as the flush future. We only need it to ensure we don't start the sequential processing in parallel.
    private def signalHandler(
        eventHandler: OrdinaryApplicationHandler[ClosedEnvelope]
    )(implicit traceContext: TraceContext): Unit = performUnlessClosing(functionFullName) {
      val isIdle = blocking {
        synchronized {
          val oldPromise = handlerIdle.getAndUpdate(p => if (p.isCompleted) Promise() else p)
          oldPromise.isCompleted
        }
      }
      if (isIdle) {
        val handlingF = handleReceivedEventsUntilEmpty(eventHandler)
        addToFlushAndLogError("invoking the application handler")(handlingF)
      }
    }.discard

    // Drains the aggregator's event queue in batches of `eventInboxSize`, recursing until the
    // queue is empty; completes `handlerIdle` when done or when processing fails.
    private def handleReceivedEventsUntilEmpty(
        eventHandler: OrdinaryApplicationHandler[ClosedEnvelope]
    ): Future[Unit] = {
      val inboxSize = config.eventInboxSize.unwrap
      val javaEventList = new java.util.ArrayList[OrdinarySerializedEvent](inboxSize)
      if (sequencerAggregator.eventQueue.drainTo(javaEventList, inboxSize) > 0) {
        import scala.jdk.CollectionConverters.*
        val handlerEvents = javaEventList.asScala.toSeq

        def stopHandler(): Unit = blocking {
          this.synchronized { val _ = handlerIdle.get().success(()) }
        }

        sendTracker
          .update(handlerEvents)
          .flatMap(_ => processEventBatch(eventHandler, handlerEvents).value)
          .transformWith {
            case Success(Right(())) =>
              handleReceivedEventsUntilEmpty(eventHandler)
            case Success(Left(_)) | Failure(_) =>
              // `processEventBatch` has already set `applicationHandlerFailure` so we don't need to propagate the error.
              stopHandler()
              Future.unit
          }
      } else {
        val stillBusy = blocking {
          this.synchronized {
            val idlePromise = handlerIdle.get()
            if (sequencerAggregator.eventQueue.isEmpty) {
              // signalHandler must not be executed here, because that would lead to lost signals.
              idlePromise.success(())
            }
            // signalHandler must not be executed here, because that would lead to duplicate invocations.
            !idlePromise.isCompleted
          }
        }

        if (stillBusy) {
          handleReceivedEventsUntilEmpty(eventHandler)
        } else {
          Future.unit
        }
      }
    }
  }

  /** If the returned future fails, contains a [[scala.Left$]]
    * or [[com.digitalasset.canton.lifecycle.UnlessShutdown.AbortedDueToShutdown]]
    * then [[applicationHandlerFailure]] contains an error.
    */
  private def processEventBatch[
      Box[+X <: Envelope[?]] <: PossiblyIgnoredSequencedEvent[X],
      Env <: Envelope[?],
  ](
      eventHandler: ApplicationHandler[Lambda[`+X <: Envelope[_]` => Traced[Seq[Box[X]]]], Env],
      eventBatch: Seq[Box[Env]],
  ): EitherT[Future, ApplicationHandlerFailure, Unit] =
    NonEmpty.from(eventBatch).fold(EitherT.pure[Future, ApplicationHandlerFailure](())) {
      eventBatchNE =>
        applicationHandlerFailure.get.fold {
          implicit val batchTraceContext: TraceContext = TraceContext.ofBatch(eventBatch)(logger)
          val lastSc = eventBatchNE.last1.counter
          val firstEvent = eventBatchNE.head1
          val firstSc = firstEvent.counter

          logger.debug(
            s"Passing ${eventBatch.size} events to the application handler ${eventHandler.name}."
          )
          // Measure only the synchronous part of the application handler so that we see how much the application handler
          // contributes to the sequential processing bottleneck.
          val asyncResultFT =
            Try(
              Timed
                .future(metrics.applicationHandle, eventHandler(Traced(eventBatch)).unwrap)
            )

          // Records the failure in the single-use cell (keeping an earlier failure if present)
          // and clears the aggregator queue to unblock the producer side.
          def putApplicationHandlerFailure(
              failure: ApplicationHandlerFailure
          ): ApplicationHandlerFailure = {
            val alreadyCompleted = applicationHandlerFailure.putIfAbsent(failure)
            alreadyCompleted.foreach { earlierFailure =>
              logger.debug(show"Another event processing has previously failed: $earlierFailure")
            }
            logger.debug("Clearing the receivedEvents queue to unblock the subscription.")
            // Clear the receivedEvents queue, because the thread that inserts new events to the queue may block.
            // Clearing the queue is potentially dangerous, because it may result in data loss.
            // To prevent that, clear the queue only after setting applicationHandlerFailure.
            // - Once the applicationHandlerFailure has been set, any subsequent invocations of this method won't invoke
            //   the application handler.
            // - Ongoing invocations of this method are not affected by clearing the queue,
            //   because the events processed by the ongoing invocation have been drained from the queue before clearing.
            sequencerAggregator.eventQueue.clear()
            failure
          }

          // Maps a thrown error to the appropriate ApplicationHandlerFailure, with
          // log severity depending on whether we are shutting down.
          def handleException(
              error: Throwable,
              syncProcessing: Boolean,
          ): ApplicationHandlerFailure = {
            val sync = if (syncProcessing) "Synchronous" else "Asynchronous"

            error match {
              case PassiveInstanceException(reason) =>
                logger.warn(
                  s"$sync event processing stopped because instance became passive"
                )
                putApplicationHandlerFailure(ApplicationHandlerPassive(reason))

              case _ if isClosing =>
                logger.info(
                  s"$sync event processing failed for event batch with sequencer counters $firstSc to $lastSc, most likely due to an ongoing shutdown",
                  error,
                )
                putApplicationHandlerFailure(ApplicationHandlerShutdown)

              case _ =>
                logger.error(
                  s"$sync event processing failed for event batch with sequencer counters $firstSc to $lastSc.",
                  error,
                )
                putApplicationHandlerFailure(ApplicationHandlerException(error, firstSc, lastSc))
            }
          }

          def handleAsyncResult(
              asyncResultF: Future[UnlessShutdown[AsyncResult]]
          ): EitherT[Future, ApplicationHandlerFailure, Unit] =
            EitherTUtil
              .fromFuture(asyncResultF, handleException(_, syncProcessing = true))
              .subflatMap {
                case UnlessShutdown.Outcome(asyncResult) =>
                  val asyncSignalledF = asyncResult.unwrap.transform { result =>
                    // record errors and shutdown in `applicationHandlerFailure` and move on
                    result match {
                      case Success(outcome) =>
                        outcome
                          .onShutdown(
                            putApplicationHandlerFailure(ApplicationHandlerShutdown).discard
                          )
                          .discard
                      case Failure(error) =>
                        handleException(error, syncProcessing = false).discard
                    }
                    Success(UnlessShutdown.unit)
                  }.unwrap
                  // note, we are adding our async processing to the flush future, so we know once the async processing has finished
                  addToFlushAndLogError(
                    s"asynchronous event processing for event batch with sequencer counters $firstSc to $lastSc"
                  )(asyncSignalledF)
                  // we do not wait for the async results to finish, we are done here once the synchronous part is done
                  Right(())
+ case UnlessShutdown.AbortedDueToShutdown => + putApplicationHandlerFailure(ApplicationHandlerShutdown).discard + Left(ApplicationHandlerShutdown) + } + + // note, here, we created the asyncResultF, which means we've completed the synchronous processing part. + asyncResultFT.fold( + error => EitherT.leftT[Future, Unit](handleException(error, syncProcessing = true)), + handleAsyncResult, + ) + }(EitherT.leftT[Future, Unit](_)) + } + + /** Acknowledge that we have successfully processed all events up to and including the given timestamp. + * The client should then never subscribe for events from before this point. + */ + private[client] def acknowledge(timestamp: CantonTimestamp)(implicit + traceContext: TraceContext + ): Future[Unit] = { + val request = AcknowledgeRequest(member, timestamp, protocolVersion) + sequencersTransportState.transport.acknowledge(request) + } + + def acknowledgeSigned(timestamp: CantonTimestamp)(implicit + traceContext: TraceContext + ): EitherT[Future, String, Unit] = { + val request = AcknowledgeRequest(member, timestamp, protocolVersion) + for { + signedRequest <- requestSigner.signRequest(request, HashPurpose.AcknowledgementSignature) + _ <- sequencersTransportState.transport.acknowledgeSigned(signedRequest) + } yield () + } + + def changeTransport( + sequencerTransports: SequencerTransports[?] + )(implicit traceContext: TraceContext): Future[Unit] = { + sequencerAggregator.changeMessageAggregationConfig( + MessageAggregationConfig( + sequencerTransports.expectedSequencers, + sequencerTransports.sequencerTrustThreshold, + ) + ) + sequencersTransportState.changeTransport(sequencerTransports) + } + + /** Future which is completed when the client is not functional any more and is ready to be closed. + * The value with which the future is completed will indicate the reason for completion. 
+ */ + def completion: Future[SequencerClient.CloseReason] = sequencersTransportState.completion + + private def waitForHandlerToComplete(): Unit = { + import TraceContext.Implicits.Empty.* + logger.trace(s"Wait for the handler to become idle") + // This logs a warning if the handler does not become idle within 60 seconds. + // This happens when the handler is not making progress, for example due to a db outage. + valueOrLog( + handlerIdle.get().future, + timeoutMessage = s"Clean close of the sequencer subscriptions timed out", + timeout = timeouts.shutdownProcessing.unwrap, + ).discard + } + + override protected def closeAsync(): Seq[AsyncOrSyncCloseable] = { + Seq( + SyncCloseable("sequencer-aggregator", sequencerAggregator.close()), + SyncCloseable("sequencer-send-tracker", sendTracker.close()), + // see comments above why we need two flushes + flushCloseable("sequencer-client-flush-sync", timeouts.shutdownProcessing), + flushCloseable("sequencer-client-flush-async", timeouts.shutdownProcessing), + SyncCloseable("sequencer-client-subscription", sequencersTransportState.close()), + SyncCloseable("handler-becomes-idle", waitForHandlerToComplete()), + SyncCloseable( + "sequencer-client-periodic-ack", + toCloseableOption(periodicAcknowledgementsRef.get()).close(), + ), + SyncCloseable("sequencer-client-recorder", recorderO.foreach(_.close())), + SyncCloseable("deferred-subscription-health", deferredSubscriptionHealth.close()), + ) + } + + /** Returns a future that completes after asynchronous processing has completed for all events + * whose synchronous processing has been completed prior to this call. May complete earlier if event processing + * has failed. + */ + @VisibleForTesting + def flush(): Future[Unit] = doFlush() + + /** Await the completion of `future`. Log a message if the future does not complete within `timeout`. + * If the `future` fails with an exception within `timeout`, this method rethrows the exception.
+ * + * Instead of using this method, you should use the respective method on one of the ProcessingTimeouts + * + * @return Optionally the completed value of `future` if it successfully completes in time. + */ + private def valueOrLog[T]( + future: Future[T], + timeoutMessage: => String, + timeout: Duration, + level: Level = Level.WARN, + stackTraceFilter: Thread => Boolean = defaultStackTraceFilter, + )(implicit loggingContext: ErrorLoggingContext): Option[T] = { + // Use Await.ready instead of Await.result to be able to tell the difference between the awaitable throwing a + // TimeoutException and a TimeoutException being thrown because the awaitable is not ready. + val ready = Try(Await.ready(future, timeout)) + ready match { + case Success(awaited) => + val result = awaited.value.getOrElse( + throw new RuntimeException(s"Future $future not completed after successful Await.ready.") + ) + result.fold(throw _, Some(_)) + + case Failure(timeoutExc: TimeoutException) => + val stackTraces = StackTraceUtil.formatStackTrace(stackTraceFilter) + if (stackTraces.isEmpty) + LoggerUtil.logThrowableAtLevel(level, timeoutMessage, timeoutExc) + else + LoggerUtil.logThrowableAtLevel( + level, + s"$timeoutMessage\nStack traces:\n$stackTraces", + timeoutExc, + ) + None + + case Failure(exc) => ErrorUtil.internalError(exc) + } + } +} + +object SequencerClient { + val healthName: String = "sequencer-client" + + final case class SequencerTransportContainer[E]( + sequencerId: SequencerId, + clientTransport: SequencerClientTransport & SequencerClientTransportPekko.Aux[E], + ) + + final case class SequencerTransports[E]( + sequencerToTransportMap: NonEmpty[Map[SequencerAlias, SequencerTransportContainer[E]]], + sequencerTrustThreshold: PositiveInt, + ) { + def expectedSequencers: NonEmpty[Set[SequencerId]] = + sequencerToTransportMap.map(_._2.sequencerId).toSet + + def sequencerIdToTransportMap: NonEmpty[Map[SequencerId, SequencerTransportContainer[E]]] = { + 
sequencerToTransportMap.map { case (_, transport) => + transport.sequencerId -> transport + }.toMap + } + + def transports: Set[SequencerClientTransport] = + sequencerToTransportMap.values.map(_.clientTransport).toSet + } + + object SequencerTransports { + def from[E]( + sequencerTransportsMap: NonEmpty[ + Map[SequencerAlias, SequencerClientTransport & SequencerClientTransportPekko.Aux[E]] + ], + expectedSequencers: NonEmpty[Map[SequencerAlias, SequencerId]], + sequencerSignatureThreshold: PositiveInt, + ): Either[String, SequencerTransports[E]] = + if (sequencerTransportsMap.keySet != expectedSequencers.keySet) { + Left("Inconsistent map of sequencer transports and their ids.") + } else + Right( + SequencerTransports( + sequencerToTransportMap = + sequencerTransportsMap.map { case (sequencerAlias, transport) => + val sequencerId = expectedSequencers(sequencerAlias) + sequencerAlias -> SequencerTransportContainer(sequencerId, transport) + }.toMap, + sequencerTrustThreshold = sequencerSignatureThreshold, + ) + ) + + def single[E]( + sequencerAlias: SequencerAlias, + sequencerId: SequencerId, + transport: SequencerClientTransport & SequencerClientTransportPekko.Aux[E], + ): SequencerTransports[E] = + SequencerTransports( + NonEmpty + .mk( + Seq, + sequencerAlias -> SequencerTransportContainer(sequencerId, transport), + ) + .toMap, + PositiveInt.tryCreate(1), + ) + + def default[E]( + sequencerId: SequencerId, + transport: SequencerClientTransport & SequencerClientTransportPekko.Aux[E], + ): SequencerTransports[E] = + single(SequencerAlias.Default, sequencerId, transport) + } + + sealed trait CloseReason + + object CloseReason { + + trait ErrorfulCloseReason + + final case class PermissionDenied(cause: String) extends CloseReason + + final case class UnrecoverableError(cause: String) extends ErrorfulCloseReason with CloseReason + + final case class UnrecoverableException(throwable: Throwable) + extends ErrorfulCloseReason + with CloseReason + + case object 
ClientShutdown extends CloseReason + + case object BecamePassive extends CloseReason + } + + /** Hook for informing tests about replay statistics. + * + * If a [[SequencerClient]] is used with + * [[transports.replay.ReplayingEventsSequencerClientTransport]], the transport + * will add statistics to this queue whenever a replay attempt has completed successfully. + * + * A test can poll these statistics from the queue to determine whether the replay has completed and to + * get statistics on the replay. + * + * LIMITATION: This is only suitable for manual / sequential test setups, as the statistics are shared through + * a global queue. + */ + @VisibleForTesting + lazy val replayStatistics: BlockingQueue[ReplayStatistics] = new LinkedBlockingQueue() + + final case class ReplayStatistics( + inputPath: Path, + numberOfEvents: Int, + startTime: CantonTimestamp, + duration: JDuration, + ) + + /** Utility to add retries around sends as an attempt to guarantee the send is eventually sequenced.
+ */ + def sendWithRetries( + sendBatch: SendCallback => EitherT[Future, SendAsyncClientError, Unit], + maxRetries: Int, + delay: FiniteDuration, + sendDescription: String, + errMsg: String, + performUnlessClosing: PerformUnlessClosing, + )(implicit + ec: ExecutionContext, + loggingContext: ErrorLoggingContext, + ): FutureUnlessShutdown[Unit] = { + def doSend(): FutureUnlessShutdown[Unit] = { + val callback = new CallbackFuture() + for { + _ <- FutureUnlessShutdown + .outcomeF( + EitherTUtil.toFuture( + EitherTUtil + .logOnError(sendBatch(callback), errMsg) + .leftMap(err => new RuntimeException(s"$errMsg: $err")) + ) + ) + sendResult <- callback.future + _ <- SendResult.toFutureUnlessShutdown(sendDescription)(sendResult) + } yield () + } + retry + .Pause(loggingContext.logger, performUnlessClosing, maxRetries, delay, sendDescription) + .unlessShutdown(doSend(), AllExnRetryable)( + retry.Success.always, + ec, + loggingContext.traceContext, + ) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientConfig.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientConfig.scala new file mode 100644 index 0000000000..c03d53dca1 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientConfig.scala @@ -0,0 +1,61 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.client + +import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} +import com.digitalasset.canton.config.* +import com.digitalasset.canton.sequencing.authentication.AuthenticationTokenManagerConfig + +/** Client configured options for how to connect to a sequencer + * + * @param eventInboxSize The size of the inbox queue used to store received events. Must be at least one. 
+ * Events in the inbox are processed in parallel. + * A larger inbox may result in higher throughput at the price of higher memory consumption, + * larger database queries, and longer crash recovery. + * @param startupConnectionRetryDelay Initial delay before we attempt to establish an initial connection + * @param initialConnectionRetryDelay Initial delay before a reconnect attempt + * @param warnDisconnectDelay Consider sequencer to be degraded after delay + * @param maxConnectionRetryDelay Maximum delay before a reconnect attempt + * @param handshakeRetryAttempts How many attempts should we make to get a handshake response + * @param handshakeRetryDelay How long to delay between attempts to fetch a handshake response + * @param defaultMaxSequencingTimeOffset if no max-sequencing-time is supplied to send, our current time will be offset by this amount + * @param acknowledgementInterval Controls how frequently the client acknowledges how far it has successfully processed + * to the sequencer which allows the sequencer to remove this data when pruning. + * @param keepAlive keep alive config used for GRPC sequencers + * @param authToken configuration settings for the authentication token manager + * @param optimisticSequencedEventValidation if true, sequenced event signatures will be validated first optimistically + * and only strict if the optimistic evaluation failed. this means that + * for a split second, we might still accept an event signed with a key that + * has just been revoked. + * @param skipSequencedEventValidation if true, sequenced event validation will be skipped. the default setting is false. + * this option should only be enabled if a defective validation is blocking processing. + * therefore, unless you know what you are doing, you shouldn't touch this setting. + * @param overrideMaxRequestSize overrides the maxRequestSize configured in the dynamic domain parameters. 
If overrideMaxRequestSize + is set, modifying the maxRequestSize won't have any effect. + * @param maximumInFlightEventBatches The maximum number of event batches that the system will process concurrently. + * Setting the `maximumInFlightEventBatches` parameter limits the number of event batches that the system will process + * simultaneously, preventing overload and ensuring that the system can handle the workload effectively. A higher value + * of `maximumInFlightEventBatches` can lead to increased throughput, but at the cost of higher memory consumption and + * longer processing times for each batch. A lower value of `maximumInFlightEventBatches` may limit throughput, but can + * result in more stable and predictable system behavior. + */ +final case class SequencerClientConfig( + eventInboxSize: PositiveInt = PositiveInt.tryCreate(100), + startupConnectionRetryDelay: NonNegativeFiniteDuration = NonNegativeFiniteDuration.ofSeconds(1), + initialConnectionRetryDelay: NonNegativeFiniteDuration = NonNegativeFiniteDuration.ofMillis(10), + warnDisconnectDelay: NonNegativeFiniteDuration = NonNegativeFiniteDuration.ofSeconds(5), + maxConnectionRetryDelay: NonNegativeFiniteDuration = NonNegativeFiniteDuration.ofSeconds(30), + handshakeRetryAttempts: NonNegativeInt = NonNegativeInt.tryCreate(50), + handshakeRetryDelay: NonNegativeFiniteDuration = NonNegativeFiniteDuration.ofSeconds(5), + defaultMaxSequencingTimeOffset: NonNegativeFiniteDuration = + NonNegativeFiniteDuration.ofMinutes(5), + acknowledgementInterval: NonNegativeFiniteDuration = NonNegativeFiniteDuration.ofMinutes(1), + keepAliveClient: Option[KeepAliveClientConfig] = Some(KeepAliveClientConfig()), + authToken: AuthenticationTokenManagerConfig = AuthenticationTokenManagerConfig(), + // TODO(#10040) remove optimistic validation + optimisticSequencedEventValidation: Boolean = true, + skipSequencedEventValidation: Boolean = false, + overrideMaxRequestSize: Option[NonNegativeInt] = None, +
maximumInFlightEventBatches: PositiveInt = PositiveInt.tryCreate(20), +) diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientFactory.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientFactory.scala new file mode 100644 index 0000000000..536b87fb6f --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientFactory.scala @@ -0,0 +1,343 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.client + +import cats.data.EitherT +import com.daml.grpc.adapter.ExecutionSequencerFactory +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.common.domain.ServiceAgreementId +import com.digitalasset.canton.concurrent.FutureSupervisor +import com.digitalasset.canton.config.RequireTypes.NonNegativeInt +import com.digitalasset.canton.config.* +import com.digitalasset.canton.crypto.{Crypto, SyncCryptoApi, SyncCryptoClient} +import com.digitalasset.canton.lifecycle.* +import com.digitalasset.canton.logging.{ + ErrorLoggingContext, + NamedLoggerFactory, + NamedLoggingContext, +} +import com.digitalasset.canton.metrics.SequencerClientMetrics +import com.digitalasset.canton.networking.Endpoint +import com.digitalasset.canton.networking.grpc.ClientChannelBuilder +import com.digitalasset.canton.protocol.{DomainParametersLookup, StaticDomainParameters} +import com.digitalasset.canton.sequencing.* +import com.digitalasset.canton.sequencing.client.ReplayAction.{SequencerEvents, SequencerSends} +import com.digitalasset.canton.sequencing.client.SequencerClient.SequencerTransports +import com.digitalasset.canton.sequencing.client.grpc.GrpcSequencerChannelBuilder +import com.digitalasset.canton.sequencing.client.transports.* +import 
com.digitalasset.canton.sequencing.client.transports.replay.{ + ReplayingEventsSequencerClientTransport, + ReplayingSendsSequencerClientTransportImpl, + ReplayingSendsSequencerClientTransportPekko, +} +import com.digitalasset.canton.sequencing.handshake.SequencerHandshake +import com.digitalasset.canton.store.* +import com.digitalasset.canton.time.Clock +import com.digitalasset.canton.topology.* +import com.digitalasset.canton.topology.client.DomainTopologyClient +import com.digitalasset.canton.tracing.{TraceContext, TracingConfig} +import com.digitalasset.canton.version.ProtocolVersion +import com.digitalasset.canton.{SequencerAlias, SequencerCounter} +import io.grpc.{CallOptions, ManagedChannel} +import io.opentelemetry.api.trace.Tracer +import org.apache.pekko.stream.Materializer + +import scala.concurrent.* + +trait SequencerClientFactory { + def create( + member: Member, + sequencedEventStore: SequencedEventStore, + sendTrackerStore: SendTrackerStore, + requestSigner: RequestSigner, + sequencerConnections: SequencerConnections, + expectedSequencers: NonEmpty[Map[SequencerAlias, SequencerId]], + )(implicit + executionContext: ExecutionContextExecutor, + executionSequencerFactory: ExecutionSequencerFactory, + materializer: Materializer, + tracer: Tracer, + traceContext: TraceContext, + ): EitherT[Future, String, SequencerClient] + +} + +object SequencerClientFactory { + def apply( + domainId: DomainId, + syncCryptoApi: SyncCryptoClient[SyncCryptoApi], + crypto: Crypto, + agreedAgreementId: Option[ServiceAgreementId], + config: SequencerClientConfig, + traceContextPropagation: TracingConfig.Propagation, + testingConfig: TestingConfigInternal, + domainParameters: StaticDomainParameters, + processingTimeout: ProcessingTimeout, + clock: Clock, + topologyClient: DomainTopologyClient, + futureSupervisor: FutureSupervisor, + recordingConfigForMember: Member => Option[RecordingConfig], + replayConfigForMember: Member => Option[ReplayConfig], + metrics: 
SequencerClientMetrics, + loggingConfig: LoggingConfig, + loggerFactory: NamedLoggerFactory, + supportedProtocolVersions: Seq[ProtocolVersion], + minimumProtocolVersion: Option[ProtocolVersion], + ): SequencerClientFactory with SequencerClientTransportFactory = + new SequencerClientFactory with SequencerClientTransportFactory { + + override def create( + member: Member, + sequencedEventStore: SequencedEventStore, + sendTrackerStore: SendTrackerStore, + requestSigner: RequestSigner, + sequencerConnections: SequencerConnections, + expectedSequencers: NonEmpty[Map[SequencerAlias, SequencerId]], + )(implicit + executionContext: ExecutionContextExecutor, + executionSequencerFactory: ExecutionSequencerFactory, + materializer: Materializer, + tracer: Tracer, + traceContext: TraceContext, + ): EitherT[Future, String, SequencerClient] = { + // initialize recorder if it's been configured for the member (should only be used for testing) + val recorderO = recordingConfigForMember(member).map { recordingConfig => + new SequencerClientRecorder( + recordingConfig.fullFilePath, + processingTimeout, + loggerFactory, + ) + } + val sequencerDomainParamsLookup = DomainParametersLookup.forSequencerDomainParameters( + domainParameters, + config.overrideMaxRequestSize, + topologyClient, + futureSupervisor, + loggerFactory, + ) + + for { + sequencerTransportsMap <- makeTransport( + sequencerConnections, + member, + requestSigner, + ) + + sequencerTransports <- EitherT.fromEither[Future]( + SequencerTransports.from( + sequencerTransportsMap, + expectedSequencers, + sequencerConnections.sequencerTrustThreshold, + ) + ) + + // fetch the initial set of pending sends to initialize the client with. + // as it owns the client that should be writing to this store it should not be racy. 
+ initialPendingSends <- EitherT.right(sendTrackerStore.fetchPendingSends) + sendTracker = new SendTracker( + initialPendingSends, + sendTrackerStore, + metrics, + loggerFactory, + processingTimeout, + ) + // pluggable send approach to support transitioning to the new async sends + validatorFactory = new SequencedEventValidatorFactory { + override def create( + unauthenticated: Boolean + )(implicit loggingContext: NamedLoggingContext): SequencedEventValidator = + if (config.skipSequencedEventValidation) { + SequencedEventValidator.noValidation(domainId) + } else { + new SequencedEventValidatorImpl( + unauthenticated, + config.optimisticSequencedEventValidation, + domainId, + domainParameters.protocolVersion, + syncCryptoApi, + loggerFactory, + processingTimeout, + ) + } + } + } yield new SequencerClientImpl( + domainId, + member, + sequencerTransports, + config, + testingConfig, + domainParameters.protocolVersion, + sequencerDomainParamsLookup, + processingTimeout, + validatorFactory, + clock, + requestSigner, + sequencedEventStore, + sendTracker, + metrics, + recorderO, + replayConfigForMember(member).isDefined, + syncCryptoApi.pureCrypto, + loggingConfig, + loggerFactory, + futureSupervisor, + SequencerCounter.Genesis, + ) + } + + override def makeTransport( + connection: SequencerConnection, + member: Member, + requestSigner: RequestSigner, + )(implicit + executionContext: ExecutionContextExecutor, + executionSequencerFactory: ExecutionSequencerFactory, + materializer: Materializer, + traceContext: TraceContext, + ): EitherT[Future, String, SequencerClientTransport & SequencerClientTransportPekko] = { + def mkRealTransport(): SequencerClientTransport & SequencerClientTransportPekko = + connection match { + case grpc: GrpcSequencerConnection => grpcTransport(grpc, member) + } + + val transport: SequencerClientTransport & SequencerClientTransportPekko = + replayConfigForMember(member) match { + case None => mkRealTransport() + case Some(ReplayConfig(recording, 
SequencerEvents)) => + new ReplayingEventsSequencerClientTransport( + domainParameters.protocolVersion, + recording.fullFilePath, + processingTimeout, + loggerFactory, + ) + case Some(ReplayConfig(recording, replaySendsConfig: SequencerSends)) => + if (replaySendsConfig.usePekko) { + val underlyingTransport = mkRealTransport() + new ReplayingSendsSequencerClientTransportPekko( + domainParameters.protocolVersion, + recording.fullFilePath, + replaySendsConfig, + member, + underlyingTransport, + requestSigner, + metrics, + processingTimeout, + loggerFactory, + ) + } else { + val underlyingTransport = mkRealTransport() + new ReplayingSendsSequencerClientTransportImpl( + domainParameters.protocolVersion, + recording.fullFilePath, + replaySendsConfig, + member, + underlyingTransport, + requestSigner, + metrics, + processingTimeout, + loggerFactory, + ) + } + } + + for { + // handshake to check that sequencer client supports the protocol version required by the sequencer + _ <- SequencerHandshake + .handshake( + supportedProtocolVersions, + minimumProtocolVersion, + transport, + config, + processingTimeout, + loggerFactory, + ) + .leftMap { error => + // make sure to close transport in case of handshake failure + transport.close() + error + } + } yield transport + } + + private def createChannel(conn: GrpcSequencerConnection)(implicit + executionContext: ExecutionContextExecutor + ): ManagedChannel = { + val channelBuilder = ClientChannelBuilder(loggerFactory) + GrpcSequencerChannelBuilder( + channelBuilder, + conn, + NonNegativeInt.maxValue, // we set this limit only on the sequencer node, to avoid restarting the client if this value is changed + traceContextPropagation, + config.keepAliveClient, + ) + } + + /** the wait-for-ready call option is added for when round-robin-ing through connections + * so that if one of them gets closed, we try the next one instead of unnecessarily failing. 
+ * wait-for-ready semantics: https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md + * this is safe for non-idempotent RPCs. + */ + private def callOptionsForEndpoints(endpoints: NonEmpty[Seq[Endpoint]]): CallOptions = + if (endpoints.length > 1) CallOptions.DEFAULT.withWaitForReady() + else CallOptions.DEFAULT + + private def grpcSequencerClientAuth( + connection: GrpcSequencerConnection, + member: Member, + )(implicit executionContext: ExecutionContextExecutor): GrpcSequencerClientAuth = { + val channelPerEndpoint = connection.endpoints.map { endpoint => + val subConnection = connection.copy(endpoints = NonEmpty.mk(Seq, endpoint)) + endpoint -> createChannel(subConnection) + }.toMap + new GrpcSequencerClientAuth( + domainId, + member, + crypto, + agreedAgreementId, + channelPerEndpoint, + supportedProtocolVersions, + config.authToken, + clock, + processingTimeout, + loggerFactory, + ) + } + + private def grpcTransport(connection: GrpcSequencerConnection, member: Member)(implicit + executionContext: ExecutionContextExecutor, + executionSequencerFactory: ExecutionSequencerFactory, + materializer: Materializer, + ): SequencerClientTransport & SequencerClientTransportPekko = { + val channel = createChannel(connection) + val auth = grpcSequencerClientAuth(connection, member) + val callOptions = callOptionsForEndpoints(connection.endpoints) + new GrpcSequencerClientTransportPekko( + channel, + callOptions, + auth, + metrics, + processingTimeout, + loggerFactory, + domainParameters.protocolVersion, + ) + } + + def validateTransport( + connection: SequencerConnection, + logWarning: Boolean, + )(implicit + executionContext: ExecutionContextExecutor, + errorLoggingContext: ErrorLoggingContext, + closeContext: CloseContext, + ): EitherT[FutureUnlessShutdown, String, Unit] = + SequencerClientTransportFactory.validateTransport( + connection, + traceContextPropagation, + config, + logWarning, + loggerFactory, + ) + + } +} diff --git 
a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientSend.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientSend.scala new file mode 100644 index 0000000000..fff886e9ad --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientSend.scala @@ -0,0 +1,62 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.client + +import cats.data.EitherT +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.protocol.messages.DefaultOpenEnvelope +import com.digitalasset.canton.sequencing.protocol.{AggregationRule, Batch, MessageId} +import com.digitalasset.canton.tracing.TraceContext + +import scala.concurrent.Future + +trait SequencerClientSend { + + /** Sends a request to sequence a deliver event to the sequencer. + * If we fail to make the request to the sequencer and are certain that it was not received by the sequencer an + * error is returned. In this circumstance it is safe for the caller to retry the request without causing a duplicate + * request. + * A successful response however does not mean that the request will be successfully sequenced. Instead the caller + * must subscribe to the sequencer and can observe one of the following outcomes: + * 1. A deliver event is sequenced with a messageId matching this send. + * 2. A deliver error is sequenced with a messageId matching this send. + * 3. The sequencing time progresses beyond the provided max-sequencing-time. The caller can assume that the send + * will now never be sequenced. + * Callers should be aware that a message-id can be reused once one of these outcomes is observed so cannot assume + * that an event with a matching message-id at any point in the future matches their send. 
Use the `sendTracker` to + * aid tracking timeouts for events (if useful this could be enriched in the future to provide send completion + * callbacks alongside the existing timeout notifications). + * For convenience callers can provide a callback that the SendTracker will invoke when the outcome of the send + * is known. However this convenience comes with significant limitations that a caller must understand: + * - the callback has no ability to be persisted so will be lost after a restart or recreation of the SequencerClient + * - the callback is called by the send tracker while handling an event from a SequencerSubscription. + * If the callback returns an error this will be returned to the underlying subscription handler and shutdown the sequencer + * client. If handlers do not want to halt the sequencer subscription errors should be appropriately handled + * (particularly logged) and a successful value returned from the callback. + * - If witnessing an event causes many prior sends to timeout there is no guaranteed order in which the + * callbacks of these sends will be notified. + * - If replay is enabled, the callback will be called immediately with a fake `SendResult`. + * For more robust send result tracking callers should persist metadata about the send they will make and + * monitor the sequenced events when read, so actions can be taken even if in-memory state is lost. + */ + def sendAsync( + batch: Batch[DefaultOpenEnvelope], + sendType: SendType = SendType.Other, + timestampOfSigningKey: Option[CantonTimestamp] = None, + maxSequencingTime: CantonTimestamp = generateMaxSequencingTime, + messageId: MessageId = generateMessageId, + aggregationRule: Option[AggregationRule] = None, + callback: SendCallback = SendCallback.empty, + )(implicit traceContext: TraceContext): EitherT[Future, SendAsyncClientError, Unit] + + /** Provides a value for max-sequencing-time to use for `sendAsync` if no better application provided timeout is available. 
+ * Is currently a configurable offset from our clock. + */ + def generateMaxSequencingTime: CantonTimestamp + + /** Generates a message id. + * The message id is only for correlation within this client and does not need to be globally unique. + */ + def generateMessageId: MessageId = MessageId.randomMessageId() +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientSubscriptionError.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientSubscriptionError.scala new file mode 100644 index 0000000000..7d63776df9 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientSubscriptionError.scala @@ -0,0 +1,52 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.client + +import com.digitalasset.canton.SequencerCounter +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.sequencing.SequencerAggregator.SequencerAggregatorError + +sealed trait SequencerClientSubscriptionError extends Product with Serializable { + def mbException: Option[Throwable] = None +} + +object SequencerClientSubscriptionError { + final case class EventAggregationError(error: SequencerAggregatorError) + extends SequencerClientSubscriptionError + + final case class EventValidationError(error: SequencedEventValidationError[Nothing]) + extends SequencerClientSubscriptionError + + sealed trait ApplicationHandlerFailure + extends SequencerClientSubscriptionError + with PrettyPrinting + + /** The application handler returned that it is being shutdown. 
*/ + case object ApplicationHandlerShutdown extends ApplicationHandlerFailure { + override def pretty: Pretty[ApplicationHandlerShutdown.type] = + prettyOfObject[ApplicationHandlerShutdown.type] + } + + /** The application handler returned that it is being passive. */ + final case class ApplicationHandlerPassive(reason: String) extends ApplicationHandlerFailure { + override def pretty: Pretty[ApplicationHandlerPassive] = + prettyOfClass(param("reason", _.reason.unquoted)) + } + + /** The application handler threw an exception while processing the event (synchronously or asynchronously) */ + final case class ApplicationHandlerException( + exception: Throwable, + firstSequencerCounter: SequencerCounter, + lastSequencerCounter: SequencerCounter, + ) extends ApplicationHandlerFailure { + override def mbException: Option[Throwable] = Some(exception) + + override def pretty: Pretty[ApplicationHandlerException] = prettyOfClass( + param("first sequencer counter", _.firstSequencerCounter), + param("last sequencer counter", _.lastSequencerCounter), + unnamedParam(_.exception), + ) + } + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientSubscriptionException.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientSubscriptionException.scala new file mode 100644 index 0000000000..694fe9fe46 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientSubscriptionException.scala @@ -0,0 +1,11 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.client + +@SuppressWarnings(Array("org.wartremover.warts.Null")) +final case class SequencerClientSubscriptionException(error: SequencerClientSubscriptionError) + extends RuntimeException( + s"Handling of sequencer event failed with error: $error", + error.mbException.orNull, + ) diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientTransportFactory.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientTransportFactory.scala new file mode 100644 index 0000000000..68e02a769c --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientTransportFactory.scala @@ -0,0 +1,181 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.client + +import cats.data.EitherT +import com.daml.grpc.adapter.ExecutionSequencerFactory +import com.daml.nonempty.{NonEmpty, NonEmptyUtil} +import com.digitalasset.canton.SequencerAlias +import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} +import com.digitalasset.canton.lifecycle.* +import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory} +import com.digitalasset.canton.networking.grpc.ClientChannelBuilder +import com.digitalasset.canton.sequencing.* +import com.digitalasset.canton.sequencing.client.SequencerClientTransportFactory.ValidateTransportResult +import com.digitalasset.canton.sequencing.client.grpc.GrpcSequencerChannelBuilder +import com.digitalasset.canton.sequencing.client.transports.* +import com.digitalasset.canton.topology.* +import com.digitalasset.canton.tracing.{TraceContext, TracingConfig} +import com.digitalasset.canton.util.Thereafter.syntax.* +import com.digitalasset.canton.util.* +import 
com.digitalasset.canton.util.retry.RetryUtil.NoExnRetryable +import io.grpc.ConnectivityState +import org.apache.pekko.stream.Materializer + +import scala.concurrent.* + +trait SequencerClientTransportFactory { + + def makeTransport( + sequencerConnections: SequencerConnections, + member: Member, + requestSigner: RequestSigner, + )(implicit + executionContext: ExecutionContextExecutor, + executionSequencerFactory: ExecutionSequencerFactory, + materializer: Materializer, + traceContext: TraceContext, + ): EitherT[Future, String, NonEmpty[ + Map[SequencerAlias, SequencerClientTransport & SequencerClientTransportPekko] + ]] = + MonadUtil + .sequentialTraverse(sequencerConnections.connections)(conn => + makeTransport(conn, member, requestSigner) + .map(transport => conn.sequencerAlias -> transport) + ) + .map(transports => NonEmptyUtil.fromUnsafe(transports.toMap)) + + def validateTransport( + sequencerConnections: SequencerConnections, + logWarning: Boolean, + )(implicit + executionContext: ExecutionContextExecutor, + errorLoggingContext: ErrorLoggingContext, + closeContext: CloseContext, + ): EitherT[FutureUnlessShutdown, String, Unit] = + MonadUtil + .sequentialTraverse(sequencerConnections.connections)(conn => + validateTransport(conn, logWarning) + .transform { + case Right(_) => Right(ValidateTransportResult.Valid) + case Left(error) => Right(ValidateTransportResult.NotValid(error)) + } + ) + .flatMap(checkAgainstTrustThreshold(sequencerConnections.sequencerTrustThreshold, _)) + + private def checkAgainstTrustThreshold( + sequencerTrustThreshold: PositiveInt, + results: Seq[ValidateTransportResult], + )(implicit + executionContext: ExecutionContextExecutor + ): EitherT[FutureUnlessShutdown, String, Unit] = EitherT.fromEither[FutureUnlessShutdown] { + if (results.count(_ == ValidateTransportResult.Valid) >= sequencerTrustThreshold.unwrap) + Right(()) + else { + val errors = results + .collect { case ValidateTransportResult.NotValid(message) => message } + 
Left(errors.mkString(", ")) + } + } + + def makeTransport( + connection: SequencerConnection, + member: Member, + requestSigner: RequestSigner, + )(implicit + executionContext: ExecutionContextExecutor, + executionSequencerFactory: ExecutionSequencerFactory, + materializer: Materializer, + traceContext: TraceContext, + ): EitherT[Future, String, SequencerClientTransport & SequencerClientTransportPekko] + + def validateTransport( + connection: SequencerConnection, + logWarning: Boolean, + )(implicit + executionContext: ExecutionContextExecutor, + errorLoggingContext: ErrorLoggingContext, + closeContext: CloseContext, + ): EitherT[FutureUnlessShutdown, String, Unit] + +} + +object SequencerClientTransportFactory { + sealed trait ValidateTransportResult extends Product with Serializable + object ValidateTransportResult { + final case object Valid extends ValidateTransportResult + final case class NotValid(message: String) extends ValidateTransportResult + } + + def validateTransport( + connection: SequencerConnection, + traceContextPropagation: TracingConfig.Propagation, + config: SequencerClientConfig, + logWarning: Boolean, + loggerFactory: NamedLoggerFactory, + )(implicit + executionContext: ExecutionContextExecutor, + errorLoggingContext: ErrorLoggingContext, + closeContext: CloseContext, + ): EitherT[FutureUnlessShutdown, String, Unit] = connection match { + case conn: GrpcSequencerConnection => + implicit val traceContext = errorLoggingContext.traceContext + errorLoggingContext.logger.info(s"Validating sequencer connection ${conn}") + val channelBuilder = ClientChannelBuilder(loggerFactory) + val channel = GrpcSequencerChannelBuilder( + channelBuilder, + conn, + NonNegativeInt.maxValue, + traceContextPropagation, + config.keepAliveClient, + ) + def closeChannel(): Unit = { + Lifecycle.close( + Lifecycle.toCloseableChannel( + channel, + errorLoggingContext.logger, + "sequencer-connection-test-channel", + ) + )( + errorLoggingContext.logger + ) + } + // 
clientConfig.handshakeRetryDelay.underlying.fromNow, + val retryMs = config.initialConnectionRetryDelay.asFiniteApproximation + val attempts = config.handshakeRetryDelay.underlying.toMillis / retryMs.toMillis + def check(): EitherT[Future, String, Unit] = { + channel.getState(true) match { + case ConnectivityState.READY => + errorLoggingContext.logger.info(s"Successfully connected to sequencer at ${conn}") + EitherT.rightT(()) + case other => + val msg = s"Unable to connect to sequencer at ${conn}: channel is ${other}" + errorLoggingContext.debug(msg) + EitherT.leftT(msg) + } + } + val name = "check-valid-sequencer-connection" + EitherT( + retry + .Pause( + errorLoggingContext.logger, + closeContext.context, + maxRetries = attempts.toInt, + delay = retryMs, + operationName = name, + ) + .unlessShutdown( + closeContext.context.performUnlessClosingF(name)(check().value), + NoExnRetryable, + ) + ).thereafter { _ => + closeChannel() + }.leftMap { res => + if (logWarning) { + errorLoggingContext.logger.warn(res) + } + res + } + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerSubscription.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerSubscription.scala new file mode 100644 index 0000000000..d34a8f0f01 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerSubscription.scala @@ -0,0 +1,119 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.client + +import com.daml.error.{ErrorCategory, ErrorCode, Explanation, Resolution} +import com.digitalasset.canton.DiscardOps +import com.digitalasset.canton.error.CantonError +import com.digitalasset.canton.error.CantonErrorGroups.SequencerSubscriptionErrorGroup +import com.digitalasset.canton.lifecycle.{AsyncCloseable, AsyncOrSyncCloseable, FlagCloseableAsync} +import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLogging} +import com.digitalasset.canton.tracing.TraceContext + +import scala.concurrent.{Future, Promise} + +/** Why did the sequencer subscription terminate */ +sealed trait SubscriptionCloseReason[+E] + +object SubscriptionCloseReason { + + final case class HandlerError[E](error: E) extends SubscriptionCloseReason[E] + + /** The handler threw an exception */ + final case class HandlerException(exception: Throwable) extends SubscriptionCloseReason[Nothing] + + /** The subscription itself failed. + * [[transports.SequencerClientTransport]] implementations are expected to provide their own hierarchy of errors + * and supply a matching [[SubscriptionErrorRetryPolicy]] to the [[SequencerClient]] for determining which + * errors are appropriate for attempting to resume a subscription. + */ + trait SubscriptionError extends SubscriptionCloseReason[Nothing] + + /** The subscription was denied + * Implementations are expected to provide their own error of this type + */ + trait PermissionDeniedError extends SubscriptionCloseReason[Nothing] + + /** The sequencer connection details are being updated, so the subscription is being closed so another one + * is created with the updated transport. + * This is not an error and also not a reason to close the sequencer client. + */ + case object TransportChange extends SubscriptionCloseReason[Nothing] + + /** The subscription was closed by the client. 
*/ + case object Closed extends SubscriptionCloseReason[Nothing] + + /** The subscription was closed due to an ongoing shutdown procedure. */ + case object Shutdown extends SubscriptionCloseReason[Nothing] +} + +/** A running subscription to a sequencer. + * Can be closed by the consumer or the producer. + * Once closed the [[closeReason]] value will be fulfilled with the reason the subscription was closed. + * Implementations are expected to immediately start their subscription unless otherwise stated. + * If close is called while the handler is running closeReason should not be completed until the handler has completed. + */ +trait SequencerSubscription[HandlerError] extends FlagCloseableAsync with NamedLogging { + + protected val closeReasonPromise: Promise[SubscriptionCloseReason[HandlerError]] = + Promise[SubscriptionCloseReason[HandlerError]]() + + /** Future which is completed when the subscription is closed. + * If the subscription is closed in a healthy state the future will be completed successfully. + * However if the subscription fails for an unexpected reason at runtime the completion should be failed. + */ + val closeReason: Future[SubscriptionCloseReason[HandlerError]] = closeReasonPromise.future + + /** Completes the subscription with the given reason and closes it. 
*/ + private[canton] def complete(reason: SubscriptionCloseReason[HandlerError])(implicit + traceContext: TraceContext + ): Unit + + override protected def closeAsync(): Seq[AsyncOrSyncCloseable] = { + import com.digitalasset.canton.tracing.TraceContext.Implicits.Empty.* + + Seq( + AsyncCloseable( + "sequencer-subscription", { + closeReasonPromise.trySuccess(SubscriptionCloseReason.Closed).discard + closeReasonPromise.future + }, + timeouts.shutdownNetwork, + ) + ) + } + + // We don't want to throw here when closing the subscription fails (e.g in case of timeout) + // If we threw we could short circuit the rest of the cleaning up of the gRPC stream and end up with + // a stalled stream + override def onCloseFailure(e: Throwable): Unit = { + logger.warn("Failed to close sequencer subscription", e)(TraceContext.empty) + } +} + +object SequencerSubscriptionError extends SequencerSubscriptionErrorGroup { + + sealed trait SequencedEventError extends CantonError + + @Explanation( + """This error indicates that a sequencer subscription to a recently onboarded sequencer attempted to read + |an event replaced with a tombstone. A tombstone occurs if the timestamp associated with the event predates + |the validity of the sequencer's signing key. 
This error results in the sequencer client disconnecting from + |the sequencer.""" + ) + @Resolution( + """Connect to another sequencer with older event history to consume the tombstoned events + |before reconnecting to the recently onboarded sequencer.""" + ) + object TombstoneEncountered + extends ErrorCode( + id = "SEQUENCER_TOMBSTONE_ENCOUNTERED", + ErrorCategory.InvalidGivenCurrentSystemStateOther, + ) { + final case class Error(override val cause: String)(implicit + val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl(cause) + with SequencedEventError + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerSubscriptionPekko.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerSubscriptionPekko.scala new file mode 100644 index 0000000000..1fa653ea34 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerSubscriptionPekko.scala @@ -0,0 +1,24 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.client + +import com.digitalasset.canton.health.HealthComponent +import com.digitalasset.canton.sequencing.OrdinarySerializedEvent +import com.digitalasset.canton.util.PekkoUtil.WithKillSwitch +import org.apache.pekko.Done +import org.apache.pekko.stream.KillSwitch +import org.apache.pekko.stream.scaladsl.Source + +import scala.concurrent.Future + +/** Wrapper for an Pekko source delivering the stream of sequenced events. + * The [[org.apache.pekko.stream.KillSwitch]] can be used to terminate the stream. + * The materialized [[scala.concurrent.Future]] completes + * after the internal processing in the source has finished + * after having been closed through the [[org.apache.pekko.stream.KillSwitch]]. 
+ */ +final case class SequencerSubscriptionPekko[+E]( + source: Source[WithKillSwitch[Either[E, OrdinarySerializedEvent]], (KillSwitch, Future[Done])], + health: HealthComponent, +) diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerTransportState.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerTransportState.scala new file mode 100644 index 0000000000..87311bc2dc --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerTransportState.scala @@ -0,0 +1,365 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.client + +import cats.implicits.catsSyntaxFlatten +import com.daml.nameof.NameOf.functionFullName +import com.digitalasset.canton.DiscardOps +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.lifecycle.{FlagCloseable, UnlessShutdown} +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.sequencing.client.SequencerClient.{ + SequencerTransportContainer, + SequencerTransports, +} +import com.digitalasset.canton.sequencing.client.SequencerClientSubscriptionError.{ + ApplicationHandlerPassive, + ApplicationHandlerShutdown, +} +import com.digitalasset.canton.sequencing.client.transports.{ + SequencerClientTransport, + SequencerClientTransportCommon, +} +import com.digitalasset.canton.topology.SequencerId +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.Thereafter.syntax.* +import com.digitalasset.canton.util.{ErrorUtil, MonadUtil} + +import java.util.concurrent.atomic.AtomicReference +import scala.collection.compat.immutable.ArraySeq +import scala.collection.mutable +import 
scala.concurrent.{ExecutionContext, Future, Promise, blocking} +import scala.util.{Failure, Random, Success, Try} + +trait SequencerTransportLookup { + + /** Returns an arbitrary [[com.digitalasset.canton.sequencing.client.transports.SequencerClientTransportCommon]]. + * Prefers healthy subscriptions to unhealthy ones. + * + * @throws java.lang.IllegalStateException if there are currently no + * [[com.digitalasset.canton.sequencing.client.transports.SequencerClientTransportCommon]]s at all + */ + def transport(implicit traceContext: TraceContext): SequencerClientTransportCommon + + /** Returns the transport for the given [[com.digitalasset.canton.topology.SequencerId]]. + * + * @throws java.lang.IllegalArgumentException if the [[com.digitalasset.canton.topology.SequencerId]] currently has not transport + */ + // TODO(#13789) remove after having switched over to Pekko everywhere + def transport(sequencerId: SequencerId)(implicit + traceContext: TraceContext + ): UnlessShutdown[SequencerClientTransport] +} + +class SequencersTransportState( + initialSequencerTransports: SequencerTransports[?], + val timeouts: ProcessingTimeout, + val loggerFactory: NamedLoggerFactory, +)(implicit executionContext: ExecutionContext) + extends SequencerTransportLookup + with NamedLogging + with FlagCloseable { + + private val random: Random = new Random(1L) + + private val closeReasonPromise = Promise[SequencerClient.CloseReason]() + + def completion: Future[SequencerClient.CloseReason] = closeReasonPromise.future + + private val lock = new Object() + + private val state = new mutable.HashMap[SequencerId, SequencerTransportState]() + + private val sequencerTrustThreshold = + new AtomicReference[PositiveInt](initialSequencerTransports.sequencerTrustThreshold) + + def getSequencerTrustThreshold = sequencerTrustThreshold.get() + + blocking(lock.synchronized { + val sequencerIdToTransportStateMap = initialSequencerTransports.sequencerIdToTransportMap.map { + case (sequencerId, transport) 
=> + (sequencerId, SequencerTransportState(transport)) + } + state.addAll(sequencerIdToTransportStateMap).discard + }) + + private def transportState( + sequencerId: SequencerId + )(implicit traceContext: TraceContext): UnlessShutdown[SequencerTransportState] = + performUnlessClosing(functionFullName)(blocking(lock.synchronized { + state.getOrElse( + sequencerId, + ErrorUtil.internalError( + new IllegalArgumentException(s"sequencerId=$sequencerId is unknown") + ), + ) + })) + + private def updateTransport( + sequencerId: SequencerId, + updatedTransport: SequencerTransportContainer[?], + )(implicit traceContext: TraceContext): UnlessShutdown[SequencerTransportState] = + performUnlessClosing(functionFullName) { + blocking(lock.synchronized { + transportState(sequencerId).map { transportStateBefore => + state + .put(sequencerId, transportStateBefore.withTransport(updatedTransport)) + .discard + transportStateBefore + } + }) + }.flatten + + override def transport(implicit traceContext: TraceContext): SequencerClientTransportCommon = + blocking(lock.synchronized { + // Pick a random healthy sequencer to send to. + // We can use a plain Random instance across all threads calling this method, + // because this method anyway uses locking on its own. + // (In general, ThreadLocalRandom would void contention on the random number generation, but + // the plain Random has the advantage that we can hard-code the seed so that the chosen sequencers + // are easier to reproduce for debugging and tests.) + val healthySequencers = state.view + .collect { case (_sequencerId, state) if state.isSubscriptionHealthy => state } + .to(ArraySeq) + val chosenSequencer = + if (healthySequencers.isEmpty) + // TODO(i12377): Can we fallback to first sequencer transport here or should we + // introduce EitherT and propagate error handling? + state.values.headOption + .getOrElse( + // TODO(i12377): Error handling + ErrorUtil.invalidState("No sequencer subscription at the moment. 
Try again later.") + ) + else healthySequencers(random.nextInt(healthySequencers.size)) + chosenSequencer.transport.clientTransport + }) + + override def transport(sequencerId: SequencerId)(implicit + traceContext: TraceContext + ): UnlessShutdown[SequencerClientTransport] = + transportState(sequencerId).map(_.transport.clientTransport) + + def addSubscription( + sequencerId: SequencerId, + subscription: ResilientSequencerSubscription[ + SequencerClientSubscriptionError + ], + eventValidator: SequencedEventValidator, + )(implicit traceContext: TraceContext): Unit = + performUnlessClosing(functionFullName) { + blocking(lock.synchronized { + transportState(sequencerId) + .map { currentSequencerTransportStateForAlias => + if (currentSequencerTransportStateForAlias.subscription.nonEmpty) { + // there's an existing subscription! + logger.warn( + "Cannot create additional subscriptions to the sequencer from the same client" + ) + sys.error( + s"The sequencer client already has a running subscription for sequencerAlias=$sequencerId" + ) + } + subscription.closeReason.onComplete(closeWithSubscriptionReason(sequencerId)) + + state + .put( + sequencerId, + currentSequencerTransportStateForAlias.withSubscription( + subscription, + eventValidator, + ), + ) + .discard + } + .onShutdown(()) + }) + }.onShutdown(()) + + def changeTransport( + sequencerTransports: SequencerTransports[?] 
+ )(implicit traceContext: TraceContext): Future[Unit] = blocking(lock.synchronized { + sequencerTrustThreshold.set(sequencerTransports.sequencerTrustThreshold) + val oldSequencerIds = state.keySet.toSet + val newSequencerIds = sequencerTransports.sequencerIdToTransportMap.keySet + + val newValues: Set[SequencerId] = newSequencerIds.diff(oldSequencerIds) + val removedValues: Set[SequencerId] = oldSequencerIds.diff(newSequencerIds) + val keptValues: Set[SequencerId] = oldSequencerIds.intersect(newSequencerIds) + + if (newValues.nonEmpty || removedValues.nonEmpty) { + ErrorUtil.internalErrorAsync( + new IllegalArgumentException( + "Adding or removing sequencer subscriptions is not supported at the moment" + ) + ) + } else + MonadUtil + .sequentialTraverse_(keptValues.toSeq) { sequencerId => + updateTransport(sequencerId, sequencerTransports.sequencerIdToTransportMap(sequencerId)) + .map { transportStateBefore => + transportStateBefore.subscription + .map(_.resilientSequencerSubscription.resubscribeOnTransportChange()) + .getOrElse(Future.unit) + .thereafter { _ => transportStateBefore.transport.clientTransport.close() } + } + .onShutdown(Future.unit) + } + }) + + private def closeSubscription( + sequencerId: SequencerId, + sequencerState: SequencerTransportState, + )(implicit traceContext: TraceContext): Unit = { + logger.debug(s"Closing sequencer subscription $sequencerId...") + sequencerState.subscription.foreach(_.close()) + sequencerState.transport.clientTransport.close() + val closeReason = sequencerState.subscription + .map(_.resilientSequencerSubscription.closeReason) + .getOrElse(Future.unit) + logger.trace(s"Wait for the subscription $sequencerId to complete") + timeouts.shutdownNetwork + .await_(s"closing resilient sequencer client subscription $sequencerId")(closeReason) + } + + def closeAllSubscriptions(): Unit = blocking(lock.synchronized { + import TraceContext.Implicits.Empty.* + + state.toList.foreach { case (sequencerId, subscription) => + 
closeSubscription(sequencerId, subscription) + } + + closeReasonPromise + .tryComplete(Success(SequencerClient.CloseReason.ClientShutdown)) + .discard + }) + + private def isEnoughSequencersToOperateWithoutSequencer: Boolean = + state.size > sequencerTrustThreshold.get().unwrap + + private def closeWithSubscriptionReason(sequencerId: SequencerId)( + subscriptionCloseReason: Try[SubscriptionCloseReason[SequencerClientSubscriptionError]] + )(implicit traceContext: TraceContext): Unit = { + // TODO(i12076): Consider aggregating the current situation about other sequencers and + // close the sequencer client only in case of not enough healthy sequencers + + val maybeCloseReason: Try[Either[SequencerClient.CloseReason, Unit]] = + subscriptionCloseReason.map[Either[SequencerClient.CloseReason, Unit]] { + case SubscriptionCloseReason.HandlerException(ex) => + Left(SequencerClient.CloseReason.UnrecoverableException(ex)) + case SubscriptionCloseReason.HandlerError(ApplicationHandlerPassive(_reason)) => + Left(SequencerClient.CloseReason.BecamePassive) + case SubscriptionCloseReason.HandlerError(ApplicationHandlerShutdown) => + Left(SequencerClient.CloseReason.ClientShutdown) + case SubscriptionCloseReason.HandlerError(err) => + Left(SequencerClient.CloseReason.UnrecoverableError(s"handler returned error: $err")) + case permissionDenied: SubscriptionCloseReason.PermissionDeniedError => + blocking(lock.synchronized { + if (!isEnoughSequencersToOperateWithoutSequencer) + Left(SequencerClient.CloseReason.PermissionDenied(s"$permissionDenied")) + else { + state.remove(sequencerId).foreach(closeSubscription(sequencerId, _)) + Right(()) + } + }) + case subscriptionError: SubscriptionCloseReason.SubscriptionError => + blocking(lock.synchronized { + if (!isEnoughSequencersToOperateWithoutSequencer) + Left( + SequencerClient.CloseReason.UnrecoverableError( + s"subscription implementation failed: $subscriptionError" + ) + ) + else { + 
state.remove(sequencerId).foreach(closeSubscription(sequencerId, _)) + Right(()) + } + }) + case SubscriptionCloseReason.Closed => + blocking(lock.synchronized { + if (!isEnoughSequencersToOperateWithoutSequencer) + Left(SequencerClient.CloseReason.ClientShutdown) + else { + state.remove(sequencerId).foreach(closeSubscription(sequencerId, _)) + Right(()) + } + }) + case SubscriptionCloseReason.Shutdown => Left(SequencerClient.CloseReason.ClientShutdown) + case SubscriptionCloseReason.TransportChange => + Right(()) // we don't want to close the sequencer client when changing transport + } + + def complete(reason: Try[SequencerClient.CloseReason]): Unit = + closeReasonPromise.tryComplete(reason).discard + + lazy val closeReason: Try[SequencerClient.CloseReason] = maybeCloseReason.collect { + case Left(error) => + error + } + maybeCloseReason match { + case Failure(_) => complete(closeReason) + case Success(Left(_)) => complete(closeReason) + case Success(Right(())) => + } + } + + override protected def onClosed(): Unit = { + closeAllSubscriptions() + } +} + +final case class SequencerTransportState( + transport: SequencerTransportContainer[?], + subscription: Option[SequencerTransportState.Subscription] = None, +) { + + def withTransport( + newTransport: SequencerTransportContainer[?] 
+ ): SequencerTransportState = { + require( + newTransport.sequencerId == transport.sequencerId, + "SequencerId of the new transport must match", + ) + copy(transport = newTransport) + } + + def withSubscription( + resilientSequencerSubscription: ResilientSequencerSubscription[ + SequencerClientSubscriptionError + ], + eventValidator: SequencedEventValidator, + ): SequencerTransportState = { + require( + subscription.isEmpty, + "Cannot create additional subscriptions to the sequencer from the same client", + ) + copy(subscription = + Some( + SequencerTransportState.Subscription( + resilientSequencerSubscription, + eventValidator, + ) + ) + ) + } + + def isSubscriptionHealthy: Boolean = subscription.exists { subscription => + !subscription.resilientSequencerSubscription.isFailed && !subscription.resilientSequencerSubscription.isClosing + } + +} + +object SequencerTransportState { + final case class Subscription( + resilientSequencerSubscription: ResilientSequencerSubscription[ + SequencerClientSubscriptionError + ], + eventValidator: SequencedEventValidator, + ) { + def close(): Unit = { + eventValidator.close() + resilientSequencerSubscription.close() + } + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SubscriptionErrorRetryPolicy.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SubscriptionErrorRetryPolicy.scala new file mode 100644 index 0000000000..b881efcb8d --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SubscriptionErrorRetryPolicy.scala @@ -0,0 +1,54 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.client + +import com.digitalasset.canton.logging.TracedLogger +import com.digitalasset.canton.sequencing.client.SubscriptionCloseReason.SubscriptionError +import com.digitalasset.canton.tracing.TraceContext + +import scala.reflect.ClassTag + +/** Policy for what errors are considered retryable. + * Each [[transports.SequencerClientTransport]] is expected to supply + * their own policy which can consider error types they have defined. + */ +trait SubscriptionErrorRetryPolicy { + def retryOnError(subscriptionError: SubscriptionError, receivedItems: Boolean)(implicit + traceContext: TraceContext + ): Boolean + + def retryOnException(ex: Throwable, Logger: TracedLogger)(implicit + traceContext: TraceContext + ): Boolean = false +} + +/** Allows implementors to only specify policy for an error hierarchy they've defined. + * Avoids adding type parameters to all sequencer client components. + * TODO(11067): work out if type parameters are really required and if so are they that bad + */ +abstract class CheckedSubscriptionErrorRetryPolicy[SE <: SubscriptionError](implicit + classTag: ClassTag[SE] +) extends SubscriptionErrorRetryPolicy { + override def retryOnError(error: SubscriptionError, receivedItems: Boolean)(implicit + traceContext: TraceContext + ): Boolean = + error match { + case expectedError: SE => retryInternal(expectedError, receivedItems) + case unexpectedType => sys.error(s"Unexpected error type: $unexpectedType") + } + + protected def retryInternal(error: SE, receivedItems: Boolean)(implicit + traceContext: TraceContext + ): Boolean +} + +object SubscriptionErrorRetryPolicy { + + /** Never retry on any error */ + def never: SubscriptionErrorRetryPolicy = new SubscriptionErrorRetryPolicy { + override def retryOnError(subscriptionError: SubscriptionError, receivedItems: Boolean)(implicit + traceContext: TraceContext + ): Boolean = false + } +} diff --git 
a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SubscriptionErrorRetryPolicyPekko.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SubscriptionErrorRetryPolicyPekko.scala new file mode 100644 index 0000000000..164198f7ea --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SubscriptionErrorRetryPolicyPekko.scala @@ -0,0 +1,32 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.client + +import com.digitalasset.canton.logging.ErrorLoggingContext + +/** Policy for what errors are considered retryable. + * Each [[transports.SequencerClientTransport]] is expected to supply + * their own policy which can consider error types they have defined. + */ +trait SubscriptionErrorRetryPolicyPekko[-E] { + def retryOnError(subscriptionError: E, receivedItems: Boolean)(implicit + loggingContext: ErrorLoggingContext + ): Boolean + + def retryOnException(ex: Throwable)(implicit + loggingContext: ErrorLoggingContext + ): Boolean +} + +object SubscriptionErrorRetryPolicyPekko { + case object never extends SubscriptionErrorRetryPolicyPekko[Any] { + override def retryOnError(subscriptionError: Any, receivedItems: Boolean)(implicit + loggingContext: ErrorLoggingContext + ): Boolean = false + + override def retryOnException(ex: Throwable)(implicit + loggingContext: ErrorLoggingContext + ): Boolean = false + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SubscriptionRetryDelayRule.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SubscriptionRetryDelayRule.scala new file mode 100644 index 0000000000..53342f2145 --- /dev/null +++ 
b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SubscriptionRetryDelayRule.scala @@ -0,0 +1,43 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.client + +import scala.concurrent.duration.FiniteDuration + +/** Calculator for how to select the next retry duration and specifies what duration is enough to log a warning. */ +trait SubscriptionRetryDelayRule { + + /** What should the first delay be */ + val initialDelay: FiniteDuration + + /** If we retry for a duration greater than this value then a warning will be logged */ + val warnDelayDuration: FiniteDuration + + /** Calculate the next retry delay given the prior and knowing whether an event has been received on the last subscription (suggesting that it did successfully connect and read). */ + def nextDelay(previousDelay: FiniteDuration, hasReceivedEvent: Boolean): FiniteDuration +} + +object SubscriptionRetryDelayRule { + def apply( + initialRetryDelay: FiniteDuration, + warnDelay: FiniteDuration, + maxRetryDelay: FiniteDuration, + ): SubscriptionRetryDelayRule = + new SubscriptionRetryDelayRule { + + override val initialDelay: FiniteDuration = initialRetryDelay + override val warnDelayDuration: FiniteDuration = warnDelay + + override def nextDelay( + previousDelay: FiniteDuration, + hasReceivedEvent: Boolean, + ): FiniteDuration = + // reset delay + if (hasReceivedEvent) initialRetryDelay + else { + // increase delay + (previousDelay * 2) min maxRetryDelay + } + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/grpc/GrpcSequencerChannelBuilder.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/grpc/GrpcSequencerChannelBuilder.scala new file mode 100644 index 0000000000..565d95ce8a --- /dev/null +++ 
b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/grpc/GrpcSequencerChannelBuilder.scala @@ -0,0 +1,36 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.client.grpc + +import cats.syntax.option.* +import com.digitalasset.canton.config.KeepAliveClientConfig +import com.digitalasset.canton.config.RequireTypes.NonNegativeInt +import com.digitalasset.canton.networking.grpc.ClientChannelBuilder +import com.digitalasset.canton.sequencing.GrpcSequencerConnection +import com.digitalasset.canton.tracing.TracingConfig.Propagation +import io.grpc.ManagedChannel + +import java.util.concurrent.Executor + +/** Create a GRPC channel to use for the sequencer client and sequencer administrative operations */ +object GrpcSequencerChannelBuilder { + def apply( + clientChannelBuilder: ClientChannelBuilder, + connection: GrpcSequencerConnection, + maxRequestSize: NonNegativeInt, + traceContextPropagation: Propagation, + keepAlive: Option[KeepAliveClientConfig] = Some(KeepAliveClientConfig()), + )(implicit executor: Executor): ManagedChannel = + clientChannelBuilder + .create( + connection.endpoints, + connection.transportSecurity, + executor, + connection.customTrustCertificates, + traceContextPropagation, + maxRequestSize.some, + keepAlive, + ) + .build() +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/package.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/package.scala new file mode 100644 index 0000000000..7631faef04 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/package.scala @@ -0,0 +1,17 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing + +import com.digitalasset.canton.lifecycle.UnlessShutdown + +package object client { + + /** Signature for callbacks provided to the send operation to take advantage of the SendTracker to provide + * tracking of the eventual send result. Callback is ephemeral and will be lost if the SequencerClient is recreated + * or the process exits. + * @see [[SequencerClient.sendAsync]] + */ + type SendCallback = UnlessShutdown[SendResult] => Unit + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/GrpcSequencerClientAuth.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/GrpcSequencerClientAuth.scala new file mode 100644 index 0000000000..2ebc53161e --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/GrpcSequencerClientAuth.scala @@ -0,0 +1,99 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.client.transports + +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.common.domain.ServiceAgreementId +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.crypto.Crypto +import com.digitalasset.canton.domain.api.v0.SequencerAuthenticationServiceGrpc.SequencerAuthenticationServiceStub +import com.digitalasset.canton.lifecycle.Lifecycle.CloseableChannel +import com.digitalasset.canton.lifecycle.{FlagCloseable, Lifecycle} +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.networking.Endpoint +import com.digitalasset.canton.sequencing.authentication.grpc.{ + SequencerClientNoAuthentication, + SequencerClientTokenAuthentication, +} +import com.digitalasset.canton.sequencing.authentication.{ + AuthenticationTokenManagerConfig, + AuthenticationTokenProvider, +} +import com.digitalasset.canton.time.Clock +import com.digitalasset.canton.topology.{ + AuthenticatedMember, + DomainId, + Member, + UnauthenticatedMemberId, +} +import com.digitalasset.canton.tracing.{TraceContext, TraceContextGrpc} +import com.digitalasset.canton.version.ProtocolVersion +import io.grpc.ManagedChannel +import io.grpc.stub.AbstractStub + +import scala.concurrent.ExecutionContext + +/** Auth helpers for the [[GrpcSequencerClientTransport]] when dealing with our custom authentication tokens. 
*/ +class GrpcSequencerClientAuth( + domainId: DomainId, + member: Member, + crypto: Crypto, + agreementId: Option[ServiceAgreementId], + channelPerEndpoint: NonEmpty[Map[Endpoint, ManagedChannel]], + supportedProtocolVersions: Seq[ProtocolVersion], + tokenManagerConfig: AuthenticationTokenManagerConfig, + clock: Clock, + override protected val timeouts: ProcessingTimeout, + protected val loggerFactory: NamedLoggerFactory, +)(implicit executionContext: ExecutionContext) + extends FlagCloseable + with NamedLogging { + + private val tokenProvider = + new AuthenticationTokenProvider( + domainId, + member, + agreementId, + crypto, + supportedProtocolVersions, + tokenManagerConfig, + timeouts, + loggerFactory, + ) + + /** Wrap a grpc client with components to appropriately perform authentication */ + def apply[S <: AbstractStub[S]](client: S): S = { + val obtainTokenPerEndpoint = channelPerEndpoint.transform { case (_, channel) => + val authenticationClient = new SequencerAuthenticationServiceStub(channel) + (tc: TraceContext) => + TraceContextGrpc.withGrpcContext(tc) { + tokenProvider.generateToken(authenticationClient) + } + } + val clientAuthentication = member match { + case unauthenticatedMember: UnauthenticatedMemberId => + new SequencerClientNoAuthentication(domainId, unauthenticatedMember) + case authenticatedMember: AuthenticatedMember => + SequencerClientTokenAuthentication( + domainId, + authenticatedMember, + obtainTokenPerEndpoint, + tokenProvider.isClosing, + tokenManagerConfig, + clock, + loggerFactory, + ) + } + clientAuthentication(client) + } + + override protected def onClosed(): Unit = { + Lifecycle.close( + tokenProvider +: + channelPerEndpoint.toList.map { case (endpoint, channel) => + new CloseableChannel(channel, logger, s"grpc-client-auth-$endpoint") + }: _* + )(logger) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/GrpcSequencerClientTransport.scala 
b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/GrpcSequencerClientTransport.scala new file mode 100644 index 0000000000..75accef1e9 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/GrpcSequencerClientTransport.scala @@ -0,0 +1,386 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.client.transports + +import cats.data.EitherT +import cats.syntax.either.* +import com.daml.grpc.adapter.ExecutionSequencerFactory +import com.daml.grpc.adapter.client.pekko.ClientAdapter +import com.digitalasset.canton.ProtoDeserializationError.ProtoDeserializationFailure +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.domain.api.v0 +import com.digitalasset.canton.domain.api.v0.SequencerConnectServiceGrpc.SequencerConnectServiceStub +import com.digitalasset.canton.domain.api.v0.SequencerServiceGrpc.SequencerServiceStub +import com.digitalasset.canton.lifecycle.Lifecycle +import com.digitalasset.canton.lifecycle.Lifecycle.CloseableChannel +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging, TracedLogger} +import com.digitalasset.canton.metrics.SequencerClientMetrics +import com.digitalasset.canton.networking.grpc.GrpcError.{ + GrpcClientGaveUp, + GrpcServerError, + GrpcServiceUnavailable, +} +import com.digitalasset.canton.networking.grpc.{CantonGrpcUtil, GrpcError} +import com.digitalasset.canton.sequencing.SerializedEventHandler +import com.digitalasset.canton.sequencing.client.{ + SendAsyncClientError, + SequencerSubscription, + SubscriptionErrorRetryPolicy, +} +import com.digitalasset.canton.sequencing.handshake.HandshakeRequestError +import com.digitalasset.canton.sequencing.protocol.* +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import 
com.digitalasset.canton.topology.store.StoredTopologyTransactionX.GenericStoredTopologyTransactionX +import com.digitalasset.canton.topology.store.StoredTopologyTransactionsX +import com.digitalasset.canton.tracing.{TraceContext, TraceContextGrpc, Traced} +import com.digitalasset.canton.util.EitherTUtil.syntax.* +import com.digitalasset.canton.util.EitherUtil +import io.grpc.Context.CancellableContext +import io.grpc.{CallOptions, Context, ManagedChannel} +import org.apache.pekko.stream.Materializer +import org.apache.pekko.stream.scaladsl.Source + +import java.util.concurrent.atomic.AtomicBoolean +import scala.concurrent.duration.Duration +import scala.concurrent.{ExecutionContext, Future} + +private[transports] abstract class GrpcSequencerClientTransportCommon( + channel: ManagedChannel, + callOptions: CallOptions, + clientAuth: GrpcSequencerClientAuth, + val timeouts: ProcessingTimeout, + protected val loggerFactory: NamedLoggerFactory, +)(implicit + executionContext: ExecutionContext, + esf: ExecutionSequencerFactory, + materializer: Materializer, +) extends SequencerClientTransportCommon + with NamedLogging { + + private val sequencerConnectServiceClient = new SequencerConnectServiceStub(channel) + protected val sequencerServiceClient = clientAuth( + new SequencerServiceStub(channel, options = callOptions) + ) + private val noLoggingShutdownErrorsLogPolicy: GrpcError => TracedLogger => TraceContext => Unit = + err => + logger => + traceContext => + err match { + case _: GrpcClientGaveUp | _: GrpcServerError | _: GrpcServiceUnavailable => + // avoid logging client errors that typically happen during shutdown (such as grpc context cancelled) + performUnlessClosing("grpc-client-transport-log")(err.log(logger)(traceContext))( + traceContext + ).discard + case _ => err.log(logger)(traceContext) + } + + /** Attempt to obtain a handshake response from the sequencer. + * Transports can indicate in the error if the error is transient and could be retried. 
+ */ + override def handshake(request: HandshakeRequest)(implicit + traceContext: TraceContext + ): EitherT[Future, HandshakeRequestError, HandshakeResponse] = + for { + responseP <- CantonGrpcUtil + .sendGrpcRequest(sequencerConnectServiceClient, "sequencer")( + _.handshake(request.toProtoV0), + requestDescription = "handshake", + logger = logger, + retryPolicy = + _ => false, // we'll let the sequencer client decide whether to retry based on the error we return + timeout = timeouts.network.duration, + logPolicy = err => + logger => + traceContext => + logger.debug(s"Failed to handshake with sequencer: $err")(traceContext), + ) + .leftMap(err => HandshakeRequestError(err.toString, err.retry)) + response <- HandshakeResponse + .fromProtoV0(responseP) + // if deserialization failed it likely means we have a version conflict on the handshake itself + .leftMap(err => + HandshakeRequestError(s"Deserialization of response failed: $err", retryable = false) + ) + .toEitherT[Future] + } yield response + + override def sendAsyncSigned( + request: SignedContent[SubmissionRequest], + timeout: Duration, + )(implicit traceContext: TraceContext): EitherT[Future, SendAsyncClientError, Unit] = { + sendInternal( + stub => + stub.sendAsyncVersioned( + v0.SendAsyncVersionedRequest(signedSubmissionRequest = request.toByteString) + ), + "send-async-versioned", + request.content.messageId, + timeout, + SendAsyncResponse.fromSendAsyncSignedResponseProto, + ) + } + + override def sendAsyncUnauthenticatedVersioned(request: SubmissionRequest, timeout: Duration)( + implicit traceContext: TraceContext + ): EitherT[Future, SendAsyncClientError, Unit] = sendInternal( + stub => + stub.sendAsyncUnauthenticatedVersioned( + v0.SendAsyncUnauthenticatedVersionedRequest(submissionRequest = request.toByteString) + ), + "send-async-unauthenticated-versioned", + request.messageId, + timeout, + SendAsyncResponse.fromSendAsyncResponseProto, + ) + + private def sendInternal[Resp]( + send: 
SequencerServiceStub => Future[Resp], + endpoint: String, + messageId: MessageId, + timeout: Duration, + fromResponseProto: Resp => ParsingResult[SendAsyncResponse], + )(implicit traceContext: TraceContext): EitherT[Future, SendAsyncClientError, Unit] = { + // sends are at-most-once so we cannot retry when unavailable as we don't know if the request has been accepted + val sendAtMostOnce = retryPolicy(retryOnUnavailable = false) + val response = + CantonGrpcUtil.sendGrpcRequest(sequencerServiceClient, "sequencer")( + stub => send(stub), + requestDescription = s"$endpoint/$messageId", + timeout = timeout, + logger = logger, + logPolicy = noLoggingShutdownErrorsLogPolicy, + retryPolicy = sendAtMostOnce, + ) + response.biflatMap( + fromGrpcError(_, messageId).toEitherT, + fromResponse(_, fromResponseProto).toEitherT, + ) + } + + private def fromResponse[Proto]( + p: Proto, + deserializer: Proto => ParsingResult[SendAsyncResponse], + ) = { + for { + response <- deserializer(p) + .leftMap[SendAsyncClientError](err => + SendAsyncClientError.RequestFailed(s"Failed to deserialize response: $err") + ) + _ <- response.error.toLeft(()).leftMap(SendAsyncClientError.RequestRefused) + } yield () + } + + private def fromGrpcError(error: GrpcError, messageId: MessageId)(implicit + traceContext: TraceContext + ): Either[SendAsyncClientError, Unit] = { + val result = EitherUtil.condUnitE( + !bubbleSendErrorPolicy(error), + SendAsyncClientError.RequestFailed(s"Failed to make request to the server: $error"), + ) + + // log that we're swallowing the error + result.foreach { _ => + logger.info( + s"Send [$messageId] returned an error however may still be possibly sequenced so we are ignoring the error: $error" + ) + } + + result + } + + /** We receive grpc errors for a variety of reasons. The send operation is at-most-once and should only be bubbled up + * and potentially retried if we are absolutely certain the request will never be sequenced. 
+ */ + private def bubbleSendErrorPolicy(error: GrpcError): Boolean = + error match { + // bad request refused by server + case _: GrpcError.GrpcClientError => true + // the request was rejected by the server as it wasn't in a state to accept it + case _: GrpcError.GrpcRequestRefusedByServer => true + // an internal error happened at the server, this could have been when constructing or sending the response + // after accepting the request so we cannot safely bubble the error + case _: GrpcError.GrpcServerError => false + // the service is unavailable, but this could have been returned after a request was received + case _: GrpcServiceUnavailable => false + // there was a timeout meaning we don't know what happened with the request + case _: GrpcError.GrpcClientGaveUp => false + } + + /** Retry policy to retry once for authentication failures to allow re-authentication and optionally retry when unavailable. */ + private def retryPolicy( + retryOnUnavailable: Boolean + )(implicit traceContext: TraceContext): GrpcError => Boolean = { + // we allow one retry if the failure was due to an auth token expiration + // if it's not refresh upon the next call we shouldn't retry again + val hasRetriedDueToTokenExpiration = new AtomicBoolean(false) + + error => + if (isClosing) false // don't even think about retrying if we're closing + else + error match { + case requestRefused: GrpcError.GrpcRequestRefusedByServer + if !hasRetriedDueToTokenExpiration + .get() && requestRefused.isAuthenticationTokenMissing => + logger.info( + "Retrying once to give the sequencer the opportunity to refresh the authentication token." 
+ ) + hasRetriedDueToTokenExpiration.set(true) // don't allow again + true + // Retrying to recover from transient failures, e.g.: + // - network outages + // - sequencer starting up during integration tests + case _: GrpcServiceUnavailable => retryOnUnavailable + // don't retry on anything else as the request may have been received and a subsequent send may cause duplicates + case _ => false + } + } + + override def acknowledge(request: AcknowledgeRequest)(implicit + traceContext: TraceContext + ): Future[Unit] = { + val timestamp = request.timestamp + val requestP = request.toProtoV0 + val responseP = CantonGrpcUtil.sendGrpcRequest(sequencerServiceClient, "sequencer")( + _.acknowledge(requestP), + requestDescription = s"acknowledge/$timestamp", + timeout = timeouts.network.duration, + logger = logger, + logPolicy = noLoggingShutdownErrorsLogPolicy, + retryPolicy = retryPolicy(retryOnUnavailable = false), + ) + + logger.debug(s"Acknowledging timestamp: $timestamp") + responseP.value map { + case Left(error) => + logger.warn(s"Failed to send acknowledgement for $timestamp: $error") + case Right(_) => + logger.debug(s"Acknowledged timestamp: $timestamp") + } + } + + override def acknowledgeSigned(signedRequest: SignedContent[AcknowledgeRequest])(implicit + traceContext: TraceContext + ): EitherT[Future, String, Unit] = { + val request = signedRequest.content + val timestamp = request.timestamp + val requestP = signedRequest.toProtoV1 + logger.debug(s"Acknowledging timestamp: $timestamp") + CantonGrpcUtil + .sendGrpcRequest(sequencerServiceClient, "sequencer")( + _.acknowledgeSigned(requestP), + requestDescription = s"acknowledge-signed/$timestamp", + timeout = timeouts.network.duration, + logger = logger, + logPolicy = noLoggingShutdownErrorsLogPolicy, + retryPolicy = retryPolicy(retryOnUnavailable = false), + ) + .leftMap(_.toString) + .map(_ => logger.debug(s"Acknowledged timestamp: $timestamp")) + } + + override def downloadTopologyStateForInit(request: 
TopologyStateForInitRequest)(implicit + traceContext: TraceContext + ): EitherT[Future, String, TopologyStateForInitResponse] = { + logger.debug("Downloading topology state for initialization") + + ClientAdapter + .serverStreaming(request.toProtoV0, sequencerServiceClient.downloadTopologyStateForInit) + .map(TopologyStateForInitResponse.fromProtoV0(_)) + .flatMapConcat { parsingResult => + parsingResult.fold( + err => Source.failed(ProtoDeserializationFailure.Wrap(err).asGrpcError), + Source.single, + ) + } + .runFold(Vector.empty[GenericStoredTopologyTransactionX])((acc, txs) => + acc ++ txs.topologyTransactions.value.result + ) + .toEitherTRight[String] + .map { accumulated => + val storedTxs = StoredTopologyTransactionsX(accumulated) + logger.debug( + s"Downloaded topology state for initialization with last change timestamp at ${storedTxs.lastChangeTimestamp}:\n${storedTxs.result}" + ) + TopologyStateForInitResponse(Traced(storedTxs)) + } + } + + override protected def onClosed(): Unit = + Lifecycle.close( + clientAuth, + new CloseableChannel(channel, logger, "grpc-sequencer-transport"), + )(logger) +} + +class GrpcSequencerClientTransport( + channel: ManagedChannel, + callOptions: CallOptions, + clientAuth: GrpcSequencerClientAuth, + metrics: SequencerClientMetrics, + timeouts: ProcessingTimeout, + loggerFactory: NamedLoggerFactory, +)(implicit + executionContext: ExecutionContext, + esf: ExecutionSequencerFactory, + materializer: Materializer, +) extends GrpcSequencerClientTransportCommon( + channel, + callOptions, + clientAuth, + timeouts, + loggerFactory, + ) + with SequencerClientTransport { + + override def subscribe[E]( + subscriptionRequest: SubscriptionRequest, + handler: SerializedEventHandler[E], + )(implicit traceContext: TraceContext): SequencerSubscription[E] = + subscribeInternal(subscriptionRequest, handler, requiresAuthentication = true) + + private def subscribeInternal[E]( + subscriptionRequest: SubscriptionRequest, + handler: 
SerializedEventHandler[E], + requiresAuthentication: Boolean, + )(implicit traceContext: TraceContext): SequencerSubscription[E] = { + // we intentionally don't use `Context.current()` as we don't want to inherit the + // cancellation scope from upstream requests + val context: CancellableContext = Context.ROOT.withCancellation() + + val subscription = GrpcSequencerSubscription.fromVersionedSubscriptionResponse( + context, + handler, + metrics, + timeouts, + loggerFactory, + ) + + context.run(() => + TraceContextGrpc.withGrpcContext(traceContext) { + if (requiresAuthentication) { + sequencerServiceClient.subscribeVersioned( + subscriptionRequest.toProtoV0, + subscription.observer, + ) + } else { + sequencerServiceClient.subscribeUnauthenticatedVersioned( + subscriptionRequest.toProtoV0, + subscription.observer, + ) + } + } + ) + + subscription + } + + override def subscribeUnauthenticated[E]( + request: SubscriptionRequest, + handler: SerializedEventHandler[E], + )(implicit traceContext: TraceContext): SequencerSubscription[E] = + subscribeInternal(request, handler, requiresAuthentication = false) + + override def subscriptionRetryPolicy: SubscriptionErrorRetryPolicy = + new GrpcSubscriptionErrorRetryPolicy(loggerFactory) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/GrpcSequencerClientTransportPekko.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/GrpcSequencerClientTransportPekko.scala new file mode 100644 index 0000000000..cc2fca92b1 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/GrpcSequencerClientTransportPekko.scala @@ -0,0 +1,209 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.client.transports + +import cats.syntax.either.* +import com.daml.grpc.adapter.ExecutionSequencerFactory +import com.daml.grpc.adapter.client.pekko.ClientAdapter +import com.digitalasset.canton.ProtoDeserializationError +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.domain.api.v0 +import com.digitalasset.canton.health.HealthComponent.AlwaysHealthyComponent +import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.metrics.SequencerClientMetrics +import com.digitalasset.canton.networking.grpc.GrpcError +import com.digitalasset.canton.networking.grpc.GrpcError.GrpcServiceUnavailable +import com.digitalasset.canton.sequencing.OrdinarySerializedEvent +import com.digitalasset.canton.sequencing.client.{ + SequencerSubscriptionPekko, + SubscriptionErrorRetryPolicyPekko, +} +import com.digitalasset.canton.sequencing.protocol.{SubscriptionRequest, SubscriptionResponse} +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.store.SequencedEventStore.OrdinarySequencedEvent +import com.digitalasset.canton.tracing.{SerializableTraceContext, TraceContext, TraceContextGrpc} +import com.digitalasset.canton.util.PekkoUtil.syntax.* +import com.digitalasset.canton.version.ProtocolVersion +import io.grpc.Context.CancellableContext +import io.grpc.stub.StreamObserver +import io.grpc.{CallOptions, Context, ManagedChannel, Status, StatusRuntimeException} +import org.apache.pekko.stream.Materializer +import org.apache.pekko.stream.scaladsl.{Keep, Source} + +import scala.concurrent.ExecutionContext + +class GrpcSequencerClientTransportPekko( + channel: ManagedChannel, + callOptions: CallOptions, + clientAuth: GrpcSequencerClientAuth, + metrics: SequencerClientMetrics, + timeouts: ProcessingTimeout, + loggerFactory: NamedLoggerFactory, + protocolVersion: ProtocolVersion, +)(implicit 
+ executionContext: ExecutionContext, + executionSequencerFactory: ExecutionSequencerFactory, + materializer: Materializer, +) +// TODO(#13789) Extend GrpcSequencerClientTransportCommon and drop support for non-Pekko subscriptions + extends GrpcSequencerClientTransport( + channel, + callOptions, + clientAuth, + metrics, + timeouts, + loggerFactory, + ) + with SequencerClientTransportPekko { + + import GrpcSequencerClientTransportPekko.* + + override type SubscriptionError = GrpcSequencerSubscriptionError + + override def subscribe(request: SubscriptionRequest)(implicit + traceContext: TraceContext + ): SequencerSubscriptionPekko[SubscriptionError] = + subscribeInternal(request, requiresAuthentication = true) + + override def subscribeUnauthenticated(request: SubscriptionRequest)(implicit + traceContext: TraceContext + ): SequencerSubscriptionPekko[SubscriptionError] = + subscribeInternal(request, requiresAuthentication = false) + + private def subscribeInternal( + subscriptionRequest: SubscriptionRequest, + requiresAuthentication: Boolean, + )(implicit traceContext: TraceContext): SequencerSubscriptionPekko[SubscriptionError] = { + + val subscriptionRequestP = subscriptionRequest.toProtoV0 + + def mkSubscription[Resp: HasProtoTraceContext]( + subscriber: (v0.SubscriptionRequest, StreamObserver[Resp]) => Unit + )( + parseResponse: (Resp, TraceContext) => ParsingResult[SubscriptionResponse] + ): SequencerSubscriptionPekko[SubscriptionError] = { + val source = ClientAdapter + .serverStreaming[v0.SubscriptionRequest, Resp]( + subscriptionRequestP, + stubWithFreshContext(subscriber), + ) + .map(Right(_)) + .concatLazy( + // A sequencer subscription should never terminate; it's an endless stream. + // So if we see a termination, then insert an appropriate error. + // If there is an actual gRPC error, this source will not be evaluated as + // `recover` below completes the stream before emitting. + // See `PekkoUtilTest` for a unit test that this works as expected. 
+ Source.lazySingle { () => + // Info level, as this occurs from time to time due to the invalidation of the authentication token. + logger.info("The sequencer subscription has been terminated by the server.") + val error = GrpcError( + "subscription", + "sequencer", + Status.UNAVAILABLE + .withDescription("Connection terminated by the server.") + .asRuntimeException(), + ) + Left(ExpectedGrpcFailure(error)) + } + ) + .recover(recoverOnError) + // Everything up to here runs "synchronously" and can deal with cancellations + // without causing shutdown synchronization problems + // Technically, everything below until `takeUntilThenDrain` also could deal with + // cancellations just fine, but we nevertheless establish the pattern here + // to see how well it scales + .withUniqueKillSwitchMat()(Keep.right) + .map( + _.map( + _.flatMap( + deserializeSubscriptionResponse(_)(parseResponse).leftMap(ResponseParseError) + ) + ) + ) + // Stop emitting after the first parse error + .takeUntilThenDrain(_.isLeft) + .watchTermination()(Keep.both) + SequencerSubscriptionPekko( + source, + // Transport does not report its health individually + new AlwaysHealthyComponent("GrpcSequencerClientTransport", logger), + ) + } + + val subscriber = + if (requiresAuthentication) sequencerServiceClient.subscribeVersioned _ + else sequencerServiceClient.subscribeUnauthenticatedVersioned _ + + mkSubscription(subscriber)(SubscriptionResponse.fromVersionedProtoV0(_)(_)) + } + + private def stubWithFreshContext[Req, Resp]( + stub: (Req, StreamObserver[Resp]) => Unit + )(req: Req, obs: StreamObserver[Resp])(implicit traceContext: TraceContext): Unit = { + // we intentionally don't use `Context.current()` as we don't want to inherit the + // cancellation scope from upstream requests + val context: CancellableContext = Context.ROOT.withCancellation() + + context.run { () => + TraceContextGrpc.withGrpcContext(traceContext) { + stub(req, obs) + } + } + } + + private def 
deserializeSubscriptionResponse[R: HasProtoTraceContext](subscriptionResponseP: R)( + fromProto: (R, TraceContext) => ParsingResult[SubscriptionResponse] + ): ParsingResult[OrdinarySerializedEvent] = { + // we take the unusual step of immediately trying to deserialize the trace-context + // so it is available here for logging + implicit val traceContext: TraceContext = SerializableTraceContext + .fromProtoSafeV0Opt(noTracingLogger)( + implicitly[HasProtoTraceContext[R]].traceContext(subscriptionResponseP) + ) + .unwrap + logger.debug("Received a message from the sequencer.") + fromProto(subscriptionResponseP, traceContext).map { response => + OrdinarySequencedEvent(response.signedSequencedEvent, response.trafficState)( + response.traceContext + ) + } + } + + private def recoverOnError(implicit + traceContext: TraceContext + ): Throwable PartialFunction Either[GrpcSequencerSubscriptionError, Nothing] = { + case s: StatusRuntimeException => + val grpcError = if (s.getStatus.getCode() == io.grpc.Status.CANCELLED) { + // Since recoverOnError sits before the kill switch in the stream, + // this error will be passed downstream only if the cancellation came from the server. + // For if the subscription was cancelled by the client via the kill switch, + // the kill switch won't let it through any more. 
+ GrpcServiceUnavailable( + "subscription", + "sequencer", + s.getStatus, + Option(s.getTrailers), + None, + ) + } else + GrpcError("subscription", "sequencer", s) + Left(ExpectedGrpcFailure(grpcError)) + case t: Throwable => + logger.error("The sequencer subscription failed unexpectedly.", t) + Left(UnexpectedGrpcFailure(t)) + } + + override def subscriptionRetryPolicyPekko: SubscriptionErrorRetryPolicyPekko[SubscriptionError] = + new GrpcSubscriptionErrorRetryPolicyPekko() +} + +object GrpcSequencerClientTransportPekko { + sealed trait GrpcSequencerSubscriptionError extends Product with Serializable + + final case class ExpectedGrpcFailure(error: GrpcError) extends GrpcSequencerSubscriptionError + final case class UnexpectedGrpcFailure(ex: Throwable) extends GrpcSequencerSubscriptionError + final case class ResponseParseError(error: ProtoDeserializationError) + extends GrpcSequencerSubscriptionError +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/GrpcSequencerSubscription.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/GrpcSequencerSubscription.scala new file mode 100644 index 0000000000..a7020d4e7d --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/GrpcSequencerSubscription.scala @@ -0,0 +1,299 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.client.transports + +import com.daml.nameof.NameOf.functionFullName +import com.digitalasset.canton.DiscardOps +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.domain.api.v0 +import com.digitalasset.canton.lifecycle.UnlessShutdown.{AbortedDueToShutdown, Outcome} +import com.digitalasset.canton.lifecycle.* +import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.logging.NamedLogging.loggerWithoutTracing +import com.digitalasset.canton.metrics.SequencerClientMetrics +import com.digitalasset.canton.networking.grpc.GrpcError +import com.digitalasset.canton.networking.grpc.GrpcError.GrpcServiceUnavailable +import com.digitalasset.canton.sequencing.SerializedEventHandler +import com.digitalasset.canton.sequencing.client.{SequencerSubscription, SubscriptionCloseReason} +import com.digitalasset.canton.sequencing.protocol.SubscriptionResponse +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.store.SequencedEventStore.OrdinarySequencedEvent +import com.digitalasset.canton.tracing.TraceContext.withTraceContext +import com.digitalasset.canton.tracing.{NoTracing, SerializableTraceContext, TraceContext, Traced} +import com.digitalasset.canton.util.FutureUtil +import com.google.common.annotations.VisibleForTesting +import io.grpc.Context.CancellableContext +import io.grpc.Status.Code.CANCELLED +import io.grpc.stub.StreamObserver +import io.grpc.{Status, StatusRuntimeException} + +import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference} +import scala.concurrent.* +import scala.util.Try + +/** Supply the grpc error that caused the subscription to fail */ +final case class GrpcSubscriptionError(grpcError: GrpcError) + extends SubscriptionCloseReason.SubscriptionError + +/** Supply the grpc error and specially tag permission denied issues */ +final case class 
GrpcPermissionDeniedError(grpcError: GrpcError) + extends SubscriptionCloseReason.PermissionDeniedError + +/** The GPRC subscription observer was called with an unexpected exception */ +final case class GrpcSubscriptionUnexpectedException(exception: Throwable) + extends SubscriptionCloseReason.SubscriptionError + +trait HasProtoTraceContext[R] { + def traceContext(value: R): Option[com.digitalasset.canton.v0.TraceContext] +} +object HasProtoTraceContext { + implicit val subscriptionResponseTraceContext: HasProtoTraceContext[v0.SubscriptionResponse] = + new HasProtoTraceContext[v0.SubscriptionResponse] { + override def traceContext(value: v0.SubscriptionResponse) = value.traceContext + } + + implicit val versionedSubscriptionResponseTraceContext + : HasProtoTraceContext[v0.VersionedSubscriptionResponse] = + new HasProtoTraceContext[v0.VersionedSubscriptionResponse] { + override def traceContext(value: v0.VersionedSubscriptionResponse) = value.traceContext + } +} + +@VisibleForTesting +class GrpcSequencerSubscription[E, R: HasProtoTraceContext] private[transports] ( + context: CancellableContext, + callHandler: Traced[R] => Future[Either[E, Unit]], + override val timeouts: ProcessingTimeout, + protected val loggerFactory: NamedLoggerFactory, +)(implicit executionContext: ExecutionContext) + extends SequencerSubscription[E] + with NoTracing // tracing details are serialized within the items handled inside onNext + { + + /** Stores ongoing work performed by `onNext` or `complete`. + * The contained future is completed whenever these methods are not busy. 
+ */ + private val currentProcessing = new AtomicReference[Future[Unit]](Future.unit) + private val currentAwaitOnNext = new AtomicReference[Promise[UnlessShutdown[Either[E, Unit]]]]( + Promise.successful(Outcome(Right(()))) + ) + + runOnShutdown_(new RunOnShutdown { + override def name: String = "cancel-current-await-in-onNext" + override def done: Boolean = currentAwaitOnNext.get.isCompleted + override def run(): Unit = currentAwaitOnNext.get.trySuccess(AbortedDueToShutdown).discard + }) + + private val cancelledByClient = new AtomicBoolean(false) + + private def cancel(): Unit = + if (!cancelledByClient.getAndSet(true)) context.close() + + private def appendToCurrentProcessing(next: Try[Unit] => Future[Unit]): Unit = { + val newPromise = Promise[Unit]() + val oldFuture = currentProcessing.getAndSet(newPromise.future) + newPromise.completeWith(oldFuture.transformWith { outcome => + FutureUtil.logOnFailure( + Future.fromTry(Try(next(outcome))).flatten, + "An unexpected exception has occurred in currentProcessing.", + ) + }) + } + + override private[canton] def complete( + result: SubscriptionCloseReason[E] + )(implicit traceContext: TraceContext): Unit = { + // Make sure that result is emitted, once the current processing has completed. + appendToCurrentProcessing { outcome => + val completion = outcome.map(_ => result) + if (closeReasonPromise.tryComplete(completion)) { + logger.debug(s"Completed subscription with $completion") + } else { + logger.debug(s"Already completed. 
Discarding $result") + } + Future.unit + } + + // Make sure that no further events will be processed + cancel() + close() + } + + override protected def closeAsync(): Seq[AsyncOrSyncCloseable] = { + // Signal termination by client + val completionF = Future { complete(SubscriptionCloseReason.Closed) } + val onTimeout = (ex: TimeoutException) => { + logger.warn(s"Clean close of the ${this.getClass} timed out", ex) + closeReasonPromise.tryFailure(ex).discard[Boolean] + } + + /* + Potential reason of failure to close within the timeout: current processing + is blocked, probably because the `callHandler` is not making progress. + For example, this might happen due to a db outage. + */ + Seq( + SyncCloseable("grpc-context", cancel()), // Tell GRPC to stop receiving messages + AsyncCloseable( + "grpc-sequencer-subscription", + completionF, + timeouts.shutdownShort, + onTimeout = onTimeout, + ), + ) + } + + @VisibleForTesting // so unit tests can call onNext, onError and onComplete + private[transports] val observer = new StreamObserver[R] { + override def onNext(value: R): Unit = { + // we take the unusual step of immediately trying to deserialize the trace-context + // so it is available here for logging + implicit val traceContext: TraceContext = + SerializableTraceContext + .fromProtoSafeV0Opt(loggerWithoutTracing(logger))( + implicitly[HasProtoTraceContext[R]].traceContext(value) + ) + .unwrap + + logger.debug("Received a message from the sequencer.") + + val current = Promise[Unit]() + val closeReasonOO = performUnlessClosing(functionFullName) { + try { + appendToCurrentProcessing(_ => current.future) + + // as we're responsible for calling the handler we block onNext from processing further items + // calls to onNext are guaranteed to happen in order + + val handlerResult = Try { + val cancelableAwait = Promise[UnlessShutdown[Either[E, Unit]]]() + currentAwaitOnNext.set(cancelableAwait) + cancelableAwait.completeWith(callHandler(Traced(value)).map(Outcome(_))) + 
/** Maps stream failures reported by gRPC onto subscription close reasons.
  * CANCELLED is benign when the client itself cancelled; otherwise it indicates a
  * server-side problem. PERMISSION_DENIED is tagged specially so callers can react
  * to revoked authorization.
  */
override def onError(t: Throwable): Unit = {
  t match {
    case s: StatusRuntimeException if s.getStatus.getCode == CANCELLED =>
      if (cancelledByClient.get()) {
        // We initiated the cancellation ourselves, so this is a clean shutdown.
        logger.info(
          "GRPC subscription successfully closed due to client shutdown.",
          s.getStatus.getCause,
        )
        complete(SubscriptionCloseReason.Closed)
      } else {
        // As the client has not cancelled the subscription, the problem must be on the server side.
        val grpcError =
          GrpcServiceUnavailable(
            "subscription",
            "sequencer",
            s.getStatus,
            Option(s.getTrailers),
            None,
          )
        complete(GrpcSubscriptionError(grpcError))
      }
    case s: StatusRuntimeException =>
      val grpcError = GrpcError("subscription", "sequencer", s)
      complete(
        // Permission problems get their own close reason so they are not retried blindly.
        if (s.getStatus.getCode == Status.Code.PERMISSION_DENIED)
          GrpcPermissionDeniedError(grpcError)
        else GrpcSubscriptionError(grpcError)
      )
    case exception: Throwable =>
      // Not a gRPC status at all — this is unexpected; log and close accordingly.
      logger.error("The sequencer subscription failed unexpectedly.", t)
      complete(GrpcSubscriptionUnexpectedException(exception))
  }
}
s"Unable to parse response from sequencer. Discarding message. Reason: $err" + ) + ), + response => { + val signedEvent = response.signedSequencedEvent + val ordinaryEvent = + OrdinarySequencedEvent(signedEvent, response.trafficState)(response.traceContext) + handler(ordinaryEvent) + }, + ) + } + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/GrpcSubscriptionErrorRetryPolicy.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/GrpcSubscriptionErrorRetryPolicy.scala new file mode 100644 index 0000000000..274bc56561 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/GrpcSubscriptionErrorRetryPolicy.scala @@ -0,0 +1,84 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.client.transports + +import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.networking.grpc.GrpcError +import com.digitalasset.canton.sequencing.client.CheckedSubscriptionErrorRetryPolicy +import com.digitalasset.canton.tracing.TraceContext +import io.grpc.Status + +import GrpcSubscriptionErrorRetryPolicy.* + +class GrpcSubscriptionErrorRetryPolicy(protected val loggerFactory: NamedLoggerFactory) + extends CheckedSubscriptionErrorRetryPolicy[GrpcSubscriptionError] + with NamedLogging { + override protected def retryInternal(error: GrpcSubscriptionError, receivedItems: Boolean)( + implicit traceContext: TraceContext + ): Boolean = logAndDetermineRetry(error.grpcError, receivedItems) +} + +object GrpcSubscriptionErrorRetryPolicy { + implicit class EnhancedGrpcStatus(val status: io.grpc.Status) extends AnyVal { + def hasClosedChannelExceptionCause: Boolean = status.getCause match { + case _: 
/** Logs the given error and decides whether the sequencer subscription should be
  * re-established. Returns true only for failure modes that experience shows to be
  * transient (unavailability, expired auth tokens, and certain INTERNAL/UNKNOWN
  * errors produced by intermediaries).
  *
  * @param grpcError     the classified gRPC error from the failed subscription
  * @param receivedItems whether the subscription delivered any events before failing
  */
private[transports] def logAndDetermineRetry(
    grpcError: GrpcError,
    receivedItems: Boolean,
)(implicit loggingContext: ErrorLoggingContext): Boolean = {
  grpcError match {
    case _: GrpcError.GrpcServiceUnavailable =>
      // Unavailability is assumed transient; log the full cause chain and retry.
      val causes = Seq(grpcError.status.getDescription) ++ GrpcError.collectCauses(
        Option(grpcError.status.getCause)
      )
      loggingContext.info(
        s"Trying to reconnect to give the sequencer the opportunity to become available again (after ${causes
          .mkString(", ")})"
      )
      true

    case error: GrpcError.GrpcRequestRefusedByServer =>
      // A refusal due to a missing auth token is expected to heal once the token refreshes.
      val retry = error.isAuthenticationTokenMissing
      if (retry)
        loggingContext.info(
          s"Trying to reconnect to give the sequencer the opportunity to refresh the authentication token."
        )
      else
        loggingContext.debug("Not trying to reconnect.")
      retry

    case serverError: GrpcError.GrpcServerError
        if receivedItems && serverError.status.getCode == Status.INTERNAL.getCode =>
      // a connection reset by an intermediary can cause GRPC to raise an INTERNAL error.
      // (this is seen when the GCloud load balancer times out subscriptions on the global domain)
      // if we've received any items during the course of the subscription we will assume its fine to reconnect.
      // if there is actually an application issue with the server, we'd expect it to immediately fail and then
      // it will not retry its connection
      loggingContext.debug(
        s"After successfully receiving some events the sequencer subscription received an error. Retrying subscription."
      )
      true

    case serverError: GrpcError.GrpcServerError
        if serverError.status.getCode == Status.UNKNOWN.getCode && serverError.status.hasClosedChannelExceptionCause =>
      // In this conversation https://gitter.im/grpc/grpc?at=5f464aa854288c687ee06a25
      // someone who maintains the grpc codebase explains:
      // "'Channel closed' is when we have no knowledge as to what went wrong; it could be anything".
      // In practice, we've seen this peculiar error sometimes appear when the sequencer goes unavailable,
      // so let's make sure to retry.
      loggingContext.debug(
        s"Closed channel exception can appear when the server becomes unavailable. Retrying."
      )
      true
    case _: GrpcError.GrpcClientGaveUp | _: GrpcError.GrpcClientError |
        _: GrpcError.GrpcServerError =>
      // Everything else is assumed permanent; give up on this subscription.
      loggingContext.info("Not reconnecting.")
      false
  }
}
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.client.transports + +import com.digitalasset.canton.logging.ErrorLoggingContext +import com.digitalasset.canton.sequencing.client.SubscriptionErrorRetryPolicyPekko +import com.digitalasset.canton.sequencing.client.transports.GrpcSequencerClientTransportPekko.GrpcSequencerSubscriptionError + +class GrpcSubscriptionErrorRetryPolicyPekko + extends SubscriptionErrorRetryPolicyPekko[GrpcSequencerSubscriptionError] { + override def retryOnError( + subscriptionError: GrpcSequencerSubscriptionError, + receivedItems: Boolean, + )(implicit loggingContext: ErrorLoggingContext): Boolean = { + subscriptionError match { + case GrpcSequencerClientTransportPekko.ExpectedGrpcFailure(error) => + GrpcSubscriptionErrorRetryPolicy.logAndDetermineRetry(error, receivedItems) + case GrpcSequencerClientTransportPekko.UnexpectedGrpcFailure(ex) => + loggingContext.error(s"Unexpected error type: $ex") + false + case GrpcSequencerClientTransportPekko.ResponseParseError(error) => + loggingContext.error(s"Failed to parse sequenced event: $error") + false + } + } + + override def retryOnException(ex: Throwable)(implicit + loggingContext: ErrorLoggingContext + ): Boolean = false +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/SequencerClientTransport.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/SequencerClientTransport.scala new file mode 100644 index 0000000000..8b641688d8 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/SequencerClientTransport.scala @@ -0,0 +1,78 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.client.transports + +import cats.data.EitherT +import com.digitalasset.canton.lifecycle.FlagCloseable +import com.digitalasset.canton.sequencing.SerializedEventHandler +import com.digitalasset.canton.sequencing.client.{ + SendAsyncClientError, + SequencerSubscription, + SubscriptionErrorRetryPolicy, +} +import com.digitalasset.canton.sequencing.handshake.SupportsHandshake +import com.digitalasset.canton.sequencing.protocol.* +import com.digitalasset.canton.tracing.TraceContext + +import scala.concurrent.Future +import scala.concurrent.duration.Duration + +/** Implementation dependent operations for a client to write to a domain sequencer. */ +trait SequencerClientTransportCommon extends FlagCloseable with SupportsHandshake { + + /** Sends a signed submission request to the sequencer. + * If we failed to make the request, an error will be returned. + * If the sequencer accepted (or may have accepted) the request this call will return successfully. + */ + def sendAsyncSigned( + request: SignedContent[SubmissionRequest], + timeout: Duration, + )(implicit + traceContext: TraceContext + ): EitherT[Future, SendAsyncClientError, Unit] + + def sendAsyncUnauthenticatedVersioned( + request: SubmissionRequest, + timeout: Duration, + )(implicit + traceContext: TraceContext + ): EitherT[Future, SendAsyncClientError, Unit] + + /** Acknowledge that we have successfully processed all events up to and including the given timestamp. + * The client should then never subscribe for events from before this point. 
/** Implementation dependent operations for a client to read and write to a domain sequencer. */
trait SequencerClientTransport extends SequencerClientTransportCommon {

  /** Create a single subscription to read events from the Sequencer for this member starting from the counter defined in the request.
    * Transports are currently responsible for calling the supplied handler.
    * The handler must not be called concurrently and must receive events in-order.
    * If the handler fails with an exception the subscription should close with a [[com.digitalasset.canton.sequencing.client.SubscriptionCloseReason.HandlerError]].
    * If the subscription fails for a technical reason it should close with a [[com.digitalasset.canton.sequencing.client.SubscriptionCloseReason.SubscriptionError]].
    * The transport is not expected to provide retries of subscriptions.
    */
  def subscribe[E](request: SubscriptionRequest, handler: SerializedEventHandler[E])(implicit
      traceContext: TraceContext
  ): SequencerSubscription[E]

  /** Variant of [[subscribe]] for members that have not yet authenticated with the sequencer. */
  def subscribeUnauthenticated[E](request: SubscriptionRequest, handler: SerializedEventHandler[E])(
      implicit traceContext: TraceContext
  ): SequencerSubscription[E]

  /** The transport can decide which errors will cause the sequencer client to not try to reestablish a subscription */
  def subscriptionRetryPolicy: SubscriptionErrorRetryPolicy
}
+ */ + def subscribe(request: SubscriptionRequest)(implicit + traceContext: TraceContext + ): SequencerSubscriptionPekko[SubscriptionError] + + def subscribeUnauthenticated(request: SubscriptionRequest)(implicit + traceContext: TraceContext + ): SequencerSubscriptionPekko[SubscriptionError] + + /** The transport can decide which errors will cause the sequencer client to not try to reestablish a subscription */ + def subscriptionRetryPolicyPekko: SubscriptionErrorRetryPolicyPekko[SubscriptionError] + +} + +object SequencerClientTransportPekko { + type Aux[E] = SequencerClientTransportPekko { type SubscriptionError = E } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/replay/ReplayingEventsSequencerClientTransport.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/replay/ReplayingEventsSequencerClientTransport.scala new file mode 100644 index 0000000000..cc2f3b2d94 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/replay/ReplayingEventsSequencerClientTransport.scala @@ -0,0 +1,174 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.client.transports.replay + +import cats.data.EitherT +import com.digitalasset.canton.DiscardOps +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.sequencing.client.SequencerClient.ReplayStatistics +import com.digitalasset.canton.sequencing.client.* +import com.digitalasset.canton.sequencing.client.transports.replay.ReplayingEventsSequencerClientTransport.ReplayingSequencerSubscription +import com.digitalasset.canton.sequencing.client.transports.{ + SequencerClientTransport, + SequencerClientTransportPekko, +} +import com.digitalasset.canton.sequencing.handshake.HandshakeRequestError +import com.digitalasset.canton.sequencing.protocol.{ + AcknowledgeRequest, + HandshakeRequest, + HandshakeResponse, + SignedContent, + SubmissionRequest, + SubscriptionRequest, + TopologyStateForInitRequest, + TopologyStateForInitResponse, +} +import com.digitalasset.canton.sequencing.{SequencerClientRecorder, SerializedEventHandler} +import com.digitalasset.canton.topology.store.StoredTopologyTransactionsX +import com.digitalasset.canton.tracing.{TraceContext, Traced} +import com.digitalasset.canton.util.ShowUtil.* +import com.digitalasset.canton.util.{ErrorUtil, FutureUtil, MonadUtil} +import com.digitalasset.canton.version.ProtocolVersion + +import java.nio.file.Path +import java.time.{Duration as JDuration} +import scala.concurrent.duration.Duration +import scala.concurrent.{ExecutionContext, Future} + +/** Transport implementation for replaying messages from a file. + * @param replayPath points to a file containing events to be replayed. + * The events must be serialized versions of `TracedSignedSerializedSequencedEvent`. 
/** Transport implementation for replaying messages from a file: all write operations are
  * no-ops, and subscribing feeds the recorded events from `replayPath` to the handler.
  */
class ReplayingEventsSequencerClientTransport(
    protocolVersion: ProtocolVersion,
    replayPath: Path,
    override protected val timeouts: ProcessingTimeout,
    override protected val loggerFactory: NamedLoggerFactory,
)(implicit executionContext: ExecutionContext)
    extends SequencerClientTransport
    with SequencerClientTransportPekko
    with NamedLogging {

  /** Does nothing */
  override def sendAsyncSigned(
      request: SignedContent[SubmissionRequest],
      timeout: Duration,
  )(implicit traceContext: TraceContext): EitherT[Future, SendAsyncClientError, Unit] =
    EitherT.rightT(())

  /** Does nothing */
  override def sendAsyncUnauthenticatedVersioned(request: SubmissionRequest, timeout: Duration)(
      implicit traceContext: TraceContext
  ): EitherT[Future, SendAsyncClientError, Unit] = EitherT.rightT(())

  /** Does nothing */
  override def acknowledge(request: AcknowledgeRequest)(implicit
      traceContext: TraceContext
  ): Future[Unit] = Future.unit

  /** Does nothing */
  override def acknowledgeSigned(request: SignedContent[AcknowledgeRequest])(implicit
      traceContext: TraceContext
  ): EitherT[Future, String, Unit] =
    EitherT.rightT(())

  /** Replays all events in `replayPath` to the handler.
    * The replay runs asynchronously (fire-and-forget via `FutureUtil.doNotAwait`) and
    * records timing statistics in `SequencerClient.replayStatistics` once done.
    * Handler errors are logged but do not stop the replay.
    */
  override def subscribe[E](request: SubscriptionRequest, handler: SerializedEventHandler[E])(
      implicit traceContext: TraceContext
  ): ReplayingSequencerSubscription[E] = {
    logger.info("Loading messages for replaying...")
    val messages = ErrorUtil.withThrowableLogging {
      SequencerClientRecorder.loadEvents(replayPath, logger)
    }
    logger.info(s"Start feeding ${messages.size} messages to the subscription...")
    val startTime = CantonTimestamp.now()
    val startNanos = System.nanoTime()
    // Feed events to the handler strictly sequentially to preserve event order.
    val replayF = MonadUtil
      .sequentialTraverse_(messages) { e =>
        logger.debug(
          s"Replaying event with sequencer counter ${e.counter} and timestamp ${e.timestamp}"
        )(e.traceContext)
        for {
          unitOrErr <- handler(e)
        } yield unitOrErr match {
          case Left(err) =>
            logger.error(s"The sequencer handler returned an error: $err")
          case Right(()) =>
        }
      }
      .map { _ =>
        val stopNanos = System.nanoTime()
        val duration = JDuration.ofNanos(stopNanos - startNanos)
        logger.info(
          show"Finished feeding ${messages.size} messages within $duration to the subscription."
        )
        SequencerClient.replayStatistics.add(
          ReplayStatistics(replayPath, messages.size, startTime, duration)
        )
      }

    FutureUtil.doNotAwait(replayF, "An exception has occurred while replaying messages.")
    new ReplayingSequencerSubscription(timeouts, loggerFactory)
  }

  // Replay does not distinguish authenticated from unauthenticated subscriptions.
  override def subscribeUnauthenticated[E](
      request: SubscriptionRequest,
      handler: SerializedEventHandler[E],
  )(implicit traceContext: TraceContext): SequencerSubscription[E] = subscribe(request, handler)

  /** Will never request a retry. */
  override def subscriptionRetryPolicy: SubscriptionErrorRetryPolicy =
    SubscriptionErrorRetryPolicy.never

  /** Will always succeed. */
  override def handshake(request: HandshakeRequest)(implicit
      traceContext: TraceContext
  ): EitherT[Future, HandshakeRequestError, HandshakeResponse] =
    EitherT.rightT(HandshakeResponse.Success(protocolVersion))

  /** Returns an empty topology state; replay never serves real topology transactions. */
  override def downloadTopologyStateForInit(request: TopologyStateForInitRequest)(implicit
      traceContext: TraceContext
  ): EitherT[Future, String, TopologyStateForInitResponse] =
    EitherT.rightT[Future, String](
      TopologyStateForInitResponse(
        topologyTransactions = Traced(StoredTopologyTransactionsX.empty)
      )
    )

  override type SubscriptionError = Nothing

  /** The Pekko-stream subscription is not supported for replay; always throws. */
  override def subscribe(request: SubscriptionRequest)(implicit
      traceContext: TraceContext
  ): SequencerSubscriptionPekko[SubscriptionError] =
    // TODO(#13789) figure out how to implement this
    ErrorUtil.internalError(
      new UnsupportedOperationException("subscribe(SubmissionRequest) is not yet implemented")
    )

  override def subscribeUnauthenticated(request: SubscriptionRequest)(implicit
      traceContext: TraceContext
  ): SequencerSubscriptionPekko[Nothing] = subscribe(request)

  override def subscriptionRetryPolicyPekko: SubscriptionErrorRetryPolicyPekko[Nothing] =
    SubscriptionErrorRetryPolicyPekko.never
}
*/ + class ReplayingSequencerSubscription[E]( + override protected val timeouts: ProcessingTimeout, + override protected val loggerFactory: NamedLoggerFactory, + )(implicit val executionContext: ExecutionContext) + extends SequencerSubscription[E] { + override private[canton] def complete(reason: SubscriptionCloseReason[E])(implicit + traceContext: TraceContext + ): Unit = { + closeReasonPromise.trySuccess(reason).discard[Boolean] + close() + } + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/replay/ReplayingSendsSequencerClientTransport.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/replay/ReplayingSendsSequencerClientTransport.scala new file mode 100644 index 0000000000..070c8199a3 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/replay/ReplayingSendsSequencerClientTransport.scala @@ -0,0 +1,537 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.client.transports.replay + +import cats.data.EitherT +import cats.syntax.traverse.* +import com.codahale.metrics.{ConsoleReporter, MetricFilter, MetricRegistry} +import com.daml.metrics.api.MetricsContext.withEmptyMetricsContext +import com.daml.nameof.NameOf.functionFullName +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.crypto.HashPurpose +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.lifecycle.* +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.metrics.SequencerClientMetrics +import com.digitalasset.canton.sequencing.client.* +import com.digitalasset.canton.sequencing.client.transports.{ + SequencerClientTransport, + SequencerClientTransportCommon, + SequencerClientTransportPekko, +} +import com.digitalasset.canton.sequencing.handshake.HandshakeRequestError +import com.digitalasset.canton.sequencing.protocol.* +import com.digitalasset.canton.sequencing.{ + OrdinarySerializedEvent, + SequencerClientRecorder, + SerializedEventHandler, +} +import com.digitalasset.canton.topology.Member +import com.digitalasset.canton.topology.store.StoredTopologyTransactionsX +import com.digitalasset.canton.tracing.TraceContext.withNewTraceContext +import com.digitalasset.canton.tracing.{NoTracing, TraceContext, Traced} +import com.digitalasset.canton.util.ResourceUtil.withResource +import com.digitalasset.canton.util.{ErrorUtil, OptionUtil, PekkoUtil} +import com.digitalasset.canton.version.ProtocolVersion +import com.digitalasset.canton.{DiscardOps, SequencerCounter} +import org.apache.pekko.NotUsed +import org.apache.pekko.stream.Materializer +import org.apache.pekko.stream.scaladsl.{Keep, Sink, Source} + +import java.io.{ByteArrayOutputStream, PrintStream} +import java.nio.file.Path +import java.time.Instant +import java.util.concurrent.atomic.AtomicReference 
+import scala.collection.concurrent.TrieMap +import scala.concurrent.duration.{Duration, FiniteDuration} +import scala.concurrent.{ExecutionContext, Future, Promise} +import scala.jdk.DurationConverters.* +import scala.util.chaining.* + +/** Replays previously recorded sends against the configured sequencer and using a real sequencer client transport. + * Records the latencies/rates to complete the send itself, and latencies/rates for an event that was caused by the send to be witnessed. + * These metrics are currently printed to stdout. + * Sequencers are able to drop sends so to know when all sends have likely been sequenced we simply wait for a period + * where no events are received for a configurable duration. This isn't perfect as technically a sequencer could stall, + * however the inflight gauge will report a number greater than 0 indicating that these sends have gone missing. + * Clients are responsible for interacting with the transport to initiate a replay and wait for observed events to + * be idle. A reference can be obtained to this transport component by waiting on the future provided in [[ReplayAction.SequencerSends]]. + * This testing transport is very stateful and the metrics will only make sense for a single replay, + * however currently multiple or even concurrent calls are not prevented (just don't). 
 */
trait ReplayingSendsSequencerClientTransport extends SequencerClientTransportCommon {
  import ReplayingSendsSequencerClientTransport.*

  /** Replay all recorded sends with at most `sendParallelism` sends in flight at once.
    * Completes with a report of successful / overloaded / errored sends.
    */
  def replay(sendParallelism: Int): Future[SendReplayReport]

  /** Wait until no events have been observed for `duration`, starting the subscription
    * from `startFromCounter`. Completes with a report of what was received.
    */
  def waitForIdle(
      duration: FiniteDuration,
      startFromCounter: SequencerCounter = SequencerCounter.Genesis,
  ): Future[EventsReceivedReport]

  /** Dump the submission related metrics into a string for periodic reporting during the replay test */
  def metricReport(registry: MetricRegistry): String
}

object ReplayingSendsSequencerClientTransport {
  /** Running tally of replayed sends. `sendDuration` is by-name so the final duration is
    * only evaluated when the report is rendered.
    */
  final case class SendReplayReport(successful: Int = 0, overloaded: Int = 0, errors: Int = 0)(
      sendDuration: => Option[java.time.Duration]
  ) {
    // Classify a send result into one of the three counters.
    def update(result: Either[SendAsyncClientError, Unit]): SendReplayReport = result match {
      case Left(SendAsyncClientError.RequestRefused(_: SendAsyncError.Overloaded)) =>
        copy(overloaded = overloaded + 1)
      case Left(_) => copy(errors = errors + 1)
      case Right(_) => copy(successful = successful + 1)
    }

    // Hand-written copy so the second (by-name) parameter list is threaded through;
    // the compiler-generated case-class copy would not carry it.
    def copy(
        successful: Int = this.successful,
        overloaded: Int = this.overloaded,
        errors: Int = this.errors,
    ): SendReplayReport = SendReplayReport(successful, overloaded, errors)(sendDuration)

    lazy val total: Int = successful + overloaded + errors

    override def toString: String = {
      val durationSecsText = sendDuration.map(_.getSeconds).map(secs => s"${secs}s").getOrElse("?")
      s"Sent $total send requests in $durationSecsText ($successful successful, $overloaded overloaded, $errors errors)"
    }
  }

  /** Summary of events observed while waiting for idleness. */
  final case class EventsReceivedReport(
      elapsedDuration: FiniteDuration,
      totalEventsReceived: Int,
      finishedAtCounter: SequencerCounter,
  ) {
    override def toString: String =
      s"Received $totalEventsReceived events within ${elapsedDuration.toSeconds}s"
  }

}

abstract class ReplayingSendsSequencerClientTransportCommon(
    protocolVersion: ProtocolVersion,
    recordedPath: Path,
    replaySendsConfig: ReplayAction.SequencerSends,
    member: Member,
    underlyingTransport: SequencerClientTransportCommon,
    requestSigner: RequestSigner,
    metrics: SequencerClientMetrics,
    override protected val timeouts: ProcessingTimeout,
    override protected val loggerFactory: NamedLoggerFactory,
)(implicit executionContext: ExecutionContext, materializer: Materializer)
    extends ReplayingSendsSequencerClientTransport
    with NamedLogging
    with NoTracing
    with FlagCloseableAsync {
  import ReplayingSendsSequencerClientTransport.*

  // Sends awaiting their corresponding event, keyed by message-id, with the send start time.
  private val pendingSends = TrieMap[MessageId, CantonTimestamp]()
  // Timestamps of the first and most recent send of this replay (None before any send).
  private val firstSend = new AtomicReference[Option[CantonTimestamp]](None)
  private val lastSend = new AtomicReference[Option[CantonTimestamp]](None)
  // NOTE(review): written in updateMetrics but not read anywhere in this file — presumably
  // for debugging/inspection; confirm before removing.
  private val lastReceivedEvent = new AtomicReference[Option[CantonTimestamp]](None)

  // Loaded eagerly at construction; any exception while reading the recording is logged.
  private val submissionRequests: List[SubmissionRequest] = withNewTraceContext {
    implicit traceContext =>
      logger.debug("Loading recorded submission requests")
      ErrorUtil.withThrowableLogging {
        SequencerClientRecorder.loadSubmissions(recordedPath, logger)
      }
  }

  // Signals to the tests that this transport is ready to interact with
  replaySendsConfig.publishTransport(this)

  // Wall-clock span from the first to the last send, if both happened.
  private def sendDuration: Option[java.time.Duration] =
    OptionUtil
      .zipWith(firstSend.get().map(_.toInstant), lastSend.get().map(_.toInstant))(
        java.time.Duration.between
      )

  /** Re-submit one recorded request through the underlying transport, recording metrics.
    * Never fails the returned future; errors are surfaced in the Either.
    */
  private def replaySubmit(
      submission: SubmissionRequest
  ): Future[Either[SendAsyncClientError, Unit]] = {
    val startedAt = CantonTimestamp.now()
    // we'll correlate received events by looking at their message-id and calculate the
    // latency of the send by comparing now to the time the event eventually arrives
    pendingSends.put(submission.messageId, startedAt).discard

    // Picking a correct max sequencing time could be technically difficult,
    // so instead we pick the biggest point in time that should ensure the sequencer always
    // attempts to sequence valid sends
    def extendMaxSequencingTime(submission: SubmissionRequest): SubmissionRequest =
      submission.copy(maxSequencingTime = CantonTimestamp.MaxValue)

    // Log and count the outcome; the result itself is passed through unchanged (tap).
    def handleSendResult(
        result: Either[SendAsyncClientError, Unit]
    ): Either[SendAsyncClientError, Unit] =
      withEmptyMetricsContext { implicit metricsContext =>
        result.tap {
          case Left(SendAsyncClientError.RequestRefused(_: SendAsyncError.Overloaded)) =>
            logger.warn(
              s"Sequencer is overloaded and rejected our send. Please tune the sequencer to handle more concurrent requests."
            )
            metrics.submissions.overloaded.inc()

          case Left(error) =>
            // log, increase error counter, then ignore
            logger.warn(s"Send request failed: $error")

          case Right(_) =>
            // we've successfully sent the send request
            metrics.submissions.inFlight.inc()
            val sentAt = CantonTimestamp.now()
            metrics.submissions.sends
              .update(java.time.Duration.between(startedAt.toInstant, sentAt.toInstant))
        }
      }

    def updateTimestamps[A](item: A): A = {
      val now = CantonTimestamp.now()
      // CAS: only the very first send sets firstSend; every send advances lastSend
      firstSend.compareAndSet(None, Some(now))
      lastSend.set(Some(now))
      item
    }

    TraceContext.withNewTraceContext { traceContext =>
      val withExtendedMst = extendMaxSequencingTime(submission)
      val sendET = for {
        // We need a new signature because we've modified the max sequencing time.
        signedRequest <- requestSigner
          .signRequest(withExtendedMst, HashPurpose.SubmissionRequestSignature)(
            implicitly,
            traceContext,
          )
          .leftMap(error =>
            SendAsyncClientError.RequestRefused(SendAsyncError.RequestRefused(error))
          )
        _ <- underlyingTransport.sendAsyncSigned(
          signedRequest,
          replaySendsConfig.sendTimeout.toScala,
        )(traceContext)
      } yield ()

      sendET.value
        .map(handleSendResult)
        .map(updateTimestamps)
    }
  }

  // Streams all recorded submissions through replaySubmit with bounded parallelism and
  // folds the outcomes into a single SendReplayReport.
  override def replay(sendParallelism: Int): Future[SendReplayReport] = withNewTraceContext {
    implicit traceContext =>
      logger.info(s"Replaying ${submissionRequests.size} sends")

      val submissionReplay = Source(submissionRequests)
        .mapAsyncUnordered(sendParallelism)(replaySubmit)
        .toMat(Sink.fold(SendReplayReport()(sendDuration))(_.update(_)))(Keep.right)

      PekkoUtil.runSupervised(logger.error("Failed to run submission replay", _), submissionReplay)
  }

  // Spins up an idleness monitor (which subscribes immediately) and closes it once
  // idleness is detected, regardless of success or failure.
  override def waitForIdle(
      duration: FiniteDuration,
      startFromCounter: SequencerCounter = SequencerCounter.Genesis,
  ): Future[EventsReceivedReport] = {
    val monitor = new SimpleIdlenessMonitor(startFromCounter, duration, timeouts, loggerFactory)

    monitor.idleF transform { result =>
      monitor.close()

      result
    }
  }

  /** Dump the submission related metrics into a string for periodic reporting during the replay test */
  override def metricReport(registry: MetricRegistry): String =
    withResource(new ByteArrayOutputStream()) { os =>
      withResource(new PrintStream(os)) { ps =>
        withResource(
          ConsoleReporter
            .forRegistry(registry)
            .filter(MetricFilter.startsWith(metrics.submissions.prefix.toString()))
            .outputTo(ps)
            .build()
        ) { reporter =>
          reporter.report()
          ps.flush()
          os.toString()
        }
      }
    }

  // Implemented by subclasses to wire a concrete subscription mechanism; the returned
  // AutoCloseable tears the subscription down.
  protected def subscribe(
      request: SubscriptionRequest,
      handler: SerializedEventHandler[NotUsed],
  ): AutoCloseable

  /** Monitor that when created subscribes the underlying transports and waits for Deliver or DeliverError events
    * to stop being observed for the given [[idlenessDuration]] (suggesting that there are no more events being
    * produced for the member).
    */
  private class SimpleIdlenessMonitor(
      readFrom: SequencerCounter,
      idlenessDuration: FiniteDuration,
      override protected val timeouts: ProcessingTimeout,
      protected val loggerFactory: NamedLoggerFactory,
  ) extends FlagCloseableAsync
      with NamedLogging {
    private case class State(
        startedAt: CantonTimestamp,
        lastEventAt: CantonTimestamp,
        eventCounter: Int,
        lastCounter: SequencerCounter,
    )

    private val lastDeliverRef: AtomicReference[Option[State]] = new AtomicReference(None)
    private val idleP = Promise[EventsReceivedReport]()

    // Schedules the next idleness check for (idlenessDuration - time since last event);
    // skipped once the monitor is closing.
    private def scheduleCheck(): Unit = {
      performUnlessClosing(functionFullName) {
        val elapsed = lastDeliverRef
          .get()
          .map(_.lastEventAt.toInstant)
          .map(java.time.Duration.between(_, Instant.now()))
          .getOrElse(java.time.Duration.ZERO)
        val nextCheckDuration = idlenessDuration.toJava.minus(elapsed)

        val _ = materializer.scheduleOnce(nextCheckDuration.toScala, () => checkIfIdle())
      }.onShutdown(())
    }

    scheduleCheck() // kick off checks

    // Records an observed event: initializes State on the first event, otherwise bumps
    // the event count and last-seen timestamp/counter.
    private def updateLastDeliver(counter: SequencerCounter): Unit = {
      val _ = lastDeliverRef.updateAndGet {
        case None =>
          Some(
            State(
              startedAt = CantonTimestamp.now(),
              lastEventAt = CantonTimestamp.now(),
              eventCounter = 1,
              lastCounter = counter,
            )
          )
        case Some(state @ State(_, _, eventCounter, _)) =>
          Some(
            state.copy(
              lastEventAt = CantonTimestamp.now(),
              lastCounter = counter,
              eventCounter = eventCounter + 1,
            )
          )
      }
    }

    // Completes idleP (at most once) if no event arrived for idlenessDuration;
    // otherwise re-schedules itself. Never fires before the first event is seen.
    private def checkIfIdle(): Unit = {
      val isIdle = lastDeliverRef.get() exists {
        case State(_startedAt, lastEventAt, eventCounter, lastCounter) =>
          val elapsed =
            java.time.Duration.between(lastEventAt.toInstant, CantonTimestamp.now().toInstant)
          val isIdle = elapsed.compareTo(idlenessDuration.toJava) >= 0

          if (isIdle) {
            idleP
              .trySuccess(
                EventsReceivedReport(
                  elapsed.toScala,
                  totalEventsReceived = eventCounter,
                  finishedAtCounter = lastCounter,
                )
              )
              .discard
          }

          isIdle
      }

      if (!isIdle) scheduleCheck() // schedule the next check
    }

    // Matches the event back to a pending send via its message-id and records the
    // end-to-end sequencing latency.
    private def updateMetrics(event: SequencedEvent[ClosedEnvelope]): Unit =
      withEmptyMetricsContext { implicit metricsContext =>
        val messageIdO: Option[MessageId] = event match {
          case Deliver(_, _, _, messageId, _) => messageId
          case DeliverError(_, _, _, messageId, _) => Some(messageId)
          case _ => None
        }

        messageIdO.flatMap(pendingSends.remove) foreach { sentAt =>
          val latency = java.time.Duration.between(sentAt.toInstant, Instant.now())
          metrics.submissions.inFlight.dec()
          metrics.submissions.sequencingTime.update(latency)
          lastReceivedEvent.set(Some(CantonTimestamp.now()))
        }
      }

    // Subscription handler: update metrics and idleness bookkeeping, always succeed.
    private def handle(event: OrdinarySerializedEvent): Future[Either[NotUsed, Unit]] = {
      val content = event.signedEvent.content

      updateMetrics(content)
      updateLastDeliver(content.counter)

      Future.successful(Right(()))
    }

    val idleF: Future[EventsReceivedReport] = idleP.future

    private val subscription =
      subscribe(SubscriptionRequest(member, readFrom, protocolVersion), handle)

    override protected def closeAsync(): Seq[AsyncOrSyncCloseable] =
      Seq(
        SyncCloseable("idleness-subscription", subscription.close())
      )
  }

  /** We're replaying sends so shouldn't allow the app to send any new ones */
  override def sendAsyncSigned(
      request: SignedContent[SubmissionRequest],
      timeout: Duration,
  )(implicit traceContext: TraceContext): EitherT[Future, SendAsyncClientError, Unit] =
    EitherT.rightT(())

  /** We're replaying sends so shouldn't allow the app to send any new ones */
  override def sendAsyncUnauthenticatedVersioned(
      request: SubmissionRequest,
      timeout: Duration,
  )(implicit
      traceContext: TraceContext
  ): EitherT[Future, SendAsyncClientError, Unit] = EitherT.rightT(())

  // Acknowledgements are no-ops during replay.
  override def acknowledge(request: AcknowledgeRequest)(implicit
      traceContext: TraceContext
  ): Future[Unit] = Future.unit

  override def acknowledgeSigned(request: SignedContent[AcknowledgeRequest])(implicit
      traceContext: TraceContext
  ): EitherT[Future, String, Unit] =
    EitherT.rightT(())

  // Handshakes always succeed locally with the configured protocol version.
  override def handshake(request: HandshakeRequest)(implicit
      traceContext: TraceContext
  ): EitherT[Future, HandshakeRequestError, HandshakeResponse] =
    EitherT.rightT(HandshakeResponse.Success(protocolVersion))

  // Topology bootstrap returns an empty snapshot during replay.
  override def downloadTopologyStateForInit(request: TopologyStateForInitRequest)(implicit
      traceContext: TraceContext
  ): EitherT[Future, String, TopologyStateForInitResponse] =
    EitherT.rightT(TopologyStateForInitResponse(Traced(StoredTopologyTransactionsX.empty)))

  override protected def closeAsync(): Seq[AsyncOrSyncCloseable] = Seq(
    SyncCloseable("underlying-transport", underlyingTransport.close())
  )

}

/** Replay transport over a transport that supports both the classic and the Pekko
  * subscription interfaces; event subscriptions are delegated to the underlying transport.
  */
class ReplayingSendsSequencerClientTransportImpl(
    protocolVersion: ProtocolVersion,
    recordedPath: Path,
    replaySendsConfig: ReplayAction.SequencerSends,
    member: Member,
    val underlyingTransport: SequencerClientTransport & SequencerClientTransportPekko,
    requestSigner: RequestSigner,
    metrics: SequencerClientMetrics,
    timeouts: ProcessingTimeout,
    loggerFactory: NamedLoggerFactory,
)(implicit executionContext: ExecutionContext, materializer: Materializer)
    extends ReplayingSendsSequencerClientTransportCommon(
      protocolVersion,
      recordedPath,
      replaySendsConfig,
      member,
      underlyingTransport,
      requestSigner,
      metrics,
      timeouts,
      loggerFactory,
    )
    with SequencerClientTransport
    with SequencerClientTransportPekko {
  // Classic subscribe returns an inert subscription: the handler is ignored and
  // only completion bookkeeping is implemented.
  override def subscribe[E](request: SubscriptionRequest, handler: SerializedEventHandler[E])(
      implicit traceContext: TraceContext
  ): SequencerSubscription[E] = new SequencerSubscription[E] {
    override protected def loggerFactory: NamedLoggerFactory =
      ReplayingSendsSequencerClientTransportImpl.this.loggerFactory

    override protected def timeouts: ProcessingTimeout =
      ReplayingSendsSequencerClientTransportImpl.this.timeouts

    override private[canton] def complete(reason: SubscriptionCloseReason[E])(implicit
        traceContext: TraceContext
    ): Unit = closeReasonPromise.trySuccess(reason).discard[Boolean]
  }

  override def subscribeUnauthenticated[E](
      request: SubscriptionRequest,
      handler: SerializedEventHandler[E],
  )(implicit traceContext: TraceContext): SequencerSubscription[E] = subscribe(request, handler)

  override def subscriptionRetryPolicy: SubscriptionErrorRetryPolicy =
    SubscriptionErrorRetryPolicy.never

  // Idleness monitoring subscribes through the real underlying transport.
  override protected def subscribe(
      request: SubscriptionRequest,
      handler: SerializedEventHandler[NotUsed],
  ): AutoCloseable =
    underlyingTransport.subscribe(request, handler)

  override type SubscriptionError = underlyingTransport.SubscriptionError

  override def subscribe(request: SubscriptionRequest)(implicit
      traceContext: TraceContext
  ): SequencerSubscriptionPekko[SubscriptionError] = underlyingTransport.subscribe(request)

  override def subscribeUnauthenticated(request: SubscriptionRequest)(implicit
      traceContext: TraceContext
  ): SequencerSubscriptionPekko[SubscriptionError] =
    underlyingTransport.subscribeUnauthenticated(request)

  override def subscriptionRetryPolicyPekko: SubscriptionErrorRetryPolicyPekko[SubscriptionError] =
    SubscriptionErrorRetryPolicyPekko.never
}

/** Variant whose idleness subscription is driven by the underlying transport's
  * Pekko stream source rather than the classic handler-based subscription.
  */
class ReplayingSendsSequencerClientTransportPekko(
    protocolVersion: ProtocolVersion,
    recordedPath: Path,
    replaySendsConfig: ReplayAction.SequencerSends,
    member: Member,
    underlyingTransport: SequencerClientTransportPekko & SequencerClientTransport,
    requestSigner: RequestSigner,
    metrics: SequencerClientMetrics,
    timeouts: ProcessingTimeout,
    loggerFactory: NamedLoggerFactory,
)(implicit executionContext: ExecutionContext, materializer: Materializer)
// TODO(#13789) Extend `ReplayingSendsSequencerClientTransportCommon`
    extends ReplayingSendsSequencerClientTransportImpl(
      protocolVersion,
      recordedPath,
      replaySendsConfig,
      member,
      underlyingTransport,
      requestSigner,
      metrics,
      timeouts,
      loggerFactory,
    )
    with SequencerClientTransportPekko {

  // Runs the Pekko subscription source through the handler (up to 10 events in
  // flight) into Sink.ignore; closing shuts down the kill switch and awaits the
  // stream's termination future within the configured closing timeout.
  override protected def subscribe(
      request: SubscriptionRequest,
      handler: SerializedEventHandler[NotUsed],
  ): AutoCloseable = {
    val ((killSwitch, _), doneF) = subscribe(request).source
      .mapAsync(parallelism = 10)(_.unwrap.traverse { event =>
        handler(event)
      })
      .watchTermination()(Keep.both)
      .to(Sink.ignore)
      .run()
    new AutoCloseable {
      override def close(): Unit = {
        killSwitch.shutdown()
        timeouts.closing.await_("closing subscription")(doneF)
      }
    }
  }
}
diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/handlers/CleanSequencerCounterTracker.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/handlers/CleanSequencerCounterTracker.scala
new file mode 100644
index 0000000000..9ffccd2e44
--- /dev/null
+++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/handlers/CleanSequencerCounterTracker.scala
@@ -0,0 +1,114 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.handlers + +import com.digitalasset.canton.SequencerCounter +import com.digitalasset.canton.data.{ + CantonTimestamp, + Counter, + PeanoQueue, + SynchronizedPeanoTreeQueue, +} +import com.digitalasset.canton.lifecycle.CloseContext +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.sequencing.protocol.Envelope +import com.digitalasset.canton.sequencing.{HandlerResult, PossiblyIgnoredApplicationHandler} +import com.digitalasset.canton.store.CursorPrehead.SequencerCounterCursorPrehead +import com.digitalasset.canton.store.{CursorPrehead, SequencerCounterTrackerStore} +import com.digitalasset.canton.tracing.{TraceContext, Traced} +import com.digitalasset.canton.util.TryUtil.* + +import java.util.concurrent.atomic.AtomicLong +import scala.concurrent.{ExecutionContext, Future} +import scala.util.Try + +/** Application handler transformer that tracks the sequencer counters for which the + * given application handler has successfully completed the asynchronous processing. + * + * @param onUpdate Handler to be called after the clean sequencer counter prehead has been updated. + * Calls are synchronized only in the sense that the supplied prehead is at least the persisted prehead + * (except for rewinding during crash recovery), but the observed preheads need not increase + * monotonically from the handler's perspective due to out-of-order execution of futures. + */ +class CleanSequencerCounterTracker( + store: SequencerCounterTrackerStore, + onUpdate: Traced[SequencerCounterCursorPrehead] => Unit, + override protected val loggerFactory: NamedLoggerFactory, +)(implicit ec: ExecutionContext) + extends NamedLogging { + + /** Counter for the batches of events that we hand out to the application handler. 
*/ + private case object EventBatchCounterDiscriminator + private type EventBatchCounterDiscriminator = EventBatchCounterDiscriminator.type + private type EventBatchCounter = Counter[EventBatchCounterDiscriminator] + + /** The counter for the next batch of events that goes to the application handler. + * The [[EventBatchCounter]] is not persisted anywhere and can therefore be reset upon a restart. + */ + private val eventBatchCounterRef: AtomicLong = new AtomicLong(0L) + + /** A Peano queue to track the event batches that have been processed successfully (both synchronously and asynchronously). + * The [[SequencerCounter]] belongs to the last event in the corresponding event batch. + */ + private val eventBatchQueue + : PeanoQueue[EventBatchCounter, Traced[SequencerCounterCursorPrehead]] = + new SynchronizedPeanoTreeQueue[ + EventBatchCounterDiscriminator, + Traced[SequencerCounterCursorPrehead], + ](Counter[EventBatchCounterDiscriminator](0L)) + + def apply[E <: Envelope[_]]( + handler: PossiblyIgnoredApplicationHandler[E] + )(implicit callerCloseContext: CloseContext): PossiblyIgnoredApplicationHandler[E] = + handler.replace { tracedEvents => + tracedEvents.withTraceContext { implicit batchTraceContext => events => + events.lastOption match { + case None => HandlerResult.done // ignore empty event batches + case Some(lastEvent) => + val lastSc = lastEvent.counter + val lastTs = lastEvent.timestamp + val eventBatchCounter = allocateEventBatchCounter() + handler(tracedEvents).map { asyncF => + val asyncFSignalled = asyncF.andThenF { case () => + store.performUnlessClosingF("signal-clean-event-batch")( + signalCleanEventBatch(eventBatchCounter, lastSc, lastTs) + ) + } + asyncFSignalled + } + } + } + } + + private[this] def allocateEventBatchCounter(): EventBatchCounter = + Counter[EventBatchCounterDiscriminator](eventBatchCounterRef.getAndIncrement()) + + private[this] def signalCleanEventBatch( + eventBatchCounter: EventBatchCounter, + lastSc: SequencerCounter, + 
lastTs: CantonTimestamp, + )(implicit traceContext: TraceContext, callerCloseContext: CloseContext): Future[Unit] = { + val atLeastHead = + eventBatchQueue.insert(eventBatchCounter, Traced(CursorPrehead(lastSc, lastTs))) + if (!atLeastHead) { + logger.debug(s"Ignoring event batch counter $eventBatchCounter") + } + // Update the store if we can advance the cursor + drainAndUpdate() + } + + private[this] def drainAndUpdate()(implicit callerCloseContext: CloseContext): Future[Unit] = + eventBatchQueue.dropUntilFront() match { + case None => Future.unit + case Some((_, tracedPrehead)) => + tracedPrehead.withTraceContext { implicit traceContext => prehead => + store.advancePreheadSequencerCounterTo(prehead).map { _ => + // Signal the new prehead and make sure that the update handler cannot interfere by throwing exceptions + Try(onUpdate(tracedPrehead)).forFailed { ex => + logger.error("onUpdate handler failed", ex) + } + } + } + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/handlers/CounterCapture.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/handlers/CounterCapture.scala new file mode 100644 index 0000000000..6e0ad9722c --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/handlers/CounterCapture.scala @@ -0,0 +1,48 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.handlers + +import com.digitalasset.canton.SequencerCounter +import com.digitalasset.canton.concurrent.DirectExecutionContext +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.sequencing.SerializedEventHandler +import com.digitalasset.canton.tracing.TraceContext + +import java.util.concurrent.atomic.AtomicLong +import scala.concurrent.ExecutionContext + +/** Capture the sequencer counter of the last successfully processed event (only synchronous processing). + * @param initial Initial counter to return until a event is successfully processed. + */ +class CounterCapture( + private val initial: SequencerCounter, + protected val loggerFactory: NamedLoggerFactory, +) extends NamedLogging { + + private val currentValue = new AtomicLong(initial.v) + + /** Wrap a handler and capture the counter of a successfully processed event. + * It only makes sense to wrap a single handler however this is not enforced. + */ + def apply[E](handler: SerializedEventHandler[E]): SerializedEventHandler[E] = { + implicit val ec: ExecutionContext = DirectExecutionContext(noTracingLogger) + event => { + implicit val traceContext: TraceContext = event.traceContext + for { + result <- handler(event) + } yield { + // only update if successful + result foreach { _ => + val counter = event.counter + currentValue.set(counter.v) + logger.trace(s"Captured sequencer counter ${counter}") + } + result + } + } + } + + /** Get the latest offset. 
*/ + def counter: SequencerCounter = SequencerCounter(currentValue.get) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/handlers/HasReceivedEvent.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/handlers/HasReceivedEvent.scala new file mode 100644 index 0000000000..43b97a57ab --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/handlers/HasReceivedEvent.scala @@ -0,0 +1,38 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.handlers + +import com.digitalasset.canton.sequencing.SerializedEventHandler + +import java.util.concurrent.atomic.AtomicBoolean +import scala.concurrent.{Future, Promise} + +class HasReceivedEvent { + + private val promise = Promise[Unit]() + private val received = new AtomicBoolean(false) + + def hasReceivedEvent: Boolean = received.get() + + def awaitEvent: Future[Unit] = promise.future +} + +/** Capture whether the handler has been supplied an event (not whether it has been successfully processed) + */ +object HasReceivedEvent { + def apply[E]( + handler: SerializedEventHandler[E] + ): (HasReceivedEvent, SerializedEventHandler[E]) = { + val hasReceivedEvent = new HasReceivedEvent + + ( + hasReceivedEvent, + event => { + if (!hasReceivedEvent.received.getAndSet(true)) + hasReceivedEvent.promise.success(()) + handler(event) + }, + ) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/handlers/StoreSequencedEvent.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/handlers/StoreSequencedEvent.scala new file mode 100644 index 0000000000..c2f5f2af42 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/handlers/StoreSequencedEvent.scala @@ -0,0 +1,59 @@ +// Copyright (c) 2023 
Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.handlers + +import com.digitalasset.canton.lifecycle.{CloseContext, FutureUnlessShutdown} +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.sequencing.OrdinaryApplicationHandler +import com.digitalasset.canton.sequencing.protocol.ClosedEnvelope +import com.digitalasset.canton.store.SequencedEventStore +import com.digitalasset.canton.topology.DomainId +import com.digitalasset.canton.util.ErrorUtil +import com.digitalasset.canton.util.ShowUtil.* + +import scala.concurrent.ExecutionContext + +/** Transformer for [[com.digitalasset.canton.sequencing.OrdinaryApplicationHandler]] + * that stores all event batches in the [[com.digitalasset.canton.store.SequencedEventStore]] + * before passing them on to the given handler. Complains if events have the wrong domain ID. + */ +class StoreSequencedEvent( + store: SequencedEventStore, + domainId: DomainId, + protected override val loggerFactory: NamedLoggerFactory, +)(implicit ec: ExecutionContext) + extends NamedLogging { + + def apply( + handler: OrdinaryApplicationHandler[ClosedEnvelope] + )(implicit closeContext: CloseContext): OrdinaryApplicationHandler[ClosedEnvelope] = + handler.replace(tracedEvents => + tracedEvents.withTraceContext { implicit batchTraceContext => events => + val wrongDomainEvents = events.filter(_.signedEvent.content.domainId != domainId) + for { + _ <- FutureUnlessShutdown.outcomeF( + ErrorUtil.requireArgumentAsync( + wrongDomainEvents.isEmpty, { + val wrongDomainIds = wrongDomainEvents.map(_.signedEvent.content.domainId).distinct + val wrongDomainCounters = wrongDomainEvents.map(_.signedEvent.content.counter) + show"Cannot store sequenced events from domains $wrongDomainIds in store for domain $domainId\nSequencer counters: $wrongDomainCounters" + }, + ) + ) + // The events must be 
stored before we call the handler + // so that during crash recovery the `SequencerClient` can use the first event in the + // `SequencedEventStore` as the beginning of the resubscription even if that event is not known to be clean. + _ <- FutureUnlessShutdown.outcomeF(store.store(events)) + result <- handler(tracedEvents) + } yield result + } + ) +} + +object StoreSequencedEvent { + def apply(store: SequencedEventStore, domainId: DomainId, loggerFactory: NamedLoggerFactory)( + implicit ec: ExecutionContext + ): StoreSequencedEvent = + new StoreSequencedEvent(store, domainId, loggerFactory) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/handlers/ThrottlingApplicationEventHandler.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/handlers/ThrottlingApplicationEventHandler.scala new file mode 100644 index 0000000000..17672f76f1 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/handlers/ThrottlingApplicationEventHandler.scala @@ -0,0 +1,61 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.handlers + +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.lifecycle.UnlessShutdown +import com.digitalasset.canton.metrics.SequencerClientMetrics +import com.digitalasset.canton.sequencing.ApplicationHandler +import com.digitalasset.canton.sequencing.protocol.Envelope +import com.digitalasset.canton.util.Thereafter.syntax.* + +import java.util.concurrent.{ArrayBlockingQueue, BlockingQueue} +import scala.concurrent.{ExecutionContext, blocking} +import scala.util.Success + +object ThrottlingApplicationEventHandler { + + private case object Token + + def throttle[Box[+_ <: Envelope[?]], E <: Envelope[?]]( + maximumInFlightEventBatches: PositiveInt, + handler: ApplicationHandler[Box, E], + metrics: SequencerClientMetrics, + )(implicit + ec: ExecutionContext + ): ApplicationHandler[Box, E] = { + metrics.handler.maxInFlightEventBatches.updateValue(maximumInFlightEventBatches.unwrap) + /* + It would be better if we didn't have to block a thread completely and instead + could use Promise / Future, but given that there can be at most one thread blocked + at a time due to the sequencer client calling this stuff sequentially, + simplicity beats pre-mature optimization here. 
+ */ + val queue: BlockingQueue[Token.type] = + new ArrayBlockingQueue[Token.type]( + maximumInFlightEventBatches.unwrap + ) + + handler.replace { tracedEvents => + blocking { queue.put(Token) } + metrics.handler.actualInFlightEventBatches.inc() + handler(tracedEvents) + .map { asyncF => + // this will ensure that we unregister from the queue once the inner future asyncF is done, whether + // it's a success, a shutdown or an exception that is thrown + asyncF.thereafter { _ => + queue.remove().discard + metrics.handler.actualInFlightEventBatches.dec() + } + } + .thereafter { + case Success(UnlessShutdown.Outcome(_)) => + // don't forget to unblock other threads on shutdown or exception of the outer future such that we don't block other threads + case _ => + queue.remove().discard + metrics.handler.actualInFlightEventBatches.dec() + } + } + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/handshake/HandshakeRequestError.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/handshake/HandshakeRequestError.scala new file mode 100644 index 0000000000..3f10cc77bc --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/handshake/HandshakeRequestError.scala @@ -0,0 +1,10 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.handshake + +/** Failed to obtain a handshake response from the server. 
+ * @param message loggable message for why the handshake failed + * @param retryable does the transport suggest retrying the handshake + */ +final case class HandshakeRequestError(message: String, retryable: Boolean = false) diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/handshake/SequencerHandshake.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/handshake/SequencerHandshake.scala new file mode 100644 index 0000000000..7d12300aa9 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/handshake/SequencerHandshake.scala @@ -0,0 +1,113 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.handshake + +import cats.data.EitherT +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.lifecycle.FlagCloseable +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.sequencing.client.SequencerClientConfig +import com.digitalasset.canton.sequencing.protocol.{HandshakeRequest, HandshakeResponse} +import com.digitalasset.canton.tracing.TraceContext.withNewTraceContext +import com.digitalasset.canton.tracing.{TraceContext, Traced} +import com.digitalasset.canton.util.Thereafter.syntax.* +import com.digitalasset.canton.util.retry +import com.digitalasset.canton.util.retry.RetryUtil.AllExnRetryable +import com.digitalasset.canton.util.retry.Success +import com.digitalasset.canton.version.ProtocolVersion + +import scala.concurrent.duration.FiniteDuration +import scala.concurrent.{ExecutionContext, Future} + +/** Performs the sequencer handshake. 
*/ +class SequencerHandshake( + clientVersions: Seq[ProtocolVersion], + minimumProtocolVersion: Option[ProtocolVersion], + fetchHandshake: Traced[HandshakeRequest] => EitherT[ + Future, + HandshakeRequestError, + HandshakeResponse, + ], + retryAttempts: Int, + retryDelay: FiniteDuration, + override protected val timeouts: ProcessingTimeout, + protected val loggerFactory: NamedLoggerFactory, +)(implicit executionContext: ExecutionContext) + extends NamedLogging + with FlagCloseable { + def handshake: EitherT[Future, String, Unit] = withNewTraceContext { implicit traceContext => + logger.debug("Starting sequencer handshake") + + for { + response <- repeatedlyTryToFetchHandshake + .leftMap(err => s"Failed to fetch handshake response from sequencer server: $err") + // determine whether the server accepted our handshake, log the outcome + _ <- EitherT.fromEither[Future]( + response match { + case HandshakeResponse.Success(serverVersion) => + logger.debug( + s"Sequencer handshake successful (client v$clientVersions, server v$serverVersion)" + ) + Right(()) + case HandshakeResponse.Failure(serverVersion, reason) => + logger.warn( + s"Sequencer handshake failed: $reason (client versions: ${clientVersions + .mkString(", ")}, server: $serverVersion)" + ) + Left(reason) + } + ) + } yield () + } + + private def repeatedlyTryToFetchHandshake(implicit + traceContext: TraceContext + ): EitherT[Future, HandshakeRequestError, HandshakeResponse] = { + implicit val completed = Success[Either[HandshakeRequestError, HandshakeResponse]] { + case Right(_) => true // we've got a response + case Left(HandshakeRequestError(_, shouldRetry)) => + !shouldRetry // if we have a unretryable error we should finish immediately + } + val request = HandshakeRequest(clientVersions, minimumProtocolVersion) + + EitherT { + retry + .Pause(logger, this, maxRetries = retryAttempts, delay = retryDelay, "fetch handshake") + .apply( + { + logger.trace("Attempting sequencer handshake") + 
fetchHandshake(Traced(request)).value + }, + AllExnRetryable, + ) + } + } +} + +object SequencerHandshake { + + def handshake( + supportedProtocols: Seq[ProtocolVersion], + minimumProtocolVersion: Option[ProtocolVersion], + client: SupportsHandshake, + config: SequencerClientConfig, + timeouts: ProcessingTimeout, + loggerFactory: NamedLoggerFactory, + )(implicit executionContext: ExecutionContext): EitherT[Future, String, Unit] = { + val sequencerHandshake = + new SequencerHandshake( + supportedProtocols, + minimumProtocolVersion, + Traced.lift(client.handshake(_)(_)), + config.handshakeRetryAttempts.unwrap, + config.handshakeRetryDelay.underlying, + timeouts, + loggerFactory, + ) + + sequencerHandshake.handshake.thereafter { _ => + sequencerHandshake.close() + } + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/handshake/SupportsHandshake.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/handshake/SupportsHandshake.scala new file mode 100644 index 0000000000..04093288a5 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/handshake/SupportsHandshake.scala @@ -0,0 +1,21 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.handshake + +import cats.data.EitherT +import com.digitalasset.canton.sequencing.protocol.{HandshakeRequest, HandshakeResponse} +import com.digitalasset.canton.tracing.TraceContext + +import scala.concurrent.Future + +/** Shared interface for checking the version of the sequencer service that a client providing sequencer operations will be calling. */ +trait SupportsHandshake { + + /** Attempt to obtain a handshake response from the sequencer server. + * Can indicate with the error if the error is transient and may be retried by the caller. 
+ */ + def handshake(request: HandshakeRequest)(implicit + traceContext: TraceContext + ): EitherT[Future, HandshakeRequestError, HandshakeResponse] +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/package.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/package.scala new file mode 100644 index 0000000000..9b43990591 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/package.scala @@ -0,0 +1,118 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton + +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.protocol.messages.DefaultOpenEnvelope +import com.digitalasset.canton.sequencing.client.SequencerSubscriptionError.SequencedEventError +import com.digitalasset.canton.sequencing.protocol.{ + ClosedEnvelope, + Envelope, + SequencedEvent, + SignedContent, +} +import com.digitalasset.canton.store.SequencedEventStore.{ + OrdinarySequencedEvent, + PossiblyIgnoredSequencedEvent, +} +import com.digitalasset.canton.tracing.Traced + +import scala.concurrent.Future + +package object sequencing { + + /** It is convenient to consider the envelopes and all the structure around the envelopes (the box). + * [[EnvelopeBox]] defines type class operations to manipulate + */ + type BoxedEnvelope[+Box[+_ <: Envelope[_]], +Env <: Envelope[_]] = Box[Env] + + /** A handler processes an event synchronously in the [[scala.concurrent.Future]] + * and returns an [[AsyncResult]] that may be computed asynchronously by the contained future. + * Asynchronous processing may run concurrently with later events' synchronous processing + * and with asynchronous processing of other events. 
+ */ + type HandlerResult = FutureUnlessShutdown[AsyncResult] + + /////////////////////////////// + // The boxes and their handlers + /////////////////////////////// + + /** Default box for signed batches of events + * The outer `Traced` contains a trace context for the entire batch. + */ + type OrdinaryEnvelopeBox[+E <: Envelope[_]] = Traced[Seq[OrdinarySequencedEvent[E]]] + type OrdinaryApplicationHandler[-E <: Envelope[_]] = ApplicationHandler[OrdinaryEnvelopeBox, E] + + /** Just a signature around the [[com.digitalasset.canton.sequencing.protocol.SequencedEvent]] + * The term "raw" indicates that the trace context is missing. + * Try to use the box [[OrdinarySerializedEvent]] instead. + */ + type RawSignedContentEnvelopeBox[+Env <: Envelope[_]] = SignedContent[SequencedEvent[Env]] + + /** A batch of traced protocol events (without a signature). + * The outer `Traced` contains a trace context for the entire batch. + */ + type UnsignedEnvelopeBox[+E <: Envelope[_]] = Traced[Seq[Traced[SequencedEvent[E]]]] + type UnsignedApplicationHandler[-E <: Envelope[_]] = ApplicationHandler[UnsignedEnvelopeBox, E] + type UnsignedProtocolEventHandler = UnsignedApplicationHandler[DefaultOpenEnvelope] + + type NoEnvelopeBox[+E <: Envelope[_]] = Traced[Seq[E]] + type EnvelopeHandler = ApplicationHandler[NoEnvelopeBox, DefaultOpenEnvelope] + + /** Default box for `PossiblyIgnoredProtocolEvents`. + * The outer `Traced` contains a trace context for the entire batch. + */ + type PossiblyIgnoredEnvelopeBox[+E <: Envelope[_]] = Traced[Seq[PossiblyIgnoredSequencedEvent[E]]] + type PossiblyIgnoredApplicationHandler[-E <: Envelope[_]] = + ApplicationHandler[PossiblyIgnoredEnvelopeBox, E] + + /////////////////////////////////// + // Serialized events in their boxes + /////////////////////////////////// + + /** Default type for serialized events. + * Contains trace context and signature. 
+ */ + type OrdinarySerializedEvent = BoxedEnvelope[OrdinarySequencedEvent, ClosedEnvelope] + + type PossiblyIgnoredSerializedEvent = BoxedEnvelope[PossiblyIgnoredSequencedEvent, ClosedEnvelope] + + type OrdinarySerializedEventOrError = Either[SequencedEventError, OrdinarySerializedEvent] + + ///////////////////////////////// + // Protocol events (deserialized) + ///////////////////////////////// + + /** Default type for deserialized events. + * Includes a signature and a trace context. + */ + type OrdinaryProtocolEvent = BoxedEnvelope[OrdinarySequencedEvent, DefaultOpenEnvelope] + + /** Deserialized event with optional payload. */ + type PossiblyIgnoredProtocolEvent = + BoxedEnvelope[PossiblyIgnoredSequencedEvent, DefaultOpenEnvelope] + + /** Default type for deserialized events. + * The term "raw" indicates that the trace context is missing. + * Try to use `TracedProtocolEvent` instead. + */ + type RawProtocolEvent = BoxedEnvelope[SequencedEvent, DefaultOpenEnvelope] + + /** Deserialized event with a trace context. + * Use this when you are really sure that a signature will never be needed. 
+ */ + type TracedProtocolEvent = Traced[RawProtocolEvent] + + ////////////////////////////// + // Non-standard event handlers + ////////////////////////////// + + // Non-standard handlers do not return a `HandlerResult` + + /** Default type for handlers on serialized events with error reporting + */ + type SerializedEventHandler[Err] = OrdinarySerializedEvent => Future[Either[Err, Unit]] + type SerializedEventOrErrorHandler[Err] = + OrdinarySerializedEventOrError => Future[Either[Err, Unit]] +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/AcknowledgeRequest.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/AcknowledgeRequest.scala new file mode 100644 index 0000000000..13e7902426 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/AcknowledgeRequest.scala @@ -0,0 +1,73 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.protocol + +import cats.syntax.option.* +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.domain.api.v0 +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.serialization.{ProtoConverter, ProtocolVersionedMemoizedEvidence} +import com.digitalasset.canton.topology.Member +import com.digitalasset.canton.version.{ + HasMemoizedProtocolVersionedWrapperCompanion, + HasProtocolVersionedWrapper, + ProtoVersion, + ProtocolVersion, + RepresentativeProtocolVersion, +} +import com.google.protobuf.ByteString + +final case class AcknowledgeRequest private (member: Member, timestamp: CantonTimestamp)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + AcknowledgeRequest.type + ], + override val deserializedFrom: Option[ByteString] = None, +) extends HasProtocolVersionedWrapper[AcknowledgeRequest] + with ProtocolVersionedMemoizedEvidence { + def toProtoV0: v0.AcknowledgeRequest = + v0.AcknowledgeRequest(member.toProtoPrimitive, timestamp.toProtoPrimitive.some) + + override protected[this] def toByteStringUnmemoized: ByteString = + super[HasProtocolVersionedWrapper].toByteString + + @transient override protected lazy val companionObj: AcknowledgeRequest.type = AcknowledgeRequest +} + +object AcknowledgeRequest extends HasMemoizedProtocolVersionedWrapperCompanion[AcknowledgeRequest] { + def apply( + member: Member, + timestamp: CantonTimestamp, + protocolVersion: ProtocolVersion, + ): AcknowledgeRequest = + AcknowledgeRequest(member, timestamp)(protocolVersionRepresentativeFor(protocolVersion)) + + override def name: String = "AcknowledgeRequest" + + override def supportedProtoVersions: SupportedProtoVersions = + SupportedProtoVersions( + ProtoVersion(0) -> VersionedProtoConverter(ProtocolVersion.v30)(v0.AcknowledgeRequest)( + supportedProtoVersionMemoized(_) { req => bytes => + 
fromProtoV0(req)(Some(bytes)) + }, + _.toProtoV0.toByteString, + ) + ) + + private def fromProtoV0( + reqP: v0.AcknowledgeRequest + )(deserializedFrom: Option[ByteString]): ParsingResult[AcknowledgeRequest] = + for { + member <- Member.fromProtoPrimitive(reqP.member, "member") + timestamp <- ProtoConverter.parseRequired( + CantonTimestamp.fromProtoPrimitive, + "timestamp", + reqP.timestamp, + ) + } yield { + AcknowledgeRequest(member, timestamp)( + protocolVersionRepresentativeFor(ProtoVersion(0)), + deserializedFrom, + ) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/AggregationId.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/AggregationId.scala new file mode 100644 index 0000000000..97f7c9d3d1 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/AggregationId.scala @@ -0,0 +1,37 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.protocol + +import cats.syntax.either.* +import com.digitalasset.canton.crypto.Hash +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.store.db.DbDeserializationException +import com.google.protobuf.ByteString +import slick.jdbc.{GetResult, SetParameter} + +final case class AggregationId(id: Hash) extends PrettyPrinting { + override def pretty: Pretty[AggregationId] = prettyOfParam(_.id) + + def toProtoPrimitive: ByteString = id.getCryptographicEvidence +} + +object AggregationId { + // We serialize/deserialize aggregation IDs as hex strings rather than bytestrings so that they can be used as primary keys in all DBs + implicit val setParameterAggregationId: SetParameter[AggregationId] = (v, pp) => + pp.>>(v.id.toLengthLimitedHexString) + + implicit val getResultAggregationId: GetResult[AggregationId] = GetResult { r => + val hex = r.nextString() + val hash = Hash + .fromHexString(hex) + .valueOr(err => + throw new DbDeserializationException(s"Could not deserialize aggregation id: $err") + ) + AggregationId(hash) + } + + def fromProtoPrimitive(bytes: ByteString): ParsingResult[AggregationId] = + Hash.fromProtoPrimitive(bytes).map(id => AggregationId(id)) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/AggregationRule.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/AggregationRule.scala new file mode 100644 index 0000000000..d16eef30fd --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/AggregationRule.scala @@ -0,0 +1,99 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.protocol + +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.protocol.v0 +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.topology.Member +import com.digitalasset.canton.version.{ + HasProtocolVersionedCompanion, + HasProtocolVersionedWrapper, + ProtoVersion, + ProtocolVersion, + ProtocolVersionedCompanionDbHelpers, + RepresentativeProtocolVersion, +} + +/** Encodes the conditions on when an aggregatable submission request's envelopes are sequenced and delivered. + * + * Aggregatable submissions are grouped by their [[SubmissionRequest.aggregationId]]. + * An aggregatable submission's envelopes are delivered to their recipients when the [[threshold]]'s + * submission request in its group has been sequenced. The aggregatable submission request that triggers the threshold + * defines the sequencing timestamp (and thus the sequencer counters) for all delivered envelopes. + * The sender of an aggregatable submission request receives a receipt of delivery immediately when its request was sequenced, + * not when its envelopes were delivered. When the envelopes are actually delivered, no further delivery receipt is sent. + * + * So a threshold of 1 means that no aggregation takes place and the event is sequenced immediately. + * In this case, one can completely omit the aggregation rule in the submission request. + */ +final case class AggregationRule( + // TODO(#12075) This is a `Seq` rather than a `Set` just because we then have to worry less about deterministic serialization. + // Change it to a set. 
+ eligibleSenders: NonEmpty[Seq[Member]], + threshold: PositiveInt, +)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[AggregationRule.type] +) extends HasProtocolVersionedWrapper[AggregationRule] + with PrettyPrinting { + @transient override protected lazy val companionObj: AggregationRule.type = AggregationRule + + private[canton] def toProtoV0: v0.AggregationRule = v0.AggregationRule( + eligibleMembers = eligibleSenders.map(_.toProtoPrimitive), + threshold = threshold.value, + ) + + override def pretty: Pretty[this.type] = prettyOfClass( + param("threshold", _.threshold), + param("eligible members", _.eligibleSenders), + ) + + def copy( + eligibleMembers: NonEmpty[Seq[Member]] = this.eligibleSenders, + threshold: PositiveInt = this.threshold, + ): AggregationRule = + AggregationRule(eligibleMembers, threshold)(representativeProtocolVersion) +} + +object AggregationRule + extends HasProtocolVersionedCompanion[AggregationRule] + with ProtocolVersionedCompanionDbHelpers[AggregationRule] { + def apply( + eligibleMembers: NonEmpty[Seq[Member]], + threshold: PositiveInt, + protocolVersion: ProtocolVersion, + ): AggregationRule = + AggregationRule(eligibleMembers, threshold)(protocolVersionRepresentativeFor(protocolVersion)) + + def apply( + eligibleMembers: NonEmpty[Seq[Member]], + threshold: PositiveInt, + protoVersion: ProtoVersion, + ): AggregationRule = + AggregationRule(eligibleMembers, threshold)(protocolVersionRepresentativeFor(protoVersion)) + + override def name: String = "AggregationRule" + + override def supportedProtoVersions: SupportedProtoVersions = SupportedProtoVersions( + ProtoVersion(0) -> VersionedProtoConverter(ProtocolVersion.v30)(v0.AggregationRule)( + supportedProtoVersion(_)(fromProtoV0), + _.toProtoV0.toByteString, + ) + ) + + private[canton] def fromProtoV0(proto: v0.AggregationRule): ParsingResult[AggregationRule] = { + val v0.AggregationRule(eligibleMembersP, thresholdP) = proto + for { + eligibleMembers <- 
ProtoConverter.parseRequiredNonEmpty( + Member.fromProtoPrimitive(_, "eligible_members"), + "eligible_members", + eligibleMembersP, + ) + threshold <- ProtoConverter.parsePositiveInt(thresholdP) + } yield AggregationRule(eligibleMembers, threshold, ProtoVersion(0)) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/Batch.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/Batch.scala new file mode 100644 index 0000000000..742aea91cd --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/Batch.scala @@ -0,0 +1,185 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.protocol + +import cats.Applicative +import cats.implicits.* +import com.digitalasset.canton.ProtoDeserializationError +import com.digitalasset.canton.ProtoDeserializationError.FieldNotSet +import com.digitalasset.canton.config.RequireTypes.NonNegativeInt +import com.digitalasset.canton.crypto.HashOps +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.protocol.messages.ProtocolMessage +import com.digitalasset.canton.protocol.{v0, v1} +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.topology.{MediatorId, Member} +import com.digitalasset.canton.util.ByteStringUtil +import com.digitalasset.canton.version.{ + HasProtocolVersionedCompanion2, + HasProtocolVersionedWrapper, + ProtoVersion, + ProtocolVersion, + RepresentativeProtocolVersion, +} +import com.google.common.annotations.VisibleForTesting +import com.google.protobuf.ByteString + +/** A '''batch''' is a a list of `n` tuples `(m`,,i,,` , recipients`,,i,,), + * where `m`,,i,, is a message, and + * 
`recipients`,,i,, is the list of recipients of m,,i,,, + * for `0 <= i < n`. + */ +final case class Batch[+Env <: Envelope[_]] private (envelopes: List[Env])( + override val representativeProtocolVersion: RepresentativeProtocolVersion[Batch.type] +) extends HasProtocolVersionedWrapper[Batch[Envelope[_]]] + with PrettyPrinting { + + @transient override protected lazy val companionObj: Batch.type = Batch + + /** builds a set of recipients from all messages in this message batch + */ + lazy val allMembers: Set[Member] = allRecipients.collect { case MemberRecipient(member) => + member + } + + lazy val allRecipients: Set[Recipient] = envelopes.flatMap { e => + e.recipients.allRecipients + }.toSet + + lazy val allMediatorRecipients: Set[Recipient] = { + allRecipients.collect { + case r @ MemberRecipient(_: MediatorId) => r + case r: MediatorsOfDomain => r + case AllMembersOfDomain => AllMembersOfDomain + } + } + + private[protocol] def toProtoV1: v1.CompressedBatch = { + val batch = v1.Batch(envelopes = envelopes.map(_.closeEnvelope.toProtoV1)) + val compressed = ByteStringUtil.compressGzip(batch.toByteString) + v1.CompressedBatch( + algorithm = v0.CompressedBatch.CompressionAlgorithm.Gzip, + compressedBatch = compressed, + ) + } + + def map[Env2 <: Envelope[_]](f: Env => Env2): Batch[Env2] = + Batch(envelopes.map(f))(representativeProtocolVersion) + + def copy[Env2 <: Envelope[_]](envelopes: List[Env2]): Batch[Env2] = + Batch(envelopes)(representativeProtocolVersion) + + def envelopesCount: Int = envelopes.size + + private[sequencing] def traverse[F[_], Env2 <: Envelope[_]](f: Env => F[Env2])(implicit + F: Applicative[F] + ): F[Batch[Env2]] = + F.map(envelopes.traverse(f))(Batch(_)(representativeProtocolVersion)) + + override def pretty: Pretty[Batch[Envelope[_]]] = prettyOfClass(unnamedParam(_.envelopes)) +} + +object Batch extends HasProtocolVersionedCompanion2[Batch[Envelope[_]], Batch[ClosedEnvelope]] { + override def name: String = "Batch" + + override val 
supportedProtoVersions: SupportedProtoVersions = SupportedProtoVersions( + ProtoVersion(1) -> VersionedProtoConverter( + ProtocolVersion.v30 + )(v1.CompressedBatch)( + supportedProtoVersion(_)( + // TODO(i10428) Prevent zip bombing when decompressing the request + Batch.fromProtoV1(_, maxRequestSize = MaxRequestSizeToDeserialize.NoLimit) + ), + _.toProtoV1.toByteString, + ) + ) + + def apply[Env <: Envelope[_]]( + envelopes: List[Env], + protocolVersion: ProtocolVersion, + ): Batch[Env] = Batch(envelopes)(protocolVersionRepresentativeFor(protocolVersion)) + + def of[M <: ProtocolMessage]( + protocolVersion: ProtocolVersion, + envs: (M, Recipients)* + ): Batch[OpenEnvelope[M]] = { + val envelopes = envs.map { case (m, recipients) => + OpenEnvelope[M](m, recipients)(protocolVersion) + }.toList + Batch[OpenEnvelope[M]](envelopes)(protocolVersionRepresentativeFor(protocolVersion)) + } + + @VisibleForTesting def fromClosed( + protocolVersion: ProtocolVersion, + envelopes: ClosedEnvelope* + ): Batch[ClosedEnvelope] = + Batch(envelopes.toList)(protocolVersionRepresentativeFor(protocolVersion)) + + private[protocol] def fromProtoV1( + batchProto: v1.CompressedBatch, + maxRequestSize: MaxRequestSizeToDeserialize, + ): ParsingResult[Batch[ClosedEnvelope]] = { + val v1.CompressedBatch(algorithm, compressed) = batchProto + + for { + uncompressed <- decompress(algorithm, compressed, maxRequestSize.toOption) + uncompressedBatchProto <- ProtoConverter.protoParser(v1.Batch.parseFrom)(uncompressed) + v1.Batch(envelopesProto) = uncompressedBatchProto + envelopes <- envelopesProto.toList.traverse(ClosedEnvelope.fromProtoV1) + } yield Batch[ClosedEnvelope](envelopes)(protocolVersionRepresentativeFor(ProtoVersion(1))) + } + + private def decompress( + algorithm: v0.CompressedBatch.CompressionAlgorithm, + compressed: ByteString, + maxRequestSize: Option[NonNegativeInt], + ): ParsingResult[ByteString] = { + algorithm match { + case v0.CompressedBatch.CompressionAlgorithm.None => 
Right(compressed) + case v0.CompressedBatch.CompressionAlgorithm.Gzip => + ByteStringUtil + .decompressGzip(compressed, maxBytesLimit = maxRequestSize.map(_.unwrap)) + .leftMap(_.toProtoDeserializationError) + case _ => Left(FieldNotSet("CompressedBatch.Algorithm")) + } + } + + /** Constructs a batch with no envelopes */ + def empty[Env <: Envelope[_]](protocolVersion: ProtocolVersion): Batch[Env] = + Batch(List.empty[Env])(protocolVersionRepresentativeFor(protocolVersion)) + + def filterClosedEnvelopesFor( + batch: Batch[ClosedEnvelope], + member: Member, + groupRecipients: Set[GroupRecipient], + ): Batch[ClosedEnvelope] = { + val newEnvs = batch.envelopes.mapFilter(e => e.forRecipient(member, groupRecipients)) + Batch(newEnvs)(batch.representativeProtocolVersion) + } + + def filterOpenEnvelopesFor[T <: ProtocolMessage]( + batch: Batch[OpenEnvelope[T]], + member: Member, + groupRecipients: Set[GroupRecipient], + ): Batch[OpenEnvelope[T]] = { + val newEnvs = batch.envelopes.mapFilter(e => e.forRecipient(member, groupRecipients)) + Batch(newEnvs)(batch.representativeProtocolVersion) + } + + def closeEnvelopes[T <: ProtocolMessage](batch: Batch[OpenEnvelope[T]]): Batch[ClosedEnvelope] = { + val closedEnvs = batch.envelopes.map(env => env.closeEnvelope) + Batch(closedEnvs)(batch.representativeProtocolVersion) + } + + def openEnvelopes(batch: Batch[ClosedEnvelope])( + protocolVersion: ProtocolVersion, + hashOps: HashOps, + ): (Batch[OpenEnvelope[ProtocolMessage]], Seq[ProtoDeserializationError]) = { + val (openingErrors, openEnvelopes) = + batch.envelopes.map(_.openEnvelope(hashOps, protocolVersion)).separate + + (Batch(openEnvelopes)(batch.representativeProtocolVersion), openingErrors) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/ClosedEnvelope.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/ClosedEnvelope.scala new file mode 100644 index 0000000000..63965dab2f --- 
/dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/ClosedEnvelope.scala @@ -0,0 +1,225 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.protocol + +import cats.data.EitherT +import cats.syntax.either.* +import cats.syntax.foldable.* +import cats.syntax.traverse.* +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.ProtoDeserializationError +import com.digitalasset.canton.crypto.{ + HashOps, + HashPurpose, + Signature, + SignatureCheckError, + SyncCryptoApi, +} +import com.digitalasset.canton.logging.pretty.Pretty +import com.digitalasset.canton.protocol.messages.{ + DefaultOpenEnvelope, + EnvelopeContent, + ProtocolMessage, + SignedProtocolMessage, + TypedSignedProtocolMessageContent, +} +import com.digitalasset.canton.protocol.v1 +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.topology.MediatorGroup.MediatorGroupIndex +import com.digitalasset.canton.topology.Member +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.version.{ + HasProtocolVersionedCompanion, + HasProtocolVersionedWrapper, + ProtoVersion, + ProtocolVersion, + RepresentativeProtocolVersion, +} +import com.google.common.annotations.VisibleForTesting +import com.google.protobuf.ByteString +import monocle.Lens + +import scala.concurrent.{ExecutionContext, Future} + +/** A [[ClosedEnvelope]]'s contents are serialized as a [[com.google.protobuf.ByteString]]. + * + * The serialization is interpreted as a [[com.digitalasset.canton.protocol.messages.EnvelopeContent]] + * if `signatures` are empty, and as a [[com.digitalasset.canton.protocol.messages.TypedSignedProtocolMessageContent]] otherwise. 
+ * It itself is serialized without version wrappers inside a [[Batch]]. + */ +final case class ClosedEnvelope private ( + bytes: ByteString, + override val recipients: Recipients, + signatures: Seq[Signature], +)(override val representativeProtocolVersion: RepresentativeProtocolVersion[ClosedEnvelope.type]) + extends Envelope[ByteString] + with HasProtocolVersionedWrapper[ClosedEnvelope] { + + @transient override protected lazy val companionObj: ClosedEnvelope.type = ClosedEnvelope + + def openEnvelope( + hashOps: HashOps, + protocolVersion: ProtocolVersion, + ): ParsingResult[DefaultOpenEnvelope] = { + NonEmpty.from(signatures) match { + case None => + EnvelopeContent.fromByteString(protocolVersion)(hashOps)(bytes).map { envelopeContent => + OpenEnvelope(envelopeContent.message, recipients)(protocolVersion) + } + case Some(signaturesNE) => + TypedSignedProtocolMessageContent.fromByteString(bytes).map { typedMessage => + OpenEnvelope( + SignedProtocolMessage(typedMessage, signaturesNE, protocolVersion), + recipients, + )(protocolVersion) + } + } + } + + override def pretty: Pretty[ClosedEnvelope] = prettyOfClass( + param("recipients", _.recipients), + paramIfNonEmpty("signatures", _.signatures), + ) + + override def forRecipient( + member: Member, + groupRecipients: Set[GroupRecipient], + ): Option[ClosedEnvelope] = + recipients.forMember(member, groupRecipients).map(r => this.copy(recipients = r)) + + override def closeEnvelope: this.type = this + + def toProtoV1: v1.Envelope = v1.Envelope( + content = bytes, + recipients = Some(recipients.toProtoV0), + signatures = signatures.map(_.toProtoV0), + ) + + @VisibleForTesting + def copy( + bytes: ByteString = this.bytes, + recipients: Recipients = this.recipients, + signatures: Seq[Signature] = this.signatures, + ): ClosedEnvelope = + ClosedEnvelope.create(bytes, recipients, signatures, representativeProtocolVersion) + + def verifySignatures( + snapshot: SyncCryptoApi, + sender: Member, + )(implicit ec: 
ExecutionContext): EitherT[Future, SignatureCheckError, Unit] = { + NonEmpty + .from(signatures) + .traverse_(ClosedEnvelope.verifySignatures(snapshot, sender, bytes, _)) + } +} + +object ClosedEnvelope extends HasProtocolVersionedCompanion[ClosedEnvelope] { + + override type Deserializer = ByteString => ParsingResult[ClosedEnvelope] + + override def name: String = "ClosedEnvelope" + + override def supportedProtoVersions: SupportedProtoVersions = SupportedProtoVersions( + ProtoVersion(1) -> VersionedProtoConverter( + ProtocolVersion.v30 + )(v1.Envelope)( + protoCompanion => + ProtoConverter.protoParser(protoCompanion.parseFrom)(_).flatMap(fromProtoV1), + _.toProtoV1.toByteString, + ) + ) + + def create( + bytes: ByteString, + recipients: Recipients, + signatures: Seq[Signature], + representativeProtocolVersion: RepresentativeProtocolVersion[ClosedEnvelope.type], + ): ClosedEnvelope = + ClosedEnvelope(bytes, recipients, signatures)(representativeProtocolVersion) + + def create( + bytes: ByteString, + recipients: Recipients, + signatures: Seq[Signature], + protocolVersion: ProtocolVersion, + ): ClosedEnvelope = + create(bytes, recipients, signatures, protocolVersionRepresentativeFor(protocolVersion)) + + private[protocol] def fromProtoV1(envelopeP: v1.Envelope): ParsingResult[ClosedEnvelope] = { + val v1.Envelope(contentP, recipientsP, signaturesP) = envelopeP + for { + recipients <- ProtoConverter.parseRequired( + Recipients.fromProtoV0(_, supportGroupAddressing = true), + "recipients", + recipientsP, + ) + signatures <- signaturesP.traverse(Signature.fromProtoV0) + closedEnvelope = ClosedEnvelope + .create( + contentP, + recipients, + signatures, + protocolVersionRepresentativeFor(ProtoVersion(1)), + ) + } yield closedEnvelope + } + + def tryDefaultOpenEnvelope( + hashOps: HashOps, + protocolVersion: ProtocolVersion, + ): Lens[ClosedEnvelope, DefaultOpenEnvelope] = + Lens[ClosedEnvelope, DefaultOpenEnvelope]( + _.openEnvelope(hashOps, protocolVersion).valueOr(err 
=> + throw new IllegalArgumentException(s"Failed to open envelope: $err") + ) + )(newOpenEnvelope => _ => newOpenEnvelope.closeEnvelope) + + override protected def deserializationErrorK( + error: ProtoDeserializationError + ): ByteString => ParsingResult[ClosedEnvelope] = _ => Left(error) + + def fromProtocolMessage( + protocolMessage: ProtocolMessage, + recipients: Recipients, + protocolVersion: ProtocolVersion, + ): ClosedEnvelope = { + protocolMessage match { + case SignedProtocolMessage(typedMessageContent, signatures) => + ClosedEnvelope.create( + typedMessageContent.toByteString, + recipients, + signatures, + protocolVersion, + ) + case _ => + ClosedEnvelope.create( + EnvelopeContent.tryCreate(protocolMessage, protocolVersion).toByteString, + recipients, + Seq.empty, + protocolVersion, + ) + } + } + + def verifySignatures( + snapshot: SyncCryptoApi, + sender: Member, + content: ByteString, + signatures: NonEmpty[Seq[Signature]], + ): EitherT[Future, SignatureCheckError, Unit] = { + val hash = snapshot.pureCrypto.digest(HashPurpose.SignedProtocolMessageSignature, content) + snapshot.verifySignatures(hash, sender, signatures) + } + + def verifySignatures( + snapshot: SyncCryptoApi, + mediatorGroupIndex: MediatorGroupIndex, + content: ByteString, + signatures: NonEmpty[Seq[Signature]], + )(implicit traceContext: TraceContext): EitherT[Future, SignatureCheckError, Unit] = { + val hash = snapshot.pureCrypto.digest(HashPurpose.SignedProtocolMessageSignature, content) + snapshot.verifySignatures(hash, mediatorGroupIndex, signatures) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/Envelope.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/Envelope.scala new file mode 100644 index 0000000000..dfbfe8893f --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/Envelope.scala @@ -0,0 +1,25 @@ +// Copyright (c) 2023 Digital 
Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.protocol + +import com.digitalasset.canton.logging.pretty.PrettyPrinting +import com.digitalasset.canton.topology.Member + +/** An [[Envelope]] wraps an envelope content such as a [[com.digitalasset.canton.protocol.messages.ProtocolMessage]] + * together with the recipients. + * + * @tparam M The type of the envelope content + */ +trait Envelope[+M] extends PrettyPrinting { + + def recipients: Recipients + + def forRecipient( + member: Member, + groupAddresses: Set[GroupRecipient], + ): Option[Envelope[M]] + + /** Closes the envelope by serializing the contents if necessary */ + def closeEnvelope: ClosedEnvelope +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/HandshakeRequest.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/HandshakeRequest.scala new file mode 100644 index 0000000000..ebc285beee --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/HandshakeRequest.scala @@ -0,0 +1,42 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.protocol + +import cats.syntax.traverse.* +import com.digitalasset.canton.protocol.v0 +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.version.ProtocolVersion + +final case class HandshakeRequest( + clientProtocolVersions: Seq[ProtocolVersion], + minimumProtocolVersion: Option[ProtocolVersion], +) { + + // IMPORTANT: changing the version handshakes can lead to issues with upgrading domains - be very careful + // when changing the handshake message format + def toProtoV0: v0.Handshake.Request = + v0.Handshake.Request( + clientProtocolVersions.map(_.toProtoPrimitiveS), + minimumProtocolVersion.map(_.toProtoPrimitiveS), + ) + + // IMPORTANT: changing the version handshakes can lead to issues with upgrading domains - be very careful + // when changing the handshake message format + def toByteArrayV0: Array[Byte] = toProtoV0.toByteArray + +} + +object HandshakeRequest { + def fromProtoV0( + requestP: v0.Handshake.Request + ): ParsingResult[HandshakeRequest] = + for { + clientProtocolVersions <- requestP.clientProtocolVersions.traverse(version => + ProtocolVersion.fromProtoPrimitiveS(version) + ) + minimumProtocolVersion <- requestP.minimumProtocolVersion.traverse( + ProtocolVersion.fromProtoPrimitiveS(_) + ) + } yield HandshakeRequest(clientProtocolVersions, minimumProtocolVersion) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/HandshakeResponse.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/HandshakeResponse.scala new file mode 100644 index 0000000000..fe5dc93295 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/HandshakeResponse.scala @@ -0,0 +1,48 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.protocol + +import com.digitalasset.canton.ProtoDeserializationError +import com.digitalasset.canton.protocol.v0 +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.version.ProtocolVersion + +sealed trait HandshakeResponse { + val serverProtocolVersion: ProtocolVersion + + def toProtoV0: v0.Handshake.Response +} + +object HandshakeResponse { + final case class Success(serverProtocolVersion: ProtocolVersion) extends HandshakeResponse { + override def toProtoV0: v0.Handshake.Response = + v0.Handshake.Response( + serverProtocolVersion.toProtoPrimitiveS, + v0.Handshake.Response.Value.Success(v0.Handshake.Success()), + ) + } + final case class Failure(serverProtocolVersion: ProtocolVersion, reason: String) + extends HandshakeResponse { + override def toProtoV0: v0.Handshake.Response = + v0.Handshake + .Response( + serverProtocolVersion.toProtoPrimitiveS, + v0.Handshake.Response.Value.Failure(v0.Handshake.Failure(reason)), + ) + } + + def fromProtoV0( + responseP: v0.Handshake.Response + ): ParsingResult[HandshakeResponse] = + responseP.value match { + case v0.Handshake.Response.Value.Empty => + Left(ProtoDeserializationError.FieldNotSet("Handshake.Response.value")) + case v0.Handshake.Response.Value.Success(_success) => + ProtocolVersion.fromProtoPrimitiveS(responseP.serverProtocolVersion).map(Success) + case v0.Handshake.Response.Value.Failure(failure) => + ProtocolVersion + .fromProtoPrimitiveS(responseP.serverProtocolVersion) + .map(Failure(_, failure.reason)) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/MessageId.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/MessageId.scala new file mode 100644 index 0000000000..1291798e86 --- /dev/null +++ 
b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/MessageId.scala @@ -0,0 +1,30 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.protocol + +import com.digitalasset.canton.checked +import com.digitalasset.canton.config.CantonRequireTypes.{ + LengthLimitedStringWrapper, + LengthLimitedStringWrapperCompanion, + String73, +} +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} + +import java.util.UUID + +/** Identifier assigned by caller to a submission request. */ +final case class MessageId(override protected val str: String73) + extends LengthLimitedStringWrapper + with PrettyPrinting { + override def pretty: Pretty[MessageId] = prettyOfString(_.unwrap) +} + +object MessageId extends LengthLimitedStringWrapperCompanion[String73, MessageId] { + def fromUuid(uuid: UUID): MessageId = MessageId(checked(String73.tryCreate(uuid.toString))) + def randomMessageId(): MessageId = fromUuid(UUID.randomUUID()) + + override protected def companion: String73.type = String73 + override def instanceName: String = "MessageId" + override protected def factoryMethodWrapper(str: String73): MessageId = new MessageId(str) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/OpenEnvelope.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/OpenEnvelope.scala new file mode 100644 index 0000000000..59faa1d2a1 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/OpenEnvelope.scala @@ -0,0 +1,51 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.protocol + +import cats.Functor +import com.digitalasset.canton.logging.pretty.Pretty +import com.digitalasset.canton.protocol.messages.{DefaultOpenEnvelope, ProtocolMessage} +import com.digitalasset.canton.topology.Member +import com.digitalasset.canton.version.ProtocolVersion + +/** An [[OpenEnvelope]] contains a not serialized protocol message + * + * @tparam M The type of the protocol message + */ +final case class OpenEnvelope[+M <: ProtocolMessage]( + protocolMessage: M, + override val recipients: Recipients, +)(protocolVersion: ProtocolVersion) + extends Envelope[M] { + + @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) + def traverse[F[_], MM <: ProtocolMessage]( + f: M => F[MM] + )(implicit F: Functor[F]): F[OpenEnvelope[MM]] = + F.map(f(protocolMessage)) { newProtocolMessage => + if (newProtocolMessage eq protocolMessage) this.asInstanceOf[OpenEnvelope[MM]] + else this.copy(protocolMessage = newProtocolMessage) + } + + override def pretty: Pretty[DefaultOpenEnvelope] = + prettyOfClass(unnamedParam(_.protocolMessage), param("recipients", _.recipients)) + + override def forRecipient( + member: Member, + groupAddresses: Set[GroupRecipient], + ): Option[OpenEnvelope[M]] = { + val subtrees = recipients.forMember(member, groupAddresses) + subtrees.map(s => this.copy(recipients = s)) + } + + /** Closes the envelope by serializing the contents */ + override def closeEnvelope: ClosedEnvelope = + ClosedEnvelope.fromProtocolMessage(protocolMessage, recipients, protocolVersion) + + /** Copy method without the second argument so that it can be used with autogenerated lenses */ + def copy[MM <: ProtocolMessage]( + protocolMessage: MM = protocolMessage, + recipients: Recipients = recipients, + ): OpenEnvelope[MM] = OpenEnvelope(protocolMessage, recipients)(protocolVersion) +} diff --git 
a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/Recipient.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/Recipient.scala new file mode 100644 index 0000000000..b1b60a6faa --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/Recipient.scala @@ -0,0 +1,191 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.protocol + +import cats.syntax.either.* +import com.digitalasset.canton.ProtoDeserializationError.{ + InvariantViolation, + StringConversionError, + ValueConversionError, +} +import com.digitalasset.canton.config.CantonRequireTypes.{String3, String300} +import com.digitalasset.canton.config.RequireTypes.NonNegativeInt +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.topology.MediatorGroup.MediatorGroupIndex +import com.digitalasset.canton.topology.{Member, PartyId, SafeSimpleString, UniqueIdentifier} + +sealed trait Recipient extends Product with Serializable with PrettyPrinting { + def toProtoPrimitive: String = toLengthLimitedString.unwrap + + def toLengthLimitedString: String300 +} + +object Recipient { + def fromProtoPrimitive( + recipient: String, + fieldName: String, + ): ParsingResult[Recipient] = { + val dlen = SafeSimpleString.delimiter.length + val (typ, rest) = { + val (code, str) = recipient.splitAt(3) + (code, str.drop(dlen)) + } + lazy val codeE = GroupRecipientCode.fromProtoPrimitive(typ, fieldName) + + if (codeE.isLeft) + Member.fromProtoPrimitive(recipient, fieldName).map(MemberRecipient) + else + for { + _ <- Either.cond( + recipient.length >= 3 + dlen, + (), + ValueConversionError( + fieldName, + s"Invalid group recipient `$recipient`, expecting 
::id::info.", + ), + ) + _ <- Either.cond( + recipient.substring(3, 3 + dlen) == SafeSimpleString.delimiter, + (), + ValueConversionError( + fieldName, + s"Expected delimiter ${SafeSimpleString.delimiter} after three letter code of `$recipient`", + ), + ) + code <- codeE + groupRecipient <- code match { + case ParticipantsOfParty.Code => + UniqueIdentifier + .fromProtoPrimitive(rest, fieldName) + .map(PartyId(_)) + .map(ParticipantsOfParty(_)) + case SequencersOfDomain.Code => + Right(SequencersOfDomain) + case MediatorsOfDomain.Code => + for { + groupInt <- + Either + .catchOnly[NumberFormatException](rest.toInt) + .leftMap(e => + StringConversionError( + s"Cannot parse group number $rest, error ${e.getMessage}" + ) + ) + group <- NonNegativeInt + .create(groupInt) + .leftMap(e => InvariantViolation(e.message)) + } yield MediatorsOfDomain(group) + case AllMembersOfDomain.Code => + Right(AllMembersOfDomain) + } + } yield groupRecipient + } + +} + +sealed trait GroupRecipientCode { + def threeLetterId: String3 + + def toProtoPrimitive: String = threeLetterId.unwrap +} + +object GroupRecipientCode { + def fromProtoPrimitive_(code: String): Either[String, GroupRecipientCode] = + String3.create(code).flatMap { + case ParticipantsOfParty.Code.threeLetterId => Right(ParticipantsOfParty.Code) + case SequencersOfDomain.Code.threeLetterId => Right(SequencersOfDomain.Code) + case MediatorsOfDomain.Code.threeLetterId => Right(MediatorsOfDomain.Code) + case AllMembersOfDomain.Code.threeLetterId => Right(AllMembersOfDomain.Code) + case _ => Left(s"Unknown three letter type $code") + } + + def fromProtoPrimitive( + code: String, + field: String, + ): ParsingResult[GroupRecipientCode] = + fromProtoPrimitive_(code).leftMap(ValueConversionError(field, _)) +} + +sealed trait GroupRecipient extends Recipient { + def code: GroupRecipientCode + def suffix: String + + def toLengthLimitedString: String300 = + String300.tryCreate( + 
s"${code.threeLetterId.unwrap}${SafeSimpleString.delimiter}$suffix" + ) +} + +object TopologyBroadcastAddress { + val recipient: Recipient = AllMembersOfDomain +} + +final case class MemberRecipient(member: Member) extends Recipient { + override def pretty: Pretty[MemberRecipient] = + prettyOfClass( + unnamedParam(_.member) + ) + + override def toLengthLimitedString: String300 = member.toLengthLimitedString +} + +final case class ParticipantsOfParty(party: PartyId) extends GroupRecipient { + override def pretty: Pretty[ParticipantsOfParty] = + prettyOfClass( + unnamedParam(_.party) + ) + + override def code: GroupRecipientCode = ParticipantsOfParty.Code + + override def suffix: String = party.toProtoPrimitive +} + +object ParticipantsOfParty { + object Code extends GroupRecipientCode { + val threeLetterId: String3 = String3.tryCreate("POP") + } +} + +final case object SequencersOfDomain extends GroupRecipient { + override def pretty: Pretty[SequencersOfDomain.type] = + prettyOfObject[SequencersOfDomain.type] + + override def code: GroupRecipientCode = SequencersOfDomain.Code + + override def suffix: String = "" + + object Code extends GroupRecipientCode { + val threeLetterId: String3 = String3.tryCreate("SOD") + } +} + +final case class MediatorsOfDomain(group: MediatorGroupIndex) extends GroupRecipient { + override def pretty: Pretty[MediatorsOfDomain] = + prettyOfClass( + param("group", _.group) + ) + + override def code: GroupRecipientCode = MediatorsOfDomain.Code + + override def suffix: String = group.toString +} + +object MediatorsOfDomain { + object Code extends GroupRecipientCode { + val threeLetterId: String3 = String3.tryCreate("MOD") + } +} + +case object AllMembersOfDomain extends GroupRecipient { + override def pretty: Pretty[AllMembersOfDomain.type] = + prettyOfString(_ => suffix) + + override def code: GroupRecipientCode = Code + + override def suffix: String = "All" + object Code extends GroupRecipientCode { + val threeLetterId: String3 = 
String3.tryCreate("ALL") + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/Recipients.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/Recipients.scala new file mode 100644 index 0000000000..4babf4661a --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/Recipients.scala @@ -0,0 +1,107 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.protocol + +import cats.syntax.reducible.* +import cats.syntax.traverse.* +import com.daml.nonempty.NonEmpty +import com.daml.nonempty.catsinstances.* +import com.digitalasset.canton.ProtoDeserializationError +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.protocol.v0 +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.topology.Member + +/** Recipients of a batch. Uses a list of [[com.digitalasset.canton.sequencing.protocol.RecipientsTree]]s + * that define the members receiving a batch, and which members see which other recipients. 
+ */ +final case class Recipients(trees: NonEmpty[Seq[RecipientsTree]]) extends PrettyPrinting { + + lazy val allRecipients: NonEmpty[Set[Recipient]] = trees + .flatMap(t => t.allRecipients) + .toSet + + def allPaths: NonEmpty[Seq[NonEmpty[Seq[NonEmpty[Set[Recipient]]]]]] = trees.flatMap(_.allPaths) + + def forMember( + member: Member, + groupRecipients: Set[GroupRecipient], + ): Option[Recipients] = { + val ts = trees.forgetNE.flatMap(t => t.forMember(member, groupRecipients)) + val optTs = NonEmpty.from(ts) + optTs.map(Recipients(_)) + } + + def toProtoV0: v0.Recipients = { + val protoTrees = trees.map(_.toProtoV0) + new v0.Recipients(protoTrees.toList) + } + + override def pretty: Pretty[Recipients.this.type] = + prettyOfClass(param("Recipient trees", _.trees.toList)) + + def asSingleGroup: Option[NonEmpty[Set[Recipient]]] = { + trees match { + case Seq(RecipientsTree(group, Seq())) => + NonEmpty.from(group) + case _ => None + } + } + + /** Recipients that appear at the leaf of the BCC tree. For example, the informees of a view are leaf members of the + * view message. + */ + lazy val leafRecipients: NonEmpty[Set[Recipient]] = + trees.toNEF.reduceLeftTo(_.leafRecipients)(_ ++ _.leafRecipients) +} + +object Recipients { + + def fromProtoV0( + proto: v0.Recipients, + supportGroupAddressing: Boolean, + ): ParsingResult[Recipients] = { + for { + trees <- proto.recipientsTree.traverse(t => + RecipientsTree.fromProtoV0(t, supportGroupAddressing) + ) + recipients <- NonEmpty + .from(trees) + .toRight( + ProtoDeserializationError.ValueConversionError( + "RecipientsTree.recipients", + s"RecipientsTree.recipients must be non-empty", + ) + ) + } yield Recipients(recipients) + } + + /** Create a [[com.digitalasset.canton.sequencing.protocol.Recipients]] representing a group of + * members that "see" each other. 
+ */ + def cc(first: Member, others: Member*): Recipients = + Recipients(NonEmpty(Seq, RecipientsTree.leaf(NonEmpty(Set, first, others: _*)))) + + def cc(recipient: Recipient, others: Recipient*): Recipients = { + Recipients(NonEmpty.mk(Seq, RecipientsTree(NonEmpty.mk(Set, recipient, others *), Seq.empty))) + } + + /** Create a [[com.digitalasset.canton.sequencing.protocol.Recipients]] representing independent groups of members + * that do not "see" each other. + */ + def groups(groups: NonEmpty[Seq[NonEmpty[Set[Member]]]]): Recipients = + Recipients(groups.map(group => RecipientsTree.leaf(group))) + + /** Create a [[com.digitalasset.canton.sequencing.protocol.Recipients]] representing independent groups of [[Recipient]] + * that do not "see" each other. + */ + def recipientGroups(groups: NonEmpty[Seq[NonEmpty[Set[Recipient]]]]): Recipients = + Recipients(groups.map(group => RecipientsTree.recipientsLeaf(group))) + + def ofSet[T <: Member](set: Set[T]): Option[Recipients] = { + val members = set.toList + NonEmpty.from(members).map(list => Recipients.cc(list.head1, list.tail1: _*)) + } + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/RecipientsTree.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/RecipientsTree.scala new file mode 100644 index 0000000000..38eca75cc0 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/RecipientsTree.scala @@ -0,0 +1,118 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.protocol + +import cats.syntax.reducible.* +import cats.syntax.traverse.* +import com.daml.nonempty.NonEmpty +import com.daml.nonempty.catsinstances.* +import com.digitalasset.canton.ProtoDeserializationError +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.protocol.v0 +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.topology.Member + +/** A tree representation of the recipients for a batch. + * Each member receiving the batch should see only subtrees of recipients from a node containing + * the member. If a member is present in a subtree A and a sub-subtree of A then it should only see + * the top-level subtree A. + */ +final case class RecipientsTree( + recipientGroup: NonEmpty[Set[Recipient]], + children: Seq[RecipientsTree], +) extends PrettyPrinting { + + override def pretty: Pretty[RecipientsTree] = + prettyOfClass( + param("recipient group", _.recipientGroup.toList), + paramIfNonEmpty("children", _.children), + ) + + lazy val allRecipients: NonEmpty[Set[Recipient]] = { + val tail: Set[Recipient] = children.flatMap(t => t.allRecipients).toSet + recipientGroup ++ tail + } + + def allPaths: NonEmpty[Seq[NonEmpty[Seq[NonEmpty[Set[Recipient]]]]]] = + NonEmpty.from(children) match { + case Some(childrenNE) => + childrenNE.flatMap { child => + child.allPaths.map(p => recipientGroup +: p) + } + case None => NonEmpty(Seq, NonEmpty(Seq, recipientGroup)) + } + + def forMember( + member: Member, + groupRecipients: Set[GroupRecipient], + ): Seq[RecipientsTree] = + if ( + recipientGroup + .exists { + case MemberRecipient(m) => member == m + case g: GroupRecipient => + groupRecipients.contains(g) + } + ) { + Seq(this) + } else { + children.flatMap(c => c.forMember(member, groupRecipients)) + } + + lazy val leafRecipients: NonEmpty[Set[Recipient]] = children match { + case 
NonEmpty(cs) => cs.toNEF.reduceLeftTo(_.leafRecipients)(_ ++ _.leafRecipients) + case _ => recipientGroup.map(m => m: Recipient) + } + + def toProtoV0: v0.RecipientsTree = { + val recipientsP = recipientGroup.toSeq.map(_.toProtoPrimitive).sorted + val childrenP = children.map(_.toProtoV0) + new v0.RecipientsTree(recipientsP, childrenP) + } +} + +object RecipientsTree { + def ofRecipients( + recipientGroup: NonEmpty[Set[Recipient]], + children: Seq[RecipientsTree], + ): RecipientsTree = RecipientsTree(recipientGroup, children) + + def ofMembers( + recipientGroup: NonEmpty[Set[Member]], + children: Seq[RecipientsTree], + ): RecipientsTree = RecipientsTree(recipientGroup.map(MemberRecipient), children) + + def leaf(group: NonEmpty[Set[Member]]): RecipientsTree = + RecipientsTree(group.map(MemberRecipient), Seq.empty) + + def recipientsLeaf(group: NonEmpty[Set[Recipient]]): RecipientsTree = + RecipientsTree(group, Seq.empty) + + def fromProtoV0( + treeProto: v0.RecipientsTree, + supportGroupAddressing: Boolean, + ): ParsingResult[RecipientsTree] = { + for { + members <- treeProto.recipients.traverse(str => + if (supportGroupAddressing) + Recipient.fromProtoPrimitive(str, "RecipientsTreeProto.recipients") + else Member.fromProtoPrimitive(str, "RecipientsTreeProto.recipients").map(MemberRecipient) + ) + recipientsNonEmpty <- NonEmpty + .from(members) + .toRight( + ProtoDeserializationError.ValueConversionError( + "RecipientsTree.recipients", + s"RecipientsTree.recipients must be non-empty", + ) + ) + children = treeProto.children + childTrees <- children.toList.traverse(fromProtoV0(_, supportGroupAddressing)) + } yield RecipientsTree( + recipientsNonEmpty.toSet, + childTrees, + ) + } + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/SendAsyncError.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/SendAsyncError.scala new file mode 100644 index 0000000000..ea4659c76d --- 
/dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/SendAsyncError.scala @@ -0,0 +1,188 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.protocol + +import cats.syntax.either.* +import cats.syntax.traverse.* +import com.daml.error.ErrorCategory +import com.digitalasset.canton.ProtoDeserializationError +import com.digitalasset.canton.domain.api.v0 +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult + +/** Synchronous error returned by a sequencer. */ +sealed trait SendAsyncError extends PrettyPrinting { + + val message: String + + protected def toResponseReasonProto: v0.SendAsyncResponse.Error.Reason + protected def toSignedResponseReasonProto: v0.SendAsyncSignedResponse.Error.Reason + + private[protocol] def toSendAsyncResponseProto: v0.SendAsyncResponse.Error = + v0.SendAsyncResponse.Error(toResponseReasonProto) + + private[protocol] def toSendAsyncSignedResponseProto: v0.SendAsyncSignedResponse.Error = + v0.SendAsyncSignedResponse.Error(toSignedResponseReasonProto) + + override def pretty: Pretty[SendAsyncError] = prettyOfClass(unnamedParam(_.message.unquoted)) + + def category: ErrorCategory + +} + +object SendAsyncError { + + /** The request could not be deserialized to be processed */ + final case class RequestInvalid(message: String) extends SendAsyncError { + protected def toResponseReasonProto: v0.SendAsyncResponse.Error.Reason = + v0.SendAsyncResponse.Error.Reason.RequestInvalid(message) + protected def toSignedResponseReasonProto: v0.SendAsyncSignedResponse.Error.Reason = + v0.SendAsyncSignedResponse.Error.Reason.RequestInvalid(message) + override def category: ErrorCategory = ErrorCategory.InvalidIndependentOfSystemState + } + + /** The request server could read the request 
but refused to accept it */ + final case class RequestRefused(message: String) extends SendAsyncError { + protected def toResponseReasonProto: v0.SendAsyncResponse.Error.Reason = + v0.SendAsyncResponse.Error.Reason.RequestRefused(message) + protected def toSignedResponseReasonProto: v0.SendAsyncSignedResponse.Error.Reason = + v0.SendAsyncSignedResponse.Error.Reason.RequestRefused(message) + override def category: ErrorCategory = ErrorCategory.InvalidGivenCurrentSystemStateOther + } + + /** The Sequencer is overloaded and declined to handle the request */ + final case class Overloaded(message: String) extends SendAsyncError { + protected def toResponseReasonProto: v0.SendAsyncResponse.Error.Reason = + v0.SendAsyncResponse.Error.Reason.Overloaded(message) + protected def toSignedResponseReasonProto: v0.SendAsyncSignedResponse.Error.Reason = + v0.SendAsyncSignedResponse.Error.Reason.Overloaded(message) + override def category: ErrorCategory = ErrorCategory.ContentionOnSharedResources + } + + /** The sequencer is unable to process requests (if the service is running it could mean the sequencer is going through a crash recovery process) */ + final case class Unavailable(message: String) extends SendAsyncError { + protected def toResponseReasonProto: v0.SendAsyncResponse.Error.Reason = + v0.SendAsyncResponse.Error.Reason.Unavailable(message) + protected def toSignedResponseReasonProto: v0.SendAsyncSignedResponse.Error.Reason = + v0.SendAsyncSignedResponse.Error.Reason.Unavailable(message) + override def category: ErrorCategory = ErrorCategory.TransientServerFailure + } + + /** The Sequencer was unable to handle the send as the sender was unknown so could not asynchronously deliver them a deliver event or error */ + final case class SenderUnknown(message: String) extends SendAsyncError { + protected def toResponseReasonProto: v0.SendAsyncResponse.Error.Reason = + v0.SendAsyncResponse.Error.Reason.SenderUnknown(message) + protected def toSignedResponseReasonProto: 
v0.SendAsyncSignedResponse.Error.Reason =
+      v0.SendAsyncSignedResponse.Error.Reason.SenderUnknown(message)
+    override def category: ErrorCategory = ErrorCategory.InvalidGivenCurrentSystemStateOther
+  }
+
+  /** One or more recipients of the submission request are not known to the sequencer. */
+  final case class UnknownRecipients(message: String) extends SendAsyncError {
+    // Bug fix: these previously serialized as `Reason.SenderUnknown`, which broke the
+    // round trip — `fromErrorProto`/`fromSignedErrorProto` decode `Reason.UnknownRecipients`
+    // into this class, so serialization must emit the matching proto case.
+    protected def toResponseReasonProto: v0.SendAsyncResponse.Error.Reason =
+      v0.SendAsyncResponse.Error.Reason.UnknownRecipients(message)
+    protected def toSignedResponseReasonProto: v0.SendAsyncSignedResponse.Error.Reason =
+      v0.SendAsyncSignedResponse.Error.Reason.UnknownRecipients(message)
+    override def category: ErrorCategory = ErrorCategory.InvalidGivenCurrentSystemStateOther
+  }
+
+  /** The Sequencer declined to process new requests as it is shutting down */
+  final case class ShuttingDown(message: String = "Sequencer shutting down")
+      extends SendAsyncError {
+    protected def toResponseReasonProto: v0.SendAsyncResponse.Error.Reason =
+      v0.SendAsyncResponse.Error.Reason.ShuttingDown(message)
+    protected def toSignedResponseReasonProto: v0.SendAsyncSignedResponse.Error.Reason =
+      v0.SendAsyncSignedResponse.Error.Reason.ShuttingDown(message)
+    override def category: ErrorCategory = ErrorCategory.TransientServerFailure
+  }
+
+  final case class Internal(message: String) extends SendAsyncError {
+    protected def toResponseReasonProto: v0.SendAsyncResponse.Error.Reason =
+      throw new IllegalStateException(
+        "Message `Internal` introduced with protocol version 4 should not be included in `v0.SendAsyncResponse`"
+      )
+
+    protected def toSignedResponseReasonProto: v0.SendAsyncSignedResponse.Error.Reason =
+      v0.SendAsyncSignedResponse.Error.Reason.Internal(message)
+
+    override def category: ErrorCategory = ErrorCategory.TransientServerFailure
+  }
+
+  final case class Generic(message: String) extends SendAsyncError {
+    protected def toResponseReasonProto: v0.SendAsyncResponse.Error.Reason =
+      throw new IllegalStateException(
+        "Message `Generic` introduced with protocol version 4 should not be
included in `v0.SendAsyncResponse`" + ) + protected def toSignedResponseReasonProto: v0.SendAsyncSignedResponse.Error.Reason = + v0.SendAsyncSignedResponse.Error.Reason.Generic(message) + + override def category: ErrorCategory = ErrorCategory.TransientServerFailure + } + + private[protocol] def fromErrorProto( + error: v0.SendAsyncResponse.Error + ): ParsingResult[SendAsyncError] = + error.reason match { + case v0.SendAsyncResponse.Error.Reason.Empty => + ProtoDeserializationError.FieldNotSet("SendAsyncResponse.error.reason").asLeft + case v0.SendAsyncResponse.Error.Reason.RequestInvalid(message) => + RequestInvalid(message).asRight + case v0.SendAsyncResponse.Error.Reason.RequestRefused(message) => + RequestRefused(message).asRight + case v0.SendAsyncResponse.Error.Reason.Overloaded(message) => Overloaded(message).asRight + case v0.SendAsyncResponse.Error.Reason.Unavailable(message) => Unavailable(message).asRight + case v0.SendAsyncResponse.Error.Reason.SenderUnknown(message) => + SenderUnknown(message).asRight + case v0.SendAsyncResponse.Error.Reason.UnknownRecipients(message) => + UnknownRecipients(message).asRight + case v0.SendAsyncResponse.Error.Reason.ShuttingDown(message) => ShuttingDown(message).asRight + } + + private[protocol] def fromSignedErrorProto( + error: v0.SendAsyncSignedResponse.Error + ): ParsingResult[SendAsyncError] = + error.reason match { + case v0.SendAsyncSignedResponse.Error.Reason.Empty => + ProtoDeserializationError.FieldNotSet("SendAsyncResponse.error.reason").asLeft + case v0.SendAsyncSignedResponse.Error.Reason.RequestInvalid(message) => + RequestInvalid(message).asRight + case v0.SendAsyncSignedResponse.Error.Reason.RequestRefused(message) => + RequestRefused(message).asRight + case v0.SendAsyncSignedResponse.Error.Reason.Overloaded(message) => + Overloaded(message).asRight + case v0.SendAsyncSignedResponse.Error.Reason.Unavailable(message) => + Unavailable(message).asRight + case 
v0.SendAsyncSignedResponse.Error.Reason.SenderUnknown(message) => + SenderUnknown(message).asRight + case v0.SendAsyncSignedResponse.Error.Reason.UnknownRecipients(message) => + UnknownRecipients(message).asRight + case v0.SendAsyncSignedResponse.Error.Reason.ShuttingDown(message) => + ShuttingDown(message).asRight + case v0.SendAsyncSignedResponse.Error.Reason.Internal(message) => Internal(message).asRight + case v0.SendAsyncSignedResponse.Error.Reason.Generic(message) => Generic(message).asRight + } +} + +final case class SendAsyncResponse(error: Option[SendAsyncError]) { + def toSendAsyncResponseProto: v0.SendAsyncResponse = + v0.SendAsyncResponse(error.map(_.toSendAsyncResponseProto)) + + def toSendAsyncSignedResponseProto: v0.SendAsyncSignedResponse = + v0.SendAsyncSignedResponse(error.map(_.toSendAsyncSignedResponseProto)) +} + +object SendAsyncResponse { + def fromSendAsyncResponseProto( + responseP: v0.SendAsyncResponse + ): ParsingResult[SendAsyncResponse] = + for { + error <- responseP.error.traverse(SendAsyncError.fromErrorProto) + } yield SendAsyncResponse(error) + + def fromSendAsyncSignedResponseProto( + responseP: v0.SendAsyncSignedResponse + ): ParsingResult[SendAsyncResponse] = + for { + error <- responseP.error.traverse(SendAsyncError.fromSignedErrorProto) + } yield SendAsyncResponse(error) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/SequencedEvent.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/SequencedEvent.scala new file mode 100644 index 0000000000..b5dab97c6d --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/SequencedEvent.scala @@ -0,0 +1,379 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.protocol + +import cats.Applicative +import com.digitalasset.canton.ProtoDeserializationError.OtherError +import com.digitalasset.canton.* +import com.digitalasset.canton.crypto.HashOps +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.protocol.messages.{DefaultOpenEnvelope, ProtocolMessage} +import com.digitalasset.canton.protocol.v1 +import com.digitalasset.canton.sequencing.{EnvelopeBox, RawSignedContentEnvelopeBox} +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.serialization.{ProtoConverter, ProtocolVersionedMemoizedEvidence} +import com.digitalasset.canton.topology.DomainId +import com.digitalasset.canton.util.NoCopy +import com.digitalasset.canton.version.{ + HasMemoizedProtocolVersionedWrapperCompanion2, + HasProtocolVersionedWrapper, + ProtoVersion, + ProtocolVersion, + RepresentativeProtocolVersion, +} +import com.google.common.annotations.VisibleForTesting +import com.google.protobuf.ByteString +import com.google.rpc.status.Status +import pprint.Tree +import pprint.Tree.{Apply, KeyValue, Literal} + +/** The Deliver events are received as a consequence of a '''Send''' command, received by the recipients of the + * originating '''Send''' event. + */ +sealed trait SequencedEvent[+Env <: Envelope[_]] + extends Product + with Serializable + with ProtocolVersionedMemoizedEvidence + with PrettyPrinting + with HasProtocolVersionedWrapper[SequencedEvent[Envelope[_]]] { + + @transient override protected lazy val companionObj: SequencedEvent.type = SequencedEvent + + protected def toProtoV1: v1.SequencedEvent + + /** a sequence counter for each recipient. 
+ */ + val counter: SequencerCounter + + /** a timestamp defining the order (requestId) + */ + val timestamp: CantonTimestamp + + /** The domain which this deliver event belongs to */ + val domainId: DomainId + + def isTombstone: Boolean = false + + protected[this] def toByteStringUnmemoized: ByteString = + super[HasProtocolVersionedWrapper].toByteString + + protected def traverse[F[_], Env2 <: Envelope[_]](f: Env => F[Env2])(implicit + F: Applicative[F] + ): F[SequencedEvent[Env2]] + + def envelopes: Seq[Env] +} + +object SequencedEvent + extends HasMemoizedProtocolVersionedWrapperCompanion2[ + SequencedEvent[Envelope[_]], + SequencedEvent[ClosedEnvelope], + ] { + override def name: String = "SequencedEvent" + + override val supportedProtoVersions: SupportedProtoVersions = SupportedProtoVersions( + ProtoVersion(1) -> VersionedProtoConverter(ProtocolVersion.v30)(v1.SequencedEvent)( + supportedProtoVersionMemoized(_)(fromProtoV1), + _.toProtoV1.toByteString, + ) + ) + + private[sequencing] def fromProtoV1(sequencedEventP: v1.SequencedEvent)( + bytes: ByteString + ): ParsingResult[SequencedEvent[ClosedEnvelope]] = { + import cats.syntax.traverse.* + val v1.SequencedEvent(counter, tsP, domainIdP, mbMsgIdP, mbBatchP, mbDeliverErrorReasonP) = + sequencedEventP + + val rpv = protocolVersionRepresentativeFor(ProtoVersion(1)) + val sequencerCounter = SequencerCounter(counter) + + for { + timestamp <- ProtoConverter + .required("SequencedEvent.timestamp", tsP) + .flatMap(CantonTimestamp.fromProtoPrimitive) + domainId <- DomainId.fromProtoPrimitive(domainIdP, "SequencedEvent.domainId") + mbBatch <- mbBatchP.traverse( + // TODO(i10428) Prevent zip bombing when decompressing the request + Batch.fromProtoV1(_, maxRequestSize = MaxRequestSizeToDeserialize.NoLimit) + ) + // errors have an error reason, delivers have a batch + event <- ((mbDeliverErrorReasonP, mbBatch) match { + case (Some(_), Some(_)) => + Left(OtherError("SequencedEvent cannot have both a deliver error and batch 
set")) + case (None, None) => + Left(OtherError("SequencedEvent cannot have neither a deliver error nor a batch set")) + case (Some(deliverErrorReason), None) => + for { + msgId <- ProtoConverter + .required("DeliverError", mbMsgIdP) + .flatMap(MessageId.fromProtoPrimitive) + } yield new DeliverError( + sequencerCounter, + timestamp, + domainId, + msgId, + deliverErrorReason, + )( + rpv, + Some(bytes), + ) {} + case (None, Some(batch)) => + mbMsgIdP match { + case None => + Right( + Deliver(sequencerCounter, timestamp, domainId, None, batch)( + rpv, + Some(bytes), + ) + ) + case Some(msgId) => + MessageId + .fromProtoPrimitive(msgId) + .map(msgId => + Deliver(sequencerCounter, timestamp, domainId, Some(msgId), batch)( + rpv, + Some(bytes), + ) + ) + } + }): ParsingResult[SequencedEvent[ClosedEnvelope]] + } yield event + } + + def fromByteStringOpen(hashOps: HashOps, protocolVersion: ProtocolVersion)( + bytes: ByteString + ): ParsingResult[SequencedEvent[DefaultOpenEnvelope]] = + fromByteString(bytes).flatMap(_.traverse(_.openEnvelope(hashOps, protocolVersion))) + + implicit val sequencedEventEnvelopeBox: EnvelopeBox[SequencedEvent] = + new EnvelopeBox[SequencedEvent] { + override private[sequencing] def traverse[G[_], A <: Envelope[_], B <: Envelope[_]]( + event: SequencedEvent[A] + )(f: A => G[B])(implicit G: Applicative[G]): G[SequencedEvent[B]] = + event.traverse(f) + } + + // It would be nice if we could appeal to a generic composition theorem here, + // but the `MemoizeEvidence` bound in `SignedContent` doesn't allow a generic `Traverse` instance. 
+ implicit val signedContentEnvelopeBox: EnvelopeBox[RawSignedContentEnvelopeBox] = + new EnvelopeBox[RawSignedContentEnvelopeBox] { + override private[sequencing] def traverse[G[_], Env1 <: Envelope[_], Env2 <: Envelope[_]]( + signedEvent: SignedContent[SequencedEvent[Env1]] + )(f: Env1 => G[Env2])(implicit G: Applicative[G]): G[RawSignedContentEnvelopeBox[Env2]] = + signedEvent.traverse(_.traverse(f)) + } + + def openEnvelopes( + event: SequencedEvent[ClosedEnvelope] + )(protocolVersion: ProtocolVersion, hashOps: HashOps): ( + SequencedEvent[OpenEnvelope[ProtocolMessage]], + Seq[ProtoDeserializationError], + ) = event match { + case deliver: Deliver[ClosedEnvelope] => + Deliver.openEnvelopes(deliver)(protocolVersion, hashOps) + case deliver: DeliverError => (deliver, Seq.empty) + } +} + +sealed abstract case class DeliverError private[sequencing] ( + override val counter: SequencerCounter, + override val timestamp: CantonTimestamp, + override val domainId: DomainId, + messageId: MessageId, + reason: Status, +)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[SequencedEvent.type], + override val deserializedFrom: Option[ByteString], +) extends SequencedEvent[Nothing] + with NoCopy { + + def toProtoV1: v1.SequencedEvent = v1.SequencedEvent( + counter = counter.toProtoPrimitive, + timestamp = Some(timestamp.toProtoPrimitive), + domainId = domainId.toProtoPrimitive, + messageId = Some(messageId.toProtoPrimitive), + batch = None, + deliverErrorReason = Some(reason), + ) + + override protected def traverse[F[_], Env <: Envelope[_]](f: Nothing => F[Env])(implicit + F: Applicative[F] + ): F[SequencedEvent[Env]] = F.pure(this) + + override def pretty: Pretty[DeliverError] = prettyOfClass( + param("counter", _.counter), + param("timestamp", _.timestamp), + param("domain id", _.domainId), + param("message id", _.messageId), + param("reason", _.reason), + ) + + def envelopes: Seq[Nothing] = Seq.empty + + override def isTombstone: Boolean = reason 
match { + case SequencerErrors.PersistTombstone(_) => true + case _ => false + } +} + +object DeliverError { + + implicit val prettyStatus: Pretty[Status] = new Pretty[Status] { + override def treeOf(t: Status): Tree = { + Apply( + "Status", + Seq( + KeyValue("Code", Literal(t.code.toString)), + KeyValue("Message", Literal(t.message)), + ).iterator, + ) + } + } + + def create( + counter: SequencerCounter, + timestamp: CantonTimestamp, + domainId: DomainId, + messageId: MessageId, + sequencerError: SequencerDeliverError, + protocolVersion: ProtocolVersion, + ): DeliverError = { + new DeliverError( + counter, + timestamp, + domainId, + messageId, + sequencerError.rpcStatusWithoutLoggingContext(), + )( + SequencedEvent.protocolVersionRepresentativeFor(protocolVersion), + None, + ) {} + } + + def create( + counter: SequencerCounter, + timestamp: CantonTimestamp, + domainId: DomainId, + messageId: MessageId, + status: Status, + protocolVersion: ProtocolVersion, + ): DeliverError = + new DeliverError(counter, timestamp, domainId, messageId, status)( + SequencedEvent.protocolVersionRepresentativeFor(protocolVersion), + None, + ) {} +} + +/** Intuitively, the member learns all envelopes addressed to it. It learns some recipients of + * these envelopes, as defined by + * [[com.digitalasset.canton.sequencing.protocol.Recipients.forMember]] + * + * @param counter a monotonically increasing counter for each recipient. + * @param timestamp a timestamp defining the order. + * @param messageIdO populated with the message id used on the originating send operation only for the sender + * @param batch a batch of envelopes. 
+ */ +@SuppressWarnings(Array("org.wartremover.warts.FinalCaseClass")) // This class is mocked in tests +case class Deliver[+Env <: Envelope[_]] private[sequencing] ( + override val counter: SequencerCounter, + override val timestamp: CantonTimestamp, + override val domainId: DomainId, + messageIdO: Option[MessageId], + batch: Batch[Env], +)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[SequencedEvent.type], + val deserializedFrom: Option[ByteString], +) extends SequencedEvent[Env] { + + /** Is this deliver event a receipt for a message that the receiver previously sent? + * (messageId is only populated for the sender) + */ + lazy val isReceipt: Boolean = messageIdO.isDefined + + protected[sequencing] def toProtoV1: v1.SequencedEvent = v1.SequencedEvent( + counter = counter.toProtoPrimitive, + timestamp = Some(timestamp.toProtoPrimitive), + domainId = domainId.toProtoPrimitive, + messageId = messageIdO.map(_.toProtoPrimitive), + batch = Some(batch.toProtoV1), + deliverErrorReason = None, + ) + + protected def traverse[F[_], Env2 <: Envelope[_]]( + f: Env => F[Env2] + )(implicit F: Applicative[F]) = + F.map(batch.traverse(f))( + Deliver(counter, timestamp, domainId, messageIdO, _)( + representativeProtocolVersion, + deserializedFrom, + ) + ) + + @VisibleForTesting + private[canton] def copy[Env2 <: Envelope[_]]( + counter: SequencerCounter = this.counter, + timestamp: CantonTimestamp = this.timestamp, + domainId: DomainId = this.domainId, + messageIdO: Option[MessageId] = this.messageIdO, + batch: Batch[Env2] = this.batch, + deserializedFromO: Option[ByteString] = None, + ): Deliver[Env2] = + Deliver[Env2](counter, timestamp, domainId, messageIdO, batch)( + representativeProtocolVersion, + deserializedFromO, + ) + + override def pretty: Pretty[this.type] = + prettyOfClass( + param("counter", _.counter), + param("timestamp", _.timestamp), + paramIfNonEmpty("message id", _.messageIdO), + param("domain id", _.domainId), + 
unnamedParam(_.batch), + ) + + def envelopes: Seq[Env] = batch.envelopes +} + +object Deliver { + def create[Env <: Envelope[_]]( + counter: SequencerCounter, + timestamp: CantonTimestamp, + domainId: DomainId, + messageIdO: Option[MessageId], + batch: Batch[Env], + protocolVersion: ProtocolVersion, + ): Deliver[Env] = + Deliver[Env](counter, timestamp, domainId, messageIdO, batch)( + SequencedEvent.protocolVersionRepresentativeFor(protocolVersion), + None, + ) + + def fromSequencedEvent[Env <: Envelope[_]]( + deliverEvent: SequencedEvent[Env] + ): Option[Deliver[Env]] = + deliverEvent match { + case deliver @ Deliver(_, _, _, _, _) => Some(deliver) + case _: DeliverError => None + } + + def openEnvelopes( + deliver: Deliver[ClosedEnvelope] + )(protocolVersion: ProtocolVersion, hashOps: HashOps): ( + Deliver[OpenEnvelope[ProtocolMessage]], + Seq[ProtoDeserializationError], + ) = { + val (openBatch, openingErrors) = + Batch.openEnvelopes(deliver.batch)(protocolVersion, hashOps) + val openDeliver = deliver.copy( + batch = openBatch, + // Keep the serialized representation only if there were no errors + deserializedFromO = if (openingErrors.isEmpty) deliver.deserializedFrom else None, + ) + + (openDeliver, openingErrors) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/SequencedEventTrafficState.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/SequencedEventTrafficState.scala new file mode 100644 index 0000000000..cb752af11f --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/SequencedEventTrafficState.scala @@ -0,0 +1,51 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.protocol + +import cats.syntax.apply.* +import com.digitalasset.canton.config.RequireTypes.{NonNegativeLong, PositiveLong} +import com.digitalasset.canton.domain.api.v0.SequencedEventTrafficState as SequencedEventTrafficStateP +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.store.db.RequiredTypesCodec.* +import slick.jdbc.GetResult + +/** Traffic state stored alongside sequenced events + */ +final case class SequencedEventTrafficState( + extraTrafficRemainder: NonNegativeLong, + extraTrafficConsumed: NonNegativeLong, +) { + lazy val extraTrafficLimit: Option[PositiveLong] = + PositiveLong.create((extraTrafficRemainder + extraTrafficConsumed).value).toOption + def toProtoV0: SequencedEventTrafficStateP = { + SequencedEventTrafficStateP( + extraTrafficRemainder = extraTrafficRemainder.value, + extraTrafficConsumed = extraTrafficConsumed.value, + ) + } + +} + +object SequencedEventTrafficState { + def fromProtoV0( + stateP: SequencedEventTrafficStateP + ): ParsingResult[SequencedEventTrafficState] = { + for { + extraTrafficRemainder <- ProtoConverter.parseNonNegativeLong(stateP.extraTrafficRemainder) + extraTrafficConsumed <- ProtoConverter.parseNonNegativeLong(stateP.extraTrafficConsumed) + } yield SequencedEventTrafficState( + extraTrafficRemainder = extraTrafficRemainder, + extraTrafficConsumed = extraTrafficConsumed, + ) + } + + implicit val sequencedEventTrafficStateGetResult: GetResult[Option[SequencedEventTrafficState]] = + GetResult + .createGetTuple2( + nonNegativeLongOptionGetResult, + nonNegativeLongOptionGetResult, + ) + .andThen(_.mapN(SequencedEventTrafficState.apply)) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/SequencerDeliverError.scala 
b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/SequencerDeliverError.scala new file mode 100644 index 0000000000..335695ce38 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/SequencerDeliverError.scala @@ -0,0 +1,204 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.protocol + +import com.daml.error.* +import com.digitalasset.canton.SequencerCounter +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.error.CantonErrorGroups.SequencerErrorGroup +import com.digitalasset.canton.error.{BaseCantonError, TransactionError, TransactionErrorImpl} +import com.digitalasset.canton.topology.Member +import com.google.rpc.status.Status + +import java.time.Instant +import scala.collection.immutable.Seq + +sealed trait SequencerDeliverError extends TransactionError + +sealed abstract class SequencerDeliverErrorCode(id: String, category: ErrorCategory)(implicit + parent: ErrorClass +) extends ErrorCode(id, category) { + require(category.grpcCode.isDefined, "gPRC code is required for the correct matching in unapply") + + def apply(message: String): SequencerDeliverError = { + new TransactionErrorImpl( + cause = message, + definiteAnswer = true, + ) with SequencerDeliverError + } + + /** Match the GRPC status on the ErrorCode and return the message string on success + */ + def unapply(rpcStatus: Status): Option[String] = + BaseCantonError.extractStatusErrorCodeMessage(this, rpcStatus) +} + +@Explanation("""Delivery errors wrapped into sequenced events""") +object SequencerErrors extends SequencerErrorGroup { + @Explanation("""This error occurs when the sequencer cannot parse the submission request.""") + @Resolution( + """This usually indicates a misconfiguration of the system components or an application bug and requires 
operator intervention.""" + ) + object SubmissionRequestMalformed + extends SequencerDeliverErrorCode( + id = "SEQUENCER_SUBMISSION_REQUEST_MALFORMED", + ErrorCategory.InvalidIndependentOfSystemState, + ) + + @Explanation( + """This error occurs when the sequencer cannot accept submission request due to the current state of the system.""" + ) + @Resolution( + """This usually indicates a misconfiguration of the system components or an application bug and requires operator intervention. Please refer to a specific error message to understand the exact cause.""" + ) + object SubmissionRequestRefused + extends SequencerDeliverErrorCode( + id = "SEQUENCER_SUBMISSION_REQUEST_REFUSED", + ErrorCategory.InvalidGivenCurrentSystemStateOther, + ) + + @Explanation( + """Timestamp of the signing key on the submission request is earlier than allowed by the dynamic domain parameters.""" + ) + @Resolution( + """This indicates a bug in Canton (a faulty node behaviour). Please contact customer support.""" + ) + object SigningTimestampTooEarly + extends SequencerDeliverErrorCode( + id = "SEQUENCER_SIGNING_TIMESTAMP_TOO_EARLY", + ErrorCategory.InvalidGivenCurrentSystemStateOther, + ) { + def apply( + signingTimestamp: CantonTimestamp, + sequencingTimestamp: CantonTimestamp, + ): SequencerDeliverError = + // We can't easily compute a valid signing timestamp because we'd have to scan through + // the domain parameter updates to compute a bound, as the signing tolerance is taken + // from the domain parameters valid at the signing timestamp, not the sequencing timestamp. + apply( + s"Signing timestamp $signingTimestamp is too early for sequencing time $sequencingTimestamp." + ) + } + + @Explanation( + """Timestamp of the signing key on the submission request is later than the sequencing time.""" + ) + @Resolution( + """This indicates a bug in Canton (a faulty node behaviour). 
Please contact customer support.""" + ) + object SigningTimestampAfterSequencingTimestamp + extends SequencerDeliverErrorCode( + id = "SEQUENCER_SIGNING_TIMESTAMP_AFTER_SEQUENCING_TIMESTAMP", + ErrorCategory.InvalidGivenCurrentSystemStateOther, + ) { + def apply( + signingTimestamp: CantonTimestamp, + sequencingTimestamp: CantonTimestamp, + ): SequencerDeliverError = + apply( + s"Invalid signing timestamp $signingTimestamp. The signing timestamp must be before or at $sequencingTimestamp." + ) + } + + @Explanation( + """Timestamp of the signing key is missing on the submission request.""" + ) + @Resolution( + """This indicates a bug in Canton (a faulty node behaviour). Please contact customer support.""" + ) + object SigningTimestampMissing + extends SequencerDeliverErrorCode( + id = "SEQUENCER_SIGNING_TIMESTAMP_MISSING", + ErrorCategory.InvalidGivenCurrentSystemStateOther, + ) + + @Explanation( + """Maximum sequencing time on the submission request is exceeding the maximum allowed interval into the future. Could be result of a concurrent dynamic domain parameter change for sequencerAggregateSubmissionTimeout.""" + ) + @Resolution( + """In case there was a recent concurrent dynamic domain parameter change, simply retry the submission. Otherwise this error code indicates a bug in Canton (a faulty node behaviour). 
Please contact customer support.""" + ) + object MaxSequencingTimeTooFar + extends SequencerDeliverErrorCode( + id = "SEQUENCER_MAX_SEQUENCING_TIME_TOO_FAR", + ErrorCategory.InvalidGivenCurrentSystemStateOther, + ) { + def apply( + messageId: MessageId, + maxSequencingTime: CantonTimestamp, + maxSequencingTimeUpperBound: Instant, + ): SequencerDeliverError = + apply( + s"Max sequencing time $maxSequencingTime for submission with id $messageId is too far in the future, currently bounded at $maxSequencingTimeUpperBound" + ) + } + + @Explanation( + """This error happens when a submission request specifies nodes that are not known to the sequencer.""" + ) + @Resolution( + """This indicates a bug in Canton (a faulty node behaviour). Please contact customer support.""" + ) + object UnknownRecipients + extends SequencerDeliverErrorCode( + id = "SEQUENCER_UNKNOWN_RECIPIENTS", + ErrorCategory.InvalidGivenCurrentSystemStateOther, + ) { + def apply(unknownRecipients: Seq[Member]): SequencerDeliverError = { + apply(s"Unknown recipients: ${unknownRecipients.toList.take(1000).mkString(", ")}") + } + } + + @Explanation( + """This error occurs when the sequencer has already sent out the aggregate submission for the request.""" + ) + @Resolution( + """This is expected to happen during operation of a system with aggregate submissions enabled. No action required.""" + ) + object AggregateSubmissionAlreadySent + extends SequencerDeliverErrorCode( + id = "SEQUENCER_AGGREGATE_SUBMISSION_ALREADY_SENT", + ErrorCategory.InvalidGivenCurrentSystemStateOther, + ) + + @Explanation( + """This error occurs when the sequencer already received the same submission request from the same sender.""" + ) + @Resolution( + """This error indicates that an aggregate submission has already been accepted by the sequencer and for some reason there is a repeated submission. This is likely caused by retrying a submission. 
This can usually be ignored.""" + ) + object AggregateSubmissionStuffing + extends SequencerDeliverErrorCode( + id = "SEQUENCER_AGGREGATE_SUBMISSION_STUFFING", + ErrorCategory.InvalidGivenCurrentSystemStateOther, + ) + + @Explanation( + """Sequencer has refused a submission request due to insufficient credits in the sender's traffic balance.""" + ) + @Resolution( + """Acquire more traffic credits with the system by topping up traffic credits for the sender.""" + ) + object TrafficCredit + extends SequencerDeliverErrorCode( + id = "SEQUENCER_NOT_ENOUGH_TRAFFIC_CREDIT", + ErrorCategory.InvalidGivenCurrentSystemStateOther, + ) + + @Explanation( + """An onboarded sequencer has put a tombstone in place of an event with a timestamp older than the sequencer signing key.""" + ) + @Resolution( + """Clients should connect to another sequencer with older event history to consume the tombstoned events + |before reconnecting to the recently onboarded sequencer.""" + ) + object PersistTombstone + extends SequencerDeliverErrorCode( + id = "SEQUENCER_TOMBSTONE_PERSISTED", + ErrorCategory.InvalidGivenCurrentSystemStateOther, + ) { + def apply(ts: CantonTimestamp, sc: SequencerCounter): SequencerDeliverError = + apply(s"Sequencer signing key not available at ${ts} and ${sc}") + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/SignedContent.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/SignedContent.scala new file mode 100644 index 0000000000..7cae065a7f --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/SignedContent.scala @@ -0,0 +1,268 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.protocol + +import cats.Functor +import cats.data.EitherT +import cats.syntax.either.* +import cats.syntax.traverse.* +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.config.RequireTypes.InvariantViolation +import com.digitalasset.canton.crypto.* +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.logging.pretty.Pretty +import com.digitalasset.canton.protocol.messages.DefaultOpenEnvelope +import com.digitalasset.canton.protocol.v1 +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.serialization.{ + BytestringWithCryptographicEvidence, + HasCryptographicEvidence, + ProtoConverter, +} +import com.digitalasset.canton.topology.Member +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.version.{ + HasProtocolVersionedCompanion2, + HasProtocolVersionedWrapper, + ProtoVersion, + ProtocolVersion, + RepresentativeProtocolVersion, +} +import com.digitalasset.canton.{ProtoDeserializationError, checked} +import com.google.protobuf.ByteString + +import scala.concurrent.{ExecutionContext, Future} +import scala.math.Ordered.orderingToOrdered + +/** @param timestampOfSigningKey The timestamp of the topology snapshot that was used for signing the content. + * [[scala.None$]] if the signing timestamp can be derived from the content. + * @param signatures Signatures of the content provided by the different sequencers. 
+ */ +final case class SignedContent[+A <: HasCryptographicEvidence] private ( + content: A, + signatures: NonEmpty[Seq[Signature]], + timestampOfSigningKey: Option[CantonTimestamp], +)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[SignedContent.type] +) extends HasProtocolVersionedWrapper[SignedContent[HasCryptographicEvidence]] + with Serializable + with Product { + @transient override protected lazy val companionObj: SignedContent.type = SignedContent + + def toProtoV1: v1.SignedContent = + v1.SignedContent( + Some(content.getCryptographicEvidence), + signatures.map(_.toProtoV0), + timestampOfSigningKey.map(_.toProtoPrimitive), + ) + + @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) + def traverse[F[_], B <: HasCryptographicEvidence]( + f: A => F[B] + )(implicit F: Functor[F]): F[SignedContent[B]] = + F.map(f(content)) { newContent => + if (newContent eq content) this.asInstanceOf[SignedContent[B]] + else this.copy(content = newContent) + } + + def verifySignature( + snapshot: SyncCryptoApi, + member: Member, + purpose: HashPurpose, + ): EitherT[Future, SignatureCheckError, Unit] = { + val hash = SignedContent.hashContent(snapshot.pureCrypto, content, purpose) + snapshot.verifySignature(hash, member, signature) + } + + def deserializeContent[B <: HasCryptographicEvidence]( + contentDeserializer: ByteString => ParsingResult[B] + ): ParsingResult[SignedContent[B]] = + this.traverse(content => contentDeserializer(content.getCryptographicEvidence)) + + // TODO(i12076): Start using multiple signatures + val signature: Signature = signatures.last1 + + def copy[B <: HasCryptographicEvidence]( + content: B = this.content, + signatures: NonEmpty[Seq[Signature]] = this.signatures, + timestampOfSigningKey: Option[CantonTimestamp] = this.timestampOfSigningKey, + ): SignedContent[B] = + SignedContent.tryCreate( + content, + signatures, + timestampOfSigningKey, + representativeProtocolVersion, + ) +} + +object SignedContent + 
extends HasProtocolVersionedCompanion2[ + SignedContent[HasCryptographicEvidence], + SignedContent[BytestringWithCryptographicEvidence], + ] { + + override def name: String = "SignedContent" + + override def supportedProtoVersions: SupportedProtoVersions = SupportedProtoVersions( + ProtoVersion(1) -> VersionedProtoConverter(ProtocolVersion.v30)(v1.SignedContent)( + supportedProtoVersion(_)(fromProtoV1), + _.toProtoV1.toByteString, + ) + ) + + val multipleSignaturesSupportedSince: RepresentativeProtocolVersion[SignedContent.type] = + protocolVersionRepresentativeFor(ProtocolVersion.v30) + + // TODO(i12076): Start using multiple signatures + def apply[A <: HasCryptographicEvidence]( + content: A, + signature: Signature, + timestampOfSigningKey: Option[CantonTimestamp], + protocolVersion: ProtocolVersion, + ): SignedContent[A] = checked( // There is only a single signature + SignedContent.tryCreate( + content, + NonEmpty(Seq, signature), + timestampOfSigningKey, + protocolVersionRepresentativeFor(protocolVersion), + ) + ) + + def apply[A <: HasCryptographicEvidence]( + content: A, + signatures: NonEmpty[Seq[Signature]], + timestampOfSigningKey: Option[CantonTimestamp], + protoVersion: ProtoVersion, + ): SignedContent[A] = checked( // There is only a single signature + SignedContent.tryCreate( + content, + signatures, + timestampOfSigningKey, + protocolVersionRepresentativeFor(protoVersion), + ) + ) + + def create[A <: HasCryptographicEvidence]( + content: A, + signatures: NonEmpty[Seq[Signature]], + timestampOfSigningKey: Option[CantonTimestamp], + representativeProtocolVersion: RepresentativeProtocolVersion[SignedContent.type], + ): Either[InvariantViolation, SignedContent[A]] = + Either.cond( + representativeProtocolVersion >= multipleSignaturesSupportedSince || + signatures.sizeCompare(1) == 0, + new SignedContent(content, signatures, timestampOfSigningKey)(representativeProtocolVersion), + InvariantViolation( + s"Multiple signatures are supported only from 
protocol version ${multipleSignaturesSupportedSince} on" + ), + ) + + def tryCreate[A <: HasCryptographicEvidence]( + content: A, + signatures: NonEmpty[Seq[Signature]], + timestampOfSigningKey: Option[CantonTimestamp], + representativeProtocolVersion: RepresentativeProtocolVersion[SignedContent.type], + ): SignedContent[A] = + create(content, signatures, timestampOfSigningKey, representativeProtocolVersion).valueOr(err => + throw new IllegalArgumentException(err.message) + ) + + def create[A <: HasCryptographicEvidence]( + cryptoApi: CryptoPureApi, + cryptoPrivateApi: SyncCryptoApi, + content: A, + timestampOfSigningKey: Option[CantonTimestamp], + purpose: HashPurpose, + protocolVersion: ProtocolVersion, + )(implicit + traceContext: TraceContext, + ec: ExecutionContext, + ): EitherT[Future, SyncCryptoError, SignedContent[A]] = { + // as deliverEvent implements MemoizedEvidence repeated calls to serialize will return the same bytes + // so fine to call once for the hash here and then again when serializing to protobuf + val hash = hashContent(cryptoApi, content, purpose) + cryptoPrivateApi + .sign(hash) + .map(signature => SignedContent(content, signature, timestampOfSigningKey, protocolVersion)) + } + + def hashContent( + hashOps: HashOps, + content: HasCryptographicEvidence, + purpose: HashPurpose, + ): Hash = + hashOps.digest(purpose, content.getCryptographicEvidence) + + def tryCreate[A <: HasCryptographicEvidence]( + cryptoApi: CryptoPureApi, + cryptoPrivateApi: SyncCryptoApi, + content: A, + timestampOfSigningKey: Option[CantonTimestamp], + purpose: HashPurpose, + protocolVersion: ProtocolVersion, + )(implicit + traceContext: TraceContext, + ec: ExecutionContext, + ): Future[SignedContent[A]] = + create(cryptoApi, cryptoPrivateApi, content, timestampOfSigningKey, purpose, protocolVersion) + .valueOr(err => throw new IllegalStateException(s"Failed to create signed content: $err")) + + def fromProtoV1( + signedValueP: v1.SignedContent + ): 
ParsingResult[SignedContent[BytestringWithCryptographicEvidence]] = { + val v1.SignedContent(content, signatures, timestampOfSigningKey) = signedValueP + for { + contentB <- ProtoConverter.required("content", content) + signatures <- ProtoConverter.parseRequiredNonEmpty( + Signature.fromProtoV0, + "signature", + signatures, + ) + ts <- timestampOfSigningKey.traverse(CantonTimestamp.fromProtoPrimitive) + signedContent <- create( + BytestringWithCryptographicEvidence(contentB), + signatures, + ts, + protocolVersionRepresentativeFor(ProtoVersion(1)), + ).leftMap(ProtoDeserializationError.InvariantViolation.toProtoDeserializationError) + } yield signedContent + } + + implicit def prettySignedContent[A <: HasCryptographicEvidence](implicit + prettyA: Pretty[A] + ): Pretty[SignedContent[A]] = { + import com.digitalasset.canton.logging.pretty.PrettyUtil.* + prettyOfClass( + unnamedParam(_.content), + param("signatures", _.signatures), + paramIfDefined("timestamp of signing key", _.timestampOfSigningKey), + ) + } + + def openEnvelopes( + event: SignedContent[SequencedEvent[ClosedEnvelope]] + )( + protocolVersion: ProtocolVersion, + hashOps: HashOps, + ): Either[ + EventWithErrors[SequencedEvent[DefaultOpenEnvelope]], + SignedContent[SequencedEvent[DefaultOpenEnvelope]], + ] = { + val (openSequencedEvent, openingErrors) = + SequencedEvent.openEnvelopes(event.content)(protocolVersion, hashOps) + + Either.cond( + openingErrors.isEmpty, + event.copy(content = openSequencedEvent), // The signature is still valid + EventWithErrors(openSequencedEvent, openingErrors, isIgnored = false), + ) + } + +} + +final case class EventWithErrors[Event <: SequencedEvent[_]]( + content: Event, + openingErrors: Seq[ProtoDeserializationError], + isIgnored: Boolean, +) diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/SubmissionRequest.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/SubmissionRequest.scala 
new file mode 100644 index 0000000000..99f0e6e08c --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/SubmissionRequest.scala @@ -0,0 +1,284 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.protocol + +import cats.syntax.either.* +import cats.syntax.traverse.* +import com.digitalasset.canton.config.RequireTypes.{InvariantViolation, NonNegativeInt} +import com.digitalasset.canton.crypto.{HashOps, HashPurpose} +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.protocol.v1 +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.serialization.{ + DeterministicEncoding, + ProtoConverter, + ProtocolVersionedMemoizedEvidence, +} +import com.digitalasset.canton.topology.Member +import com.digitalasset.canton.util.EitherUtil +import com.digitalasset.canton.version.{ + HasMemoizedProtocolVersionedWithContextCompanion, + HasProtocolVersionedWrapper, + ProtoVersion, + ProtocolVersion, + RepresentativeProtocolVersion, +} +import com.google.common.annotations.VisibleForTesting +import com.google.protobuf.ByteString + +/** @param aggregationRule If [[scala.Some$]], this submission request is aggregatable. + * Its envelopes will be delivered only when the rule's conditions are met. + * The receipt of delivery for an aggregatable submission will be delivered immediately to the sender + * even if the rule's conditions are not met. 
+ */ +final case class SubmissionRequest private ( + sender: Member, + messageId: MessageId, + isRequest: Boolean, + batch: Batch[ClosedEnvelope], + maxSequencingTime: CantonTimestamp, + timestampOfSigningKey: Option[CantonTimestamp], + aggregationRule: Option[AggregationRule], +)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + SubmissionRequest.type + ], + override val deserializedFrom: Option[ByteString] = None, +) extends HasProtocolVersionedWrapper[SubmissionRequest] + with ProtocolVersionedMemoizedEvidence { + // Ensures the invariants related to default values hold + validateInstance().valueOr(err => throw new IllegalArgumentException(err)) + + @transient override protected lazy val companionObj: SubmissionRequest.type = SubmissionRequest + + // Caches the serialized request to be able to do checks on its size without re-serializing + lazy val toProtoV1: v1.SubmissionRequest = v1.SubmissionRequest( + sender = sender.toProtoPrimitive, + messageId = messageId.toProtoPrimitive, + isRequest = isRequest, + batch = Some(batch.toProtoV1), + maxSequencingTime = Some(maxSequencingTime.toProtoPrimitive), + timestampOfSigningKey = timestampOfSigningKey.map(_.toProtoPrimitive), + aggregationRule = aggregationRule.map(_.toProtoV0), + ) + + @VisibleForTesting + def copy( + sender: Member = this.sender, + messageId: MessageId = this.messageId, + isRequest: Boolean = this.isRequest, + batch: Batch[ClosedEnvelope] = this.batch, + maxSequencingTime: CantonTimestamp = this.maxSequencingTime, + timestampOfSigningKey: Option[CantonTimestamp] = this.timestampOfSigningKey, + aggregationRule: Option[AggregationRule] = this.aggregationRule, + ) = SubmissionRequest + .create( + sender, + messageId, + isRequest, + batch, + maxSequencingTime, + timestampOfSigningKey, + aggregationRule, + representativeProtocolVersion, + ) + .valueOr(err => throw new IllegalArgumentException(err.message)) + + def isConfirmationRequest(mediator: Member): Boolean = + 
batch.envelopes.exists( + _.recipients.allRecipients.forgetNE == Set(MemberRecipient(mediator)) + ) && batch.envelopes.exists(e => + e.recipients.allRecipients.forgetNE != Set(MemberRecipient(mediator)) + ) + + def isConfirmationResponse(mediator: Member): Boolean = + batch.envelopes.nonEmpty && batch.envelopes.forall( + _.recipients.allRecipients.forgetNE == Set(MemberRecipient(mediator)) + ) + + def isMediatorResult(mediator: Member): Boolean = batch.envelopes.nonEmpty && sender == mediator + + override protected[this] def toByteStringUnmemoized: ByteString = + super[HasProtocolVersionedWrapper].toByteString + + /** Returns the [[AggregationId]] for grouping if this is an aggregatable submission. + * The aggregation ID computationally authenticates the relevant contents of the submission request, namely, + *

+ * <ul>
+ *   <li>Envelope contents [[com.digitalasset.canton.sequencing.protocol.ClosedEnvelope.bytes]],
+ *     the recipients [[com.digitalasset.canton.sequencing.protocol.ClosedEnvelope.recipients]] of the [[batch]],
+ *     and whether there are signatures.</li>
+ *   <li>The [[maxSequencingTime]]</li>
+ *   <li>The [[timestampOfSigningKey]]</li>
+ *   <li>The [[aggregationRule]]</li>
+ * </ul>
+ *
+ * The [[AggregationId]] does not authenticate the following pieces of a submission request:
+ * <ul>
+ *   <li>The signatures [[com.digitalasset.canton.sequencing.protocol.ClosedEnvelope.signatures]] on the closed envelopes
+ *     because the signatures differ for each sender. Aggregating the signatures is the whole point of an aggregatable submission.
+ *     In contrast, the presence of signatures is relevant for the ID because it determines how the
+ *     [[com.digitalasset.canton.sequencing.protocol.ClosedEnvelope.bytes]] are interpreted.</li>
+ *   <li>The [[sender]] and the [[messageId]], as they are specific to the sender of a particular submission request</li>
+ *   <li>The [[isRequest]] flag because it is irrelevant for delivery or aggregation</li>
+ * </ul>
+ */ + def aggregationId(hashOps: HashOps): Option[AggregationId] = aggregationRule.map { rule => + val builder = hashOps.build(HashPurpose.AggregationId) + builder.add(batch.envelopes.length) + batch.envelopes.foreach { envelope => + val ClosedEnvelope(content, recipients, signatures) = envelope + builder.add(DeterministicEncoding.encodeBytes(content)) + builder.add( + DeterministicEncoding.encodeBytes( + // TODO(#12075) Use a deterministic serialization scheme for the recipients + recipients.toProtoV0.toByteString + ) + ) + builder.add(DeterministicEncoding.encodeByte(if (signatures.isEmpty) 0x00 else 0x01)) + } + builder.add(maxSequencingTime.underlying.micros) + // CantonTimestamp's microseconds can never be Long.MinValue, so the encoding remains injective if we use Long.MaxValue as the default. + builder.add(timestampOfSigningKey.fold(Long.MinValue)(_.underlying.micros)) + builder.add(rule.eligibleSenders.size) + rule.eligibleSenders.foreach(member => builder.add(member.toProtoPrimitive)) + builder.add(rule.threshold.value) + val hash = builder.finish() + AggregationId(hash) + } +} +sealed trait MaxRequestSizeToDeserialize { + val toOption: Option[NonNegativeInt] = this match { + case MaxRequestSizeToDeserialize.Limit(value) => Some(value) + case MaxRequestSizeToDeserialize.NoLimit => None + } +} +object MaxRequestSizeToDeserialize { + final case class Limit(value: NonNegativeInt) extends MaxRequestSizeToDeserialize + case object NoLimit extends MaxRequestSizeToDeserialize +} + +object SubmissionRequest + extends HasMemoizedProtocolVersionedWithContextCompanion[ + SubmissionRequest, + MaxRequestSizeToDeserialize, + ] { + val supportedProtoVersions = SupportedProtoVersions( + ProtoVersion(1) -> VersionedProtoConverter( + ProtocolVersion.v30 + )(v1.SubmissionRequest)( + supportedProtoVersionMemoized(_)(fromProtoV1), + _.toProtoV1.toByteString, + ) + ) + + override def name: String = "submission request" + + override lazy val invariants = 
Seq(aggregationRuleDefaultValue, timestampOfSigningKeyInvariant) + + lazy val aggregationRuleDefaultValue + : SubmissionRequest.DefaultValueUntilExclusive[Option[AggregationRule]] = + DefaultValueUntilExclusive( + _.aggregationRule, + "aggregationRule", + protocolVersionRepresentativeFor(ProtoVersion(1)), + None, + ) + + lazy val timestampOfSigningKeyInvariant = new Invariant { + override def validateInstance( + v: SubmissionRequest, + rpv: SubmissionRequest.ThisRepresentativeProtocolVersion, + ): Either[String, Unit] = + EitherUtil.condUnitE( + v.aggregationRule.isEmpty || v.timestampOfSigningKey.isDefined, + s"Submission request has `aggregationRule` set, but `timestampOfSigningKey` is not defined. Please check that `timestampOfSigningKey` has been set for the submission.", + ) + } + + def create( + sender: Member, + messageId: MessageId, + isRequest: Boolean, + batch: Batch[ClosedEnvelope], + maxSequencingTime: CantonTimestamp, + timestampOfSigningKey: Option[CantonTimestamp], + aggregationRule: Option[AggregationRule], + representativeProtocolVersion: RepresentativeProtocolVersion[SubmissionRequest.type], + ): Either[InvariantViolation, SubmissionRequest] = + Either + .catchOnly[IllegalArgumentException]( + new SubmissionRequest( + sender, + messageId, + isRequest, + batch, + maxSequencingTime, + timestampOfSigningKey, + aggregationRule, + )(representativeProtocolVersion, deserializedFrom = None) + ) + .leftMap(error => InvariantViolation(error.getMessage)) + + def tryCreate( + sender: Member, + messageId: MessageId, + isRequest: Boolean, + batch: Batch[ClosedEnvelope], + maxSequencingTime: CantonTimestamp, + timestampOfSigningKey: Option[CantonTimestamp], + aggregationRule: Option[AggregationRule], + protocolVersion: ProtocolVersion, + ): SubmissionRequest = + create( + sender, + messageId, + isRequest, + batch, + maxSequencingTime, + timestampOfSigningKey, + aggregationRule, + protocolVersionRepresentativeFor(protocolVersion), + ).valueOr(err => throw new 
IllegalArgumentException(err.message)) + + def fromProtoV1( + maxRequestSize: MaxRequestSizeToDeserialize, + requestP: v1.SubmissionRequest, + )(bytes: ByteString): ParsingResult[SubmissionRequest] = { + val v1.SubmissionRequest( + senderP, + messageIdP, + isRequest, + batchP, + maxSequencingTimeP, + timestampOfSigningKey, + aggregationRuleP, + ) = requestP + + for { + sender <- Member.fromProtoPrimitive(senderP, "sender") + messageId <- MessageId.fromProtoPrimitive(messageIdP) + maxSequencingTime <- ProtoConverter.parseRequired( + CantonTimestamp.fromProtoPrimitive, + "SubmissionRequest.maxSequencingTime", + maxSequencingTimeP, + ) + batch <- ProtoConverter.parseRequired( + Batch.fromProtoV1(_, maxRequestSize), + "SubmissionRequest.batch", + batchP, + ) + ts <- timestampOfSigningKey.traverse(CantonTimestamp.fromProtoPrimitive) + aggregationRule <- aggregationRuleP.traverse(AggregationRule.fromProtoV0) + } yield new SubmissionRequest( + sender, + messageId, + isRequest, + batch, + maxSequencingTime, + ts, + aggregationRule, + )(protocolVersionRepresentativeFor(ProtoVersion(1)), Some(bytes)) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/SubscriptionRequest.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/SubscriptionRequest.scala new file mode 100644 index 0000000000..2fdccabbaf --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/SubscriptionRequest.scala @@ -0,0 +1,63 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.protocol + +import com.digitalasset.canton.SequencerCounter +import com.digitalasset.canton.domain.api.v0 +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.topology.Member +import com.digitalasset.canton.version.{ + HasProtocolVersionedCompanion, + HasProtocolVersionedWrapper, + ProtoVersion, + ProtocolVersion, + RepresentativeProtocolVersion, +} + +/** A request to receive events from a given counter from a sequencer. + * + * @param member the member subscribing to the sequencer + * @param counter the counter of the first event to receive. + */ +final case class SubscriptionRequest(member: Member, counter: SequencerCounter)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + SubscriptionRequest.type + ] +) extends HasProtocolVersionedWrapper[SubscriptionRequest] { + + @transient override protected lazy val companionObj: SubscriptionRequest.type = + SubscriptionRequest + + def toProtoV0: v0.SubscriptionRequest = v0.SubscriptionRequest(member.toProtoPrimitive, counter.v) +} + +object SubscriptionRequest extends HasProtocolVersionedCompanion[SubscriptionRequest] { + override val name: String = "SubscriptionRequest" + + val supportedProtoVersions = SupportedProtoVersions( + ProtoVersion(0) -> VersionedProtoConverter(ProtocolVersion.v30)(v0.SubscriptionRequest)( + supportedProtoVersion(_)(fromProtoV0), + _.toProtoV0.toByteString, + ) + ) + + def apply( + member: Member, + counter: SequencerCounter, + protocolVersion: ProtocolVersion, + ): SubscriptionRequest = + SubscriptionRequest(member, counter)(protocolVersionRepresentativeFor(protocolVersion)) + + def fromProtoV0( + subscriptionRequestP: v0.SubscriptionRequest + ): ParsingResult[SubscriptionRequest] = { + val v0.SubscriptionRequest(memberP, counter) = subscriptionRequestP + for { + member <- Member.fromProtoPrimitive(memberP, "member") + } yield 
SubscriptionRequest(member, SequencerCounter(counter))( + protocolVersionRepresentativeFor(ProtoVersion(0)) + ) + } + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/SubscriptionResponse.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/SubscriptionResponse.scala new file mode 100644 index 0000000000..e2f8633d06 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/SubscriptionResponse.scala @@ -0,0 +1,33 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.protocol + +import cats.syntax.traverse.* +import com.digitalasset.canton.domain.api.v0 +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.tracing.TraceContext + +final case class SubscriptionResponse( + signedSequencedEvent: SignedContent[SequencedEvent[ClosedEnvelope]], + traceContext: TraceContext, + trafficState: Option[SequencedEventTrafficState], +) + +object SubscriptionResponse { + def fromVersionedProtoV0(responseP: v0.VersionedSubscriptionResponse)(implicit + traceContext: TraceContext + ): ParsingResult[SubscriptionResponse] = { + val v0.VersionedSubscriptionResponse( + signedSequencedEvent, + _ignoredTraceContext, + trafficStateP, + ) = responseP + for { + signedContent <- SignedContent.fromByteString(signedSequencedEvent) + signedSequencedEvent <- signedContent.deserializeContent(SequencedEvent.fromByteString) + trafficState <- trafficStateP.traverse(SequencedEventTrafficState.fromProtoV0) + } yield SubscriptionResponse(signedSequencedEvent, traceContext, trafficState) + + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/TopologyStateForInitRequest.scala 
b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/TopologyStateForInitRequest.scala new file mode 100644 index 0000000000..c808d5589e --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/TopologyStateForInitRequest.scala @@ -0,0 +1,58 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.protocol + +import com.digitalasset.canton.domain.api.v0 +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.topology.Member +import com.digitalasset.canton.version.* + +/** A request to receive the topology state for initialization + * + * @param member the member subscribing to the sequencer + */ +final case class TopologyStateForInitRequest(member: Member)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + TopologyStateForInitRequest.type + ] +) extends HasProtocolVersionedWrapper[TopologyStateForInitRequest] { + + @transient override protected lazy val companionObj: TopologyStateForInitRequest.type = + TopologyStateForInitRequest + + def toProtoV0: v0.TopologyStateForInitRequest = + v0.TopologyStateForInitRequest(member.toProtoPrimitive) +} + +object TopologyStateForInitRequest + extends HasProtocolVersionedCompanion[TopologyStateForInitRequest] { + override val name: String = "TopologyStateForInitRequest" + + val supportedProtoVersions = SupportedProtoVersions( + ProtoVersion(0) -> VersionedProtoConverter(ProtocolVersion.v30)( + v0.TopologyStateForInitRequest + )( + supportedProtoVersion(_)(fromProtoV0), + _.toProtoV0.toByteString, + ) + ) + + def apply( + member: Member, + protocolVersion: ProtocolVersion, + ): TopologyStateForInitRequest = + TopologyStateForInitRequest(member)(protocolVersionRepresentativeFor(protocolVersion)) + + def fromProtoV0( + 
topologyStateForInitRequestP: v0.TopologyStateForInitRequest + ): ParsingResult[TopologyStateForInitRequest] = { + val v0.TopologyStateForInitRequest(memberP) = topologyStateForInitRequestP + for { + member <- Member.fromProtoPrimitive(memberP, "member") + } yield TopologyStateForInitRequest(member)( + protocolVersionRepresentativeFor(ProtoVersion(0)) + ) + } + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/TopologyStateForInitResponse.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/TopologyStateForInitResponse.scala new file mode 100644 index 0000000000..c2695c0856 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/TopologyStateForInitResponse.scala @@ -0,0 +1,42 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.protocol + +import com.digitalasset.canton.domain.api.v0 +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.topology.store.StoredTopologyTransactionsX +import com.digitalasset.canton.topology.store.StoredTopologyTransactionsX.GenericStoredTopologyTransactionsX +import com.digitalasset.canton.tracing.{TraceContext, Traced} + +final case class TopologyStateForInitResponse( + topologyTransactions: Traced[GenericStoredTopologyTransactionsX] +) { + def toProtoV0: v0.TopologyStateForInitResponse = + v0.TopologyStateForInitResponse( + topologyTransactions = Some(topologyTransactions.value.toProtoV0) + ) +} + +object TopologyStateForInitResponse { + + def fromProtoV0(responseP: v0.TopologyStateForInitResponse)(implicit + traceContext: TraceContext + ): ParsingResult[TopologyStateForInitResponse] = { + val v0.TopologyStateForInitResponse( + topologyTransactionsP + ) 
= { + responseP + } + for { + topologyTransactions <- ProtoConverter.parseRequired( + StoredTopologyTransactionsX.fromProtoV0, + "topology_transactions", + topologyTransactionsP, + ) + } yield TopologyStateForInitResponse( + Traced(topologyTransactions)(traceContext) + ) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/TrafficState.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/TrafficState.scala new file mode 100644 index 0000000000..10171eba2e --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/TrafficState.scala @@ -0,0 +1,68 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.sequencing.protocol + +import cats.syntax.apply.* +import com.digitalasset.canton.config.RequireTypes +import com.digitalasset.canton.config.RequireTypes.{NonNegativeLong, PositiveLong} +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.store.db.RequiredTypesCodec.nonNegativeLongOptionGetResult +import slick.jdbc.{GetResult, SetParameter} + +/** Traffic state stored in the sequencer per event needed for enforcing traffic control */ +final case class TrafficState( + extraTrafficRemainder: NonNegativeLong, + extraTrafficConsumed: NonNegativeLong, + baseTrafficRemainder: NonNegativeLong, + timestamp: CantonTimestamp, +) { + lazy val extraTrafficLimit: Option[PositiveLong] = + PositiveLong.create((extraTrafficRemainder + extraTrafficConsumed).value).toOption + + def update( + newExtraTrafficLimit: NonNegativeLong, + timestamp: CantonTimestamp, + ): Either[RequireTypes.InvariantViolation, TrafficState] = { + NonNegativeLong.create(newExtraTrafficLimit.value - extraTrafficConsumed.value).map { + newRemainder => + this.copy( + timestamp = timestamp, + extraTrafficRemainder = newRemainder, + 
) + } + } + + def toSequencedEventTrafficState: SequencedEventTrafficState = SequencedEventTrafficState( + extraTrafficRemainder = extraTrafficRemainder, + extraTrafficConsumed = extraTrafficConsumed, + ) +} + +object TrafficState { + + implicit val setResultParameter: SetParameter[TrafficState] = { (v: TrafficState, pp) => + pp >> Some(v.extraTrafficRemainder.value) + pp >> Some(v.extraTrafficConsumed.value) + pp >> Some(v.baseTrafficRemainder.value) + pp >> v.timestamp + } + + implicit val getResultTrafficState: GetResult[Option[TrafficState]] = { + GetResult + .createGetTuple4( + nonNegativeLongOptionGetResult, + nonNegativeLongOptionGetResult, + nonNegativeLongOptionGetResult, + CantonTimestamp.getResultOptionTimestamp, + ) + .andThen(_.mapN(TrafficState.apply)) + } + + def empty(timestamp: CantonTimestamp): TrafficState = TrafficState( + NonNegativeLong.zero, + NonNegativeLong.zero, + NonNegativeLong.zero, + timestamp, + ) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/serialization/DeterministicEncoding.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/serialization/DeterministicEncoding.scala new file mode 100644 index 0000000000..1cb6b3f4f7 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/serialization/DeterministicEncoding.scala @@ -0,0 +1,277 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.serialization + +import cats.syntax.either.* +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.{LfPartyId, ProtoDeserializationError} +import com.google.protobuf.ByteString + +import java.nio.{ByteBuffer, ByteOrder} +import java.time.Instant +import scala.annotation.tailrec + +sealed trait DeserializationError extends PrettyPrinting { + val message: String + + override def pretty: Pretty[DeserializationError] = + prettyOfClass( + param("message", _.message.unquoted) + ) + def toProtoDeserializationError: ProtoDeserializationError = + this match { + case DefaultDeserializationError(message) => + ProtoDeserializationError.OtherError(message) + case MaxByteToDecompressExceeded(message) => + ProtoDeserializationError.MaxBytesToDecompressExceeded(message) + } +} +final case class DefaultDeserializationError(message: String) extends DeserializationError +final case class MaxByteToDecompressExceeded(message: String) extends DeserializationError + +/** The methods in this object should be used when a deterministic encoding is + * needed. They are not meant for computing serializations for a wire format. Protobuf is a better choice there. + */ +object DeterministicEncoding { + + /** Tests that the given [[com.google.protobuf.ByteString]] has at least `len` bytes and splits the [[com.google.protobuf.ByteString]] at `len`. */ + def splitAt( + len: Int, + bytes: ByteString, + ): Either[DeserializationError, (ByteString, ByteString)] = + if (bytes.size < len) + Left(DefaultDeserializationError(s"Expected $len bytes")) + else + Right((bytes.substring(0, len), bytes.substring(len))) + + /** Encode a [[scala.Byte]] into a [[com.google.protobuf.ByteString]]. 
*/ + def encodeByte(b: Byte): ByteString = ByteString.copyFrom(Array[Byte](b)) + + /** Encode a ByteString (of given length) into another ByteString */ + def encodeBytes(b: ByteString): ByteString = + encodeInt(b.size).concat(b) + + /** Extract a byte-string (length stored) from another ByteString */ + def decodeBytes( + bytes: ByteString + ): Either[DeserializationError, (ByteString, ByteString)] = + for { + lenAndContent <- decodeLength(bytes) + (len, content) = lenAndContent + bytesAndRest <- splitAt(len, content) + } yield bytesAndRest + + /** Encode an [[scala.Int]] into a fixed-length [[com.google.protobuf.ByteString]] in big-endian order. */ + def encodeInt(i: Int): ByteString = + ByteString.copyFrom( + ByteBuffer.allocate(Integer.BYTES).order(ByteOrder.BIG_ENDIAN).putInt(i).array() + ) + + /** Encodes the [[scala.Long]] into a unsigned variable integer according to https://github.com/multiformats/unsigned-varint */ + @SuppressWarnings(Array("org.wartremover.warts.Var", "org.wartremover.warts.While")) + def encodeUVarInt(i: Long): ByteString = { + require(i >= 0, "Only unsigned integers can be encoded to var-int") + + var bs = ByteString.EMPTY + var x = i + + // If value is larger than 7 bit + while (x > 0x7f) { + // Write 7 bits of input value with continuation MSB set + val byte: Byte = ((x & 0x7f) | 0x80).toByte + bs = bs.concat(encodeByte(byte)) + + // Shift out the 7 bits that have been written + x >>>= 7 + } + + // Write the final 7 bits + bs.concat(encodeByte((x & 0x7f).toByte)) + } + + /** Decodes a unsigned variable integer according to https://github.com/multiformats/unsigned-varint */ + def decodeUVarInt(bytes: ByteString): Either[DeserializationError, (Long, ByteString)] = { + + // Returns a tuple of output varint and index to last consumed byte + @tailrec + def decodeUVarIntBytes(output: Long, index: Int, shift: Int): Either[String, (Long, Int)] = { + if (index >= bytes.size) + Left("Input bytes already consumed") + // Only consume maximum of 9 
bytes according to spec + else if (index > 8) + Left("Varint too long") + else { + val nextByte = bytes.byteAt(index) + val out = output | (nextByte & 0x7f) << shift + // the continuation MSB is set + if ((nextByte & 0x80) != 0) { + decodeUVarIntBytes(out, index + 1, shift + 7) + } else { + Right((out, index)) + } + } + } + + decodeUVarIntBytes(0, 0, 0).bimap( + err => DefaultDeserializationError(s"Failed to decode unsigned var-int: $err"), + { case (output, index) => + (output, bytes.substring(index + 1)) + }, + ) + + } + + /** Decode a length parameter and do some sanity checks */ + private def decodeLength( + bytes: ByteString + ): Either[DeserializationError, (Int, ByteString)] = + for { + intAndB <- decodeInt(bytes) + (len, rest) = intAndB + _ <- Either.cond( + len >= 0, + (), + DefaultDeserializationError(s"Negative length of $len in encoded data"), + ) + _ <- Either.cond( + len <= rest.size, + (), + DefaultDeserializationError(s"Length $len is larger than received bytes"), + ) + } yield intAndB + + /** Consume and decode a fixed-length big-endian [[scala.Int]] and return the remainder of the [[com.google.protobuf.ByteString]]. + * + * Inverse to [[DeterministicEncoding.encodeInt]] + */ + def decodeInt(bytes: ByteString): Either[DeserializationError, (Int, ByteString)] = + for { + intBytesAndRest <- splitAt(Integer.BYTES, bytes) + (intBytes, rest) = intBytesAndRest + } yield ( + ByteBuffer + .allocate(Integer.BYTES) + .order(ByteOrder.BIG_ENDIAN) + .put(intBytes.toByteArray) + .getInt(0), + rest, + ) + + /** Encode a [[scala.Long]] into a fixed-length [[com.google.protobuf.ByteString]] in big-endian order. */ + def encodeLong(l: Long): ByteString = + ByteString.copyFrom( + ByteBuffer.allocate(java.lang.Long.BYTES).order(ByteOrder.BIG_ENDIAN).putLong(l).array() + ) + + /** Decode a [[scala.Long]] from a [[com.google.protobuf.ByteString]] and return the remainder of the [[com.google.protobuf.ByteString]]. 
+ * + * Inverse to [[DeterministicEncoding.encodeLong]] + */ + def decodeLong(bytes: ByteString): Either[DeserializationError, (Long, ByteString)] = + for { + longBytesAndRest <- splitAt(java.lang.Long.BYTES, bytes) + (longBytes, rest) = longBytesAndRest + } yield ( + ByteBuffer + .allocate(java.lang.Long.BYTES) + .order(ByteOrder.BIG_ENDIAN) + .put(longBytes.toByteArray) + .getLong(0), + rest, + ) + + /** Encode a [[java.lang.String]] into a [[com.google.protobuf.ByteString]], prefixing the string content with its length. + */ + def encodeString(s: String): ByteString = + encodeBytes(ByteString.copyFromUtf8(s)) + + /** Decode a [[java.lang.String]] from a length-prefixed [[com.google.protobuf.ByteString]] and return the remainder of the [[com.google.protobuf.ByteString]]. + * + * Inverse to [[DeterministicEncoding.encodeString]] + */ + def decodeString(bytes: ByteString): Either[DeserializationError, (String, ByteString)] = + for { + stringBytesAndBytes <- decodeBytes(bytes) + (stringBytes, rest) = stringBytesAndBytes + } yield (stringBytes.toStringUtf8, rest) + + /** Encode an [[java.time.Instant]] into a [[com.google.protobuf.ByteString]] */ + def encodeInstant(instant: Instant): ByteString = + encodeLong(instant.getEpochSecond).concat(encodeInt(instant.getNano)) + + /** Decode a [[java.time.Instant]] from a [[com.google.protobuf.ByteString]] and return the remainder of the [[com.google.protobuf.ByteString]]. 
+ * + * Inverse to [[DeterministicEncoding.encodeInstant]] + */ + def decodeInstant( + bytes: ByteString + ): Either[DeserializationError, (Instant, ByteString)] = { + for { + longAndBytes <- decodeLong(bytes) + (long, bytes) = longAndBytes + intAndBytes <- decodeInt(bytes) + (int, bytes) = intAndBytes + } yield (Instant.ofEpochSecond(long, int.toLong), bytes) + } + + /** Encode an [[LfPartyId]] into a [[com.google.protobuf.ByteString]], using the underlying string */ + def encodeParty(party: LfPartyId): ByteString = + encodeString(party) + + /** Encode an [[scala.Option]] into a tagged [[com.google.protobuf.ByteString]], using the given `encode` function. */ + def encodeOptionWith[A](option: Option[A])(encode: A => ByteString): ByteString = { + option match { + case None => encodeByte(0) + case Some(x) => encodeByte(1).concat(encode(x)) + } + } + + /** Encode a [[scala.Seq]] into a [[com.google.protobuf.ByteString]] using the given encoding function, + * prefixing it with the length of the [[scala.Seq]] + */ + def encodeSeqWith[A](seq: Seq[A])(encode: A => ByteString): ByteString = { + import scala.jdk.CollectionConverters.* + DeterministicEncoding + .encodeInt(seq.length) + .concat(ByteString.copyFrom(seq.map(encode).asJava)) + } + + def decodeSeqWith[A](bytes: ByteString)( + decode: ByteString => Either[DeserializationError, (A, ByteString)] + ): Either[DeserializationError, (Seq[A], ByteString)] = { + def iterate( + col: Seq[A], + num: Int, + bytes: ByteString, + ): Either[DeserializationError, (Seq[A], ByteString)] = { + if (num == 0) { + Right((col, bytes)) + } else { + decode(bytes).flatMap { case (elem, rest) => + iterate(col :+ elem, num - 1, rest) + } + } + } + for { + lengthAndRest <- DeterministicEncoding.decodeInt(bytes) + (len, rest) = lengthAndRest + dc <- iterate(Seq(), len, rest) + } yield dc + } + + /** Encode an [[scala.Either]] of [[com.google.protobuf.ByteString]]s into a tagged [[com.google.protobuf.ByteString]]. 
*/ + def encodeEitherWith[L, R]( + either: Either[L, R] + )(encodeL: L => ByteString, encodeR: R => ByteString): ByteString = + either match { + case Left(l) => encodeByte(0).concat(encodeL(l)) + case Right(r) => encodeByte(2).concat(encodeR(r)) + } + + /** Encode a pair of [[com.google.protobuf.ByteString]]s as an untagged [[com.google.protobuf.ByteString]] */ + def encodeTuple2With[A, B]( + pair: (A, B) + )(encodeA: A => ByteString, encodeB: B => ByteString): ByteString = + encodeA(pair._1).concat(encodeB(pair._2)) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/serialization/HasCryptographicEvidence.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/serialization/HasCryptographicEvidence.scala new file mode 100644 index 0000000000..31d2ce4c21 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/serialization/HasCryptographicEvidence.scala @@ -0,0 +1,131 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.serialization + +import com.digitalasset.canton.util.NoCopy +import com.digitalasset.canton.version.HasRepresentativeProtocolVersion +import com.google.protobuf.ByteString + +/** Trait for deterministically serializing an object to a [[com.google.protobuf.ByteString]]. + * + * A class should extend this trait to indicate that the `serialize` method yields the same `ByteString` if invoked + * several times. + * Typical use cases of this behavior include: + *
    + *
  • Classes that represent a leaf of a Merkle tree.
  • + *
  • Classes that represent content that is signed e.g. Hash or SignedProtocolMessageContent.
  • + *
  • Classes that are mere wrappers of ByteString (for convenience of the caller) e.g. AuthenticationToken.
  • + *
+ * '''If a class merely represents content that is transmitted over a network, the class does not need to extend this + * trait.''' + * + * It is strongly recommended to extend this trait by mixing in [[ProtocolVersionedMemoizedEvidence]] or + * [[MemoizedEvidenceWithFailure]], instead of directly extending this trait. + * + * Classes `C` implementing [[HasCryptographicEvidence]] must define a + * `fromByteString: ByteString => C` method in their companion object + * that converts a serialization back into an equal object. + * In particular, `c.fromByteString(byteString).toByteString` must equal `byteString`. + */ +trait HasCryptographicEvidence { + + /** Returns the serialization of the object into a [[com.google.protobuf.ByteString]]. + * In particular, every instance `i` of this trait must equal `fromByteString(i.toByteString)`. + * + * This method must yield the same result if it is invoked several times. + */ + def getCryptographicEvidence: ByteString +} + +/** Effectively immutable [[HasCryptographicEvidence]] classes can mix in this trait + * to implement the memoization logic. + * + * Use this class if serialization always succeeds. + * + * Make sure that `fromByteString(byteString).deserializedFrom` equals `Some(byteString)`. + * + * Make sure that every public constructor and apply method yields an instance with `deserializedFrom == None`. + * + * @see MemoizedEvidenceWithFailure if serialization may fail + */ +trait ProtocolVersionedMemoizedEvidence + extends HasCryptographicEvidence + with HasRepresentativeProtocolVersion { + + /** Returns the [[com.google.protobuf.ByteString]] from which this object has been deserialized, if any. + * If defined, [[getCryptographicEvidence]] will use this as the serialization. + */ + def deserializedFrom: Option[ByteString] + + /** Computes the serialization of the object as a [[com.google.protobuf.ByteString]]. 
+ * + * Must meet the contract of [[getCryptographicEvidence]] + * except that when called several times, different [[com.google.protobuf.ByteString]]s may be returned. + */ + protected[this] def toByteStringUnmemoized: ByteString + + final override lazy val getCryptographicEvidence: ByteString = { + deserializedFrom match { + case Some(bytes) => bytes + case None => toByteStringUnmemoized + } + } +} + +/** Thrown by [[MemoizedEvidenceWithFailure]] classes during object construction if the serialization fails. + * + * @param serializationError The error raised during the serialization + * @tparam E The type of errors that serialization may return + */ +final case class SerializationCheckFailed[+E](serializationError: E) extends Exception + +/** Effectively immutable [[HasCryptographicEvidence]] classes can mix in this trait + * to implement the memoization logic. + * + * Use this class if serialization may fail. This mix-in checks whenever an object is constructed that + * either a serialization is given or that serialization will succeed. + * It also ensures that no `copy` method is generated for case classes with this mixin. + * + * Make sure that `fromByteString(byteString).deserializedFrom` equals `Some(byteString)`. + * + * Make sure that every public constructor and apply method yields an instance with `deserializedFrom == None`. + * + * @tparam SerializationError The type of serialization errors + * @see MemoizedEvidence if serialization always succeeds. + * @throws SerializationCheckFailed if the serialization fails + */ +trait MemoizedEvidenceWithFailure[SerializationError] extends HasCryptographicEvidence with NoCopy { + + /** Returns the [[com.google.protobuf.ByteString]] from which this object has been deserialized, if any. + * If defined, [[getCryptographicEvidence]] will use this as the serialization. 
+ */ + protected[this] def deserializedFrom: Option[ByteString] + + /** Computes the serialization of the object as a [[com.google.protobuf.ByteString]] or produces a `SerializationError` + * if serialization fails. + * + * Must meet the contract of [[getCryptographicEvidence]] + * except that different [[com.google.protobuf.ByteString]]s may be returned when called several times and + * it may fail at any time. + */ + protected[this] def toByteStringChecked: Either[SerializationError, ByteString] + + @throws[SerializationCheckFailed[SerializationError]] + final override val getCryptographicEvidence: ByteString = { + deserializedFrom match { + case Some(bytes) => bytes + case None => + toByteStringChecked match { + case Left(error) => throw new SerializationCheckFailed[SerializationError](error) + case Right(bytes) => bytes + } + } + } +} + +/** Wraps a [[com.google.protobuf.ByteString]] so that it is its own cryptographic evidence. */ +final case class BytestringWithCryptographicEvidence(bytes: ByteString) + extends HasCryptographicEvidence { + override def getCryptographicEvidence: ByteString = bytes +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/serialization/ProtoConverter.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/serialization/ProtoConverter.scala new file mode 100644 index 0000000000..0b91e70e81 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/serialization/ProtoConverter.scala @@ -0,0 +1,195 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.serialization + +import cats.syntax.either.* +import cats.syntax.traverse.* +import com.daml.lf.data.Ref +import com.daml.nonempty.NonEmpty +import com.daml.nonempty.catsinstances.* +import com.digitalasset.canton.ProtoDeserializationError.{ + BufferException, + FieldNotSet, + StringConversionError, + TimestampConversionError, +} +import com.digitalasset.canton.config.RequireTypes.{NonNegativeLong, PositiveInt, PositiveLong} +import com.digitalasset.canton.protocol.{LfContractId, LfTemplateId} +import com.digitalasset.canton.util.OptionUtil +import com.digitalasset.canton.{ + LedgerApplicationId, + LedgerParticipantId, + LedgerSubmissionId, + LedgerTransactionId, + LfPartyId, + LfWorkflowId, + ProtoDeserializationError, +} +import com.google.protobuf.timestamp.Timestamp +import com.google.protobuf.{ByteString, CodedInputStream, InvalidProtocolBufferException} + +import java.time.{DateTimeException, Duration, Instant} +import java.util.UUID + +/** Can convert messages to and from proto objects + * @tparam A type of the message to be serialized + * @tparam Proto type of the proto message + * @tparam Err type of deserialization errors + */ +trait ProtoConverter[A, Proto, Err] { + + /** Convert an instance to a protobuf structure + * @param value to be serialized + * @return serialized proto + */ + def toProtoPrimitive(value: A): Proto + + /** Convert proto value to its native type + * @param value to be deserialized + * @return deserialized value + */ + def fromProtoPrimitive(value: Proto): Either[Err, A] +} + +object ProtoConverter { + type ParsingResult[+T] = Either[ProtoDeserializationError, T] + + /** Helper to convert protobuf exceptions into ProtoDeserializationErrors + * + * i.e. 
usage: ProtoConverter.protoParser(v0.MessageContent.parseFrom) + */ + def protoParser[A](parseFrom: CodedInputStream => A): ByteString => Either[BufferException, A] = + bytes => + Either + .catchOnly[InvalidProtocolBufferException](parseFrom(bytes.newCodedInput)) + .leftMap(BufferException) + + def protoParserArray[A](parseFrom: Array[Byte] => A): Array[Byte] => Either[BufferException, A] = + bytes => + Either.catchOnly[InvalidProtocolBufferException](parseFrom(bytes)).leftMap(BufferException) + + /** Helper for extracting an optional field where the value is required + * @param field the field name + * @param optValue the optional value + * @return a [[scala.Right$]] of the value if set or + * a [[scala.Left$]] of [[com.digitalasset.canton.ProtoDeserializationError.FieldNotSet]] error + */ + def required[B](field: String, optValue: Option[B]): Either[FieldNotSet, B] = + optValue.toRight(FieldNotSet(field)) + + def parseRequired[A, P]( + fromProto: P => ParsingResult[A], + field: String, + optValue: Option[P], + ): ParsingResult[A] = + required(field, optValue).flatMap(fromProto) + + def parse[A, P]( + parseFrom: CodedInputStream => P, + fromProto: P => ParsingResult[A], + value: ByteString, + ): ParsingResult[A] = + protoParser(parseFrom)(value).flatMap(fromProto) + + def parseRequiredNonEmpty[A, P]( + fromProto: P => ParsingResult[A], + field: String, + content: Seq[P], + ): ParsingResult[NonEmpty[Seq[A]]] = + for { + contentNE <- NonEmpty + .from(content) + .toRight(ProtoDeserializationError.FieldNotSet(s"Sequence $field not set or empty")) + parsed <- contentNE.toNEF.traverse(fromProto) + } yield parsed + + def parsePositiveInt(i: Int): ParsingResult[PositiveInt] = + PositiveInt.create(i).leftMap(ProtoDeserializationError.InvariantViolation(_)) + + def parsePositiveLong(l: Long): ParsingResult[PositiveLong] = + PositiveLong.create(l).leftMap(ProtoDeserializationError.InvariantViolation(_)) + + def parseNonNegativeLong(l: Long): ParsingResult[NonNegativeLong] = 
+ NonNegativeLong.create(l).leftMap(ProtoDeserializationError.InvariantViolation(_)) + + def parseLfPartyId(party: String): ParsingResult[LfPartyId] = + parseString(party)(LfPartyId.fromString) + + def parseLfPartyIdO(party: String): ParsingResult[Option[LfPartyId]] = + Option.when(party.nonEmpty)(parseLfPartyId(party)).sequence + + def parseLfParticipantId(party: String): ParsingResult[LedgerParticipantId] = + parseString(party)(LedgerParticipantId.fromString) + + def parseLFApplicationId(applicationId: String): ParsingResult[LedgerApplicationId] = + parseString(applicationId)(LedgerApplicationId.fromString) + + def parseLFSubmissionIdO(submissionId: String): ParsingResult[Option[LedgerSubmissionId]] = + Option + .when(submissionId.nonEmpty)(parseLFSubmissionId(submissionId)) + .sequence + + def parseLFSubmissionId(submissionId: String): ParsingResult[LedgerSubmissionId] = + parseString(submissionId)(LedgerSubmissionId.fromString) + + def parseLFWorkflowIdO(workflowId: String): ParsingResult[Option[LfWorkflowId]] = + Option + .when(workflowId.nonEmpty)(parseString(workflowId)(LfWorkflowId.fromString)) + .sequence + + def parseLedgerTransactionId(id: String): ParsingResult[LedgerTransactionId] = + parseString(id)(LedgerTransactionId.fromString) + + def parseLfContractId(id: String): ParsingResult[LfContractId] = + parseString(id)(LfContractId.fromString) + + def parseCommandId(id: String): ParsingResult[Ref.CommandId] = + parseString(id)(Ref.CommandId.fromString) + + def parseTemplateIdO(id: String): ParsingResult[Option[LfTemplateId]] = + OptionUtil.emptyStringAsNone(id).traverse(parseTemplateId) + + def parseTemplateId(id: String): ParsingResult[LfTemplateId] = + parseString(id)(LfTemplateId.fromString) + + private def parseString[T](from: String)(to: String => Either[String, T]): ParsingResult[T] = + to(from).leftMap(StringConversionError) + object InstantConverter extends ProtoConverter[Instant, Timestamp, ProtoDeserializationError] { + override def 
toProtoPrimitive(value: Instant): Timestamp = + Timestamp(value.getEpochSecond, value.getNano) + + override def fromProtoPrimitive(proto: Timestamp): ParsingResult[Instant] = + try { + Right(Instant.ofEpochSecond(proto.seconds, proto.nanos.toLong)) + } catch { + case _: DateTimeException => + Left(TimestampConversionError("timestamp exceeds min or max of Instant")) + case _: ArithmeticException => Left(TimestampConversionError("numeric overflow")) + } + + } + + object DurationConverter + extends ProtoConverter[ + java.time.Duration, + com.google.protobuf.duration.Duration, + ProtoDeserializationError, + ] { + override def toProtoPrimitive(duration: Duration): com.google.protobuf.duration.Duration = + com.google.protobuf.duration.Duration(duration.getSeconds, duration.getNano) + override def fromProtoPrimitive( + duration: com.google.protobuf.duration.Duration + ): ParsingResult[java.time.Duration] = + Right(java.time.Duration.ofSeconds(duration.seconds, duration.nanos.toLong)) + } + + object UuidConverter extends ProtoConverter[UUID, String, StringConversionError] { + override def toProtoPrimitive(uuid: UUID): String = uuid.toString + + override def fromProtoPrimitive(uuidP: String): Either[StringConversionError, UUID] = + Either + .catchOnly[IllegalArgumentException](UUID.fromString(uuidP)) + .leftMap(err => StringConversionError(err.getMessage)) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/CursorPreheadStore.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/CursorPreheadStore.scala new file mode 100644 index 0000000000..bce4dd5cd9 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/CursorPreheadStore.scala @@ -0,0 +1,67 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.store + +import com.digitalasset.canton.data.{CantonTimestamp, Counter} +import com.digitalasset.canton.lifecycle.CloseContext +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.resource.TransactionalStoreUpdate +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.{RequestCounterDiscriminator, SequencerCounterDiscriminator} + +import scala.concurrent.{ExecutionContext, Future} + +/** Storage for a cursor prehead. */ +trait CursorPreheadStore[Discr] extends AutoCloseable { + private[store] implicit def ec: ExecutionContext + + /** Gets the prehead of the cursor. */ + def prehead(implicit traceContext: TraceContext): Future[Option[CursorPrehead[Discr]]] + + /** Forces an update to the cursor prehead. Only use this for maintenance and testing. */ + // Cannot implement this method with advancePreheadTo/rewindPreheadTo + // because it allows atomically overwriting the timestamp associated with the current prehead. + private[canton] def overridePreheadUnsafe(newPrehead: Option[CursorPrehead[Discr]])(implicit + traceContext: TraceContext + ): Future[Unit] + + /** Sets the prehead counter to `newPrehead` unless it is already at the same or a higher value. + * The prehead counter should be set to the counter before the head of the corresponding cursor. + */ + def advancePreheadTo(newPrehead: CursorPrehead[Discr])(implicit + traceContext: TraceContext, + callerCloseContext: CloseContext, + ): Future[Unit] = + advancePreheadToTransactionalStoreUpdate(newPrehead).runStandalone() + + /** [[advancePreheadTo]] as a [[com.digitalasset.canton.resource.TransactionalStoreUpdate]] */ + def advancePreheadToTransactionalStoreUpdate(newPrehead: CursorPrehead[Discr])(implicit + traceContext: TraceContext + ): TransactionalStoreUpdate + + /** Sets the prehead counter to `newPreheadO` if it is currently set to a higher value. 
*/ + def rewindPreheadTo(newPreheadO: Option[CursorPrehead[Discr]])(implicit + traceContext: TraceContext + ): Future[Unit] +} + +/** Information for the prehead of a cursor. + * The prehead of a cursor is the counter before the cursors' head, if any. + * + * @param counter The counter corresponding to the prehead + * @param timestamp The timestamp corresponding to the prehead + */ +final case class CursorPrehead[Discr](counter: Counter[Discr], timestamp: CantonTimestamp) + extends PrettyPrinting { + + override def pretty: Pretty[CursorPrehead.this.type] = prettyOfClass( + param("counter", _.counter), + param("timestamp", _.timestamp), + ) +} + +object CursorPrehead { + type SequencerCounterCursorPrehead = CursorPrehead[SequencerCounterDiscriminator] + type RequestCounterCursorPrehead = CursorPrehead[RequestCounterDiscriminator] +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/IndexedStringStore.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/IndexedStringStore.scala new file mode 100644 index 0000000000..01f8f11667 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/IndexedStringStore.scala @@ -0,0 +1,210 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.store + +import cats.data.{EitherT, OptionT} +import cats.syntax.either.* +import com.digitalasset.canton.checked +import com.digitalasset.canton.config.CantonRequireTypes.String300 +import com.digitalasset.canton.config.{CacheConfig, ProcessingTimeout} +import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.resource.{DbStorage, MemoryStorage, Storage} +import com.digitalasset.canton.store.db.DbIndexedStringStore +import com.digitalasset.canton.store.memory.InMemoryIndexedStringStore +import com.digitalasset.canton.topology.{DomainId, Member} +import com.github.blemale.scaffeine.{AsyncLoadingCache, Scaffeine} +import com.google.common.annotations.VisibleForTesting +import slick.jdbc.{PositionedParameters, SetParameter} + +import scala.collection.mutable +import scala.concurrent.{ExecutionContext, Future} + +trait IndexedString[E] { + def item: E + def index: Int +} +object IndexedString { + + abstract class Impl[E](val item: E) extends IndexedString[E] + + implicit val setParameterIndexedString: SetParameter[IndexedString[_]] = + (d: IndexedString[_], pp: PositionedParameters) => pp.setInt(d.index) + + implicit val setParameterIndexedStringO: SetParameter[Option[IndexedString[_]]] = + (d: Option[IndexedString[_]], pp: PositionedParameters) => pp.setIntOption(d.map(_.index)) + +} + +// common interface for companion objects +abstract class IndexedStringFromDb[A <: IndexedString[B], B] { + + protected def buildIndexed(item: B, index: Int): A + protected def asString(item: B): String300 + protected def dbTyp: IndexedStringType + protected def fromString(str: String300, index: Int): Either[String, A] + + def indexed( + indexedStringStore: IndexedStringStore + )(item: B)(implicit ec: ExecutionContext): Future[A] = + indexedStringStore + .getOrCreateIndex(dbTyp, asString(item)) + .map(buildIndexed(item, _)) + + def 
fromDbIndexOT(context: String, indexedStringStore: IndexedStringStore)( + index: Int + )(implicit ec: ExecutionContext, loggingContext: ErrorLoggingContext): OptionT[Future, A] = { + fromDbIndexET(indexedStringStore)(index).leftMap { err => + loggingContext.logger.error( + s"Corrupt log id: ${index} for ${dbTyp} within context $context: $err" + )(loggingContext.traceContext) + }.toOption + } + + def fromDbIndexET( + indexedStringStore: IndexedStringStore + )(index: Int)(implicit ec: ExecutionContext): EitherT[Future, String, A] = { + EitherT(indexedStringStore.getForIndex(dbTyp, index).map { strO => + for { + str <- strO.toRight("No entry for given index") + parsed <- fromString(str, index) + } yield parsed + }) + } +} + +final case class IndexedDomain private (domainId: DomainId, index: Int) + extends IndexedString.Impl[DomainId](domainId) { + require( + index > 0, + s"Illegal index $index. The index must be positive to prevent clashes with participant event log ids.", + ) +} + +object IndexedDomain extends IndexedStringFromDb[IndexedDomain, DomainId] { + + /** @throws java.lang.IllegalArgumentException if `index <= 0`. + */ + @VisibleForTesting + def tryCreate(domainId: DomainId, index: Int): IndexedDomain = + IndexedDomain(domainId, index) + + override protected def dbTyp: IndexedStringType = IndexedStringType.domainId + + override protected def buildIndexed(item: DomainId, index: Int): IndexedDomain = { + // safe, because buildIndexed is only called with indices created by IndexedStringStores. + // These indices are positive by construction. + checked(tryCreate(item, index)) + } + + override protected def asString(item: DomainId): String300 = + item.toLengthLimitedString.asString300 + + override protected def fromString(str: String300, index: Int): Either[String, IndexedDomain] = { + // save, because fromString is only called with indices created by IndexedStringStores. + // These indices are positive by construction. 
+ DomainId.fromString(str.unwrap).map(checked(tryCreate(_, index))) + } +} + +final case class IndexedMember private (member: Member, index: Int) + extends IndexedString.Impl[Member](member) +object IndexedMember extends IndexedStringFromDb[IndexedMember, Member] { + override protected def buildIndexed(item: Member, index: Int): IndexedMember = + IndexedMember(item, index) + override protected def asString(item: Member): String300 = item.toLengthLimitedString + override protected def dbTyp: IndexedStringType = IndexedStringType.memberId + override protected def fromString(str: String300, index: Int): Either[String, IndexedMember] = + Member.fromProtoPrimitive(str.unwrap, "member").leftMap(_.toString).map(IndexedMember(_, index)) +} + +final case class IndexedStringType private (source: Int, description: String) +object IndexedStringType { + + private val ids: mutable.Map[Int, IndexedStringType] = + mutable.TreeMap.empty[Int, IndexedStringType] + + /** Creates a new [[IndexedStringType]] with a given description */ + def apply(source: Int, description: String): IndexedStringType = { + val item = new IndexedStringType(source, description) + ids.put(source, item).foreach { oldItem => + throw new IllegalArgumentException( + s"requirement failed: IndexedStringType with id=$source already exists as $oldItem" + ) + } + item + } + + val domainId: IndexedStringType = IndexedStringType(1, "domainId") + val memberId: IndexedStringType = IndexedStringType(2, "memberId") + +} + +/** uid index such that we can store integers instead of long strings in our database */ +trait IndexedStringStore extends AutoCloseable { + + def getOrCreateIndex(dbTyp: IndexedStringType, str: String300): Future[Int] + def getForIndex(dbTyp: IndexedStringType, idx: Int): Future[Option[String300]] + +} + +object IndexedStringStore { + def create( + storage: Storage, + config: CacheConfig, + timeouts: ProcessingTimeout, + loggerFactory: NamedLoggerFactory, + )(implicit + ec: ExecutionContext + ): 
IndexedStringStore = + storage match { + case _: MemoryStorage => InMemoryIndexedStringStore() + case jdbc: DbStorage => + new IndexedStringCache( + new DbIndexedStringStore(jdbc, timeouts, loggerFactory), + config, + loggerFactory, + ) + } +} + +class IndexedStringCache( + parent: IndexedStringStore, + config: CacheConfig, + val loggerFactory: NamedLoggerFactory, +)(implicit ec: ExecutionContext) + extends IndexedStringStore + with NamedLogging { + + private val str2Index: AsyncLoadingCache[(String300, IndexedStringType), Int] = Scaffeine() + .maximumSize(config.maximumSize.value) + .expireAfterAccess(config.expireAfterAccess.underlying) + .buildAsyncFuture[(String300, IndexedStringType), Int] { case (str, typ) => + parent.getOrCreateIndex(typ, str).map { idx => + index2str.put((idx, typ), Future.successful(Some(str))) + idx + } + } + + // (index,typ) + private val index2str: AsyncLoadingCache[(Int, IndexedStringType), Option[String300]] = + Scaffeine() + .maximumSize(config.maximumSize.value) + .expireAfterAccess(config.expireAfterAccess.underlying) + .buildAsyncFuture[(Int, IndexedStringType), Option[String300]] { case (idx, typ) => + parent.getForIndex(typ, idx).map { + case Some(str) => + str2Index.put((str, typ), Future.successful(idx)) + Some(str) + case None => None + } + } + + override def getForIndex(dbTyp: IndexedStringType, idx: Int): Future[Option[String300]] = + index2str.get((idx, dbTyp)) + + override def getOrCreateIndex(dbTyp: IndexedStringType, str: String300): Future[Int] = + str2Index.get((str, dbTyp)) + + override def close(): Unit = parent.close() +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/PrunableByTime.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/PrunableByTime.scala new file mode 100644 index 0000000000..445edbe0d0 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/PrunableByTime.scala @@ -0,0 +1,57 @@ +// Copyright (c) 2023 
Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.store + +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.pruning.{PruningPhase, PruningStatus} +import com.digitalasset.canton.tracing.TraceContext +import com.google.common.annotations.VisibleForTesting + +import scala.concurrent.{ExecutionContext, Future} + +/** Interface for a store that allows pruning and keeps track of when pruning has started and finished. */ +trait PrunableByTime { + + protected implicit val ec: ExecutionContext + + /** Prune all unnecessary data relating to events before the given timestamp. + * + * The meaning of "unnecessary", and whether the limit is inclusive or exclusive both depend on the particular store. + * The store must implement the actual pruning logic in the [[doPrune]] method. + */ + final def prune( + limit: CantonTimestamp + )(implicit traceContext: TraceContext): Future[Unit] = + for { + lastTs <- getLastPruningTs + _ <- advancePruningTimestamp(PruningPhase.Started, limit) + _ <- doPrune(limit, lastTs) + _ <- advancePruningTimestamp(PruningPhase.Completed, limit) + } yield () + + private def getLastPruningTs(implicit + traceContext: TraceContext + ): Future[Option[CantonTimestamp]] = pruningStatus.map(_.flatMap(_.lastSuccess)) + + /** Returns the latest timestamp at which pruning was started or completed. + * For [[com.digitalasset.canton.pruning.PruningPhase.Started]], it is guaranteed + * that no pruning has been run on the store after the returned timestamp. + * For [[com.digitalasset.canton.pruning.PruningPhase.Completed]], it is guaranteed + * that the store is pruned at least up to the returned timestamp (inclusive). + * That is, another pruning with the returned timestamp (or earlier) has no effect on the store. + * Returns [[scala.None$]] if no pruning has ever been started on the store. 
+ */ + def pruningStatus(implicit traceContext: TraceContext): Future[Option[PruningStatus]] + + @VisibleForTesting + protected[canton] def advancePruningTimestamp(phase: PruningPhase, timestamp: CantonTimestamp)( + implicit traceContext: TraceContext + ): Future[Unit] + + @VisibleForTesting + protected[canton] def doPrune(limit: CantonTimestamp, lastPruning: Option[CantonTimestamp])( + implicit traceContext: TraceContext + ): Future[Unit] + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/SendTrackerStore.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/SendTrackerStore.scala new file mode 100644 index 0000000000..b1f2c815b9 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/SendTrackerStore.scala @@ -0,0 +1,57 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.store + +import cats.data.EitherT +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.resource.{DbStorage, MemoryStorage, Storage} +import com.digitalasset.canton.sequencing.protocol.MessageId +import com.digitalasset.canton.store.memory.InMemorySendTrackerStore +import com.digitalasset.canton.tracing.TraceContext + +import scala.concurrent.{ExecutionContext, Future} + +/** Keeps track of [[com.digitalasset.canton.sequencing.protocol.SubmissionRequest]]s + * that have been sent to the sequencer but not yet witnessed. + */ +trait SendTrackerStore extends AutoCloseable { + + /** Fetch all pending sends currently stored. */ + def fetchPendingSends(implicit + traceContext: TraceContext + ): Future[Map[MessageId, CantonTimestamp]] + + /** Saves that a send will be submitted with this message-id and that if sequenced we expect to see a deliver or + * deliver error by the provided max sequencing time. 
+ */ + def savePendingSend(messageId: MessageId, maxSequencingTime: CantonTimestamp)(implicit + traceContext: TraceContext + ): EitherT[Future, SavePendingSendError, Unit] + + /** Removes a pending send from the set we are tracking. + * Implementations should be idempotent and not error if the message-id is not tracked. + */ + def removePendingSend(messageId: MessageId)(implicit traceContext: TraceContext): Future[Unit] +} + +object SendTrackerStore { + def apply(storage: Storage)(implicit executionContext: ExecutionContext): SendTrackerStore = + storage match { + // Always use an in-memory send tracker store. + // This is a temporary fix to avoid performance problems, because we block on accessing the send tracker store + // from the hot loop of the sequencer client. + // TODO(i5660): Use the db-based send tracker store + case _: MemoryStorage => new InMemorySendTrackerStore() + case _: DbStorage => new InMemorySendTrackerStore() + } +} + +final case class SendTrackerDatabaseError(exception: Throwable) + +sealed trait SavePendingSendError +object SavePendingSendError { + + /** The provided message id is already being tracked and cannot be reused until complete */ + case object MessageIdAlreadyTracked extends SavePendingSendError +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/SequencedEventStore.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/SequencedEventStore.scala new file mode 100644 index 0000000000..1aab1487f8 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/SequencedEventStore.scala @@ -0,0 +1,398 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.store + +import cats.data.EitherT +import cats.syntax.traverse.* +import com.digitalasset.canton.SequencerCounter +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.crypto.HashOps +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.lifecycle.CloseContext +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.protocol.messages.{DefaultOpenEnvelope, ProtocolMessage} +import com.digitalasset.canton.protocol.v0 +import com.digitalasset.canton.pruning.PruningStatus +import com.digitalasset.canton.resource.{DbStorage, MemoryStorage, Storage} +import com.digitalasset.canton.sequencing.protocol.* +import com.digitalasset.canton.sequencing.{ + OrdinarySerializedEvent, + PossiblyIgnoredProtocolEvent, + PossiblyIgnoredSerializedEvent, +} +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.store.SequencedEventStore.PossiblyIgnoredSequencedEvent.dbTypeOfEvent +import com.digitalasset.canton.store.SequencedEventStore.* +import com.digitalasset.canton.store.db.DbSequencedEventStore.SequencedEventDbType +import com.digitalasset.canton.store.db.{DbSequencedEventStore, SequencerClientDiscriminator} +import com.digitalasset.canton.store.memory.InMemorySequencedEventStore +import com.digitalasset.canton.tracing.{ + HasTraceContext, + SerializableTraceContext, + TraceContext, + Traced, +} +import com.digitalasset.canton.version.ProtocolVersion +import com.google.common.annotations.VisibleForTesting + +import scala.concurrent.{ExecutionContext, Future} + +/** Persistent store for [[com.digitalasset.canton.sequencing.protocol.SequencedEvent]]s received from the sequencer. 
+ * The store may assume that sequencer counters strictly increase with timestamps + * without checking this precondition. + */ +trait SequencedEventStore extends PrunableByTime with NamedLogging with AutoCloseable { + + import SequencedEventStore.SearchCriterion + + implicit val ec: ExecutionContext + + /** Stores the given [[com.digitalasset.canton.sequencing.protocol.SequencedEvent]]s. + * If an event with the same timestamp already exist, the event may remain unchanged or overwritten. + */ + def store(signedEvents: Seq[OrdinarySerializedEvent])(implicit + traceContext: TraceContext, + externalCloseContext: CloseContext, + ): Future[Unit] + + /** Looks up an event by the given criterion. + * + * @return [[SequencedEventNotFoundError]] if no stored event meets the criterion. + */ + def find(criterion: SearchCriterion)(implicit + traceContext: TraceContext + ): EitherT[Future, SequencedEventNotFoundError, PossiblyIgnoredSerializedEvent] + + /** Looks up a set of sequenced events within the given range. + * + * @param limit The maximum number of elements in the returned iterable, if set. + */ + def findRange(criterion: RangeCriterion, limit: Option[Int])(implicit + traceContext: TraceContext + ): EitherT[Future, SequencedEventRangeOverlapsWithPruning, Seq[PossiblyIgnoredSerializedEvent]] + + def sequencedEvents(limit: Option[Int] = None)(implicit + traceContext: TraceContext + ): Future[Seq[PossiblyIgnoredSerializedEvent]] + + /** Marks events between `from` and `to` as ignored. + * Fills any gap between `from` and `to` by empty ignored events, i.e. ignored events without any underlying real event. + * + * @return [[ChangeWouldResultInGap]] if there would be a gap between the highest sequencer counter in the store and `from`. + */ + def ignoreEvents(from: SequencerCounter, to: SequencerCounter)(implicit + traceContext: TraceContext + ): EitherT[Future, ChangeWouldResultInGap, Unit] + + /** Removes the ignored status from all events between `from` and `to`. 
+ * + * @return [[ChangeWouldResultInGap]] if deleting empty ignored events between `from` and `to` would result in a gap in sequencer counters. + */ + def unignoreEvents(from: SequencerCounter, to: SequencerCounter)(implicit + traceContext: TraceContext + ): EitherT[Future, ChangeWouldResultInGap, Unit] + + /** Deletes all events with sequencer counter greater than or equal to `from`. + */ + @VisibleForTesting + private[canton] def delete(from: SequencerCounter)(implicit + traceContext: TraceContext + ): Future[Unit] +} + +object SequencedEventStore { + + def apply[Env <: Envelope[_]]( + storage: Storage, + member: SequencerClientDiscriminator, + protocolVersion: ProtocolVersion, + timeouts: ProcessingTimeout, + loggerFactory: NamedLoggerFactory, + )(implicit executionContext: ExecutionContext): SequencedEventStore = + storage match { + case _: MemoryStorage => new InMemorySequencedEventStore(loggerFactory) + case dbStorage: DbStorage => + new DbSequencedEventStore(dbStorage, member, protocolVersion, timeouts, loggerFactory) + } + + sealed trait SearchCriterion extends Product with Serializable + + /** Find the event with the given timestamp */ + final case class ByTimestamp(timestamp: CantonTimestamp) extends SearchCriterion + + /** Finds the event with the highest timestamp before or at `inclusive` */ + final case class LatestUpto(inclusive: CantonTimestamp) extends SearchCriterion + + /** Finds a sequence of events within a range */ + sealed trait RangeCriterion extends Product with Serializable with PrettyPrinting + + /** Finds all events with timestamps within the given range. + * + * @param lowerInclusive The lower bound, inclusive. Must not be after `upperInclusive` + * @param upperInclusive The upper bound, inclusive. 
Must not be before `lowerInclusive` + * @throws java.lang.IllegalArgumentException if `lowerInclusive` is after `upperInclusive` + */ + final case class ByTimestampRange( + lowerInclusive: CantonTimestamp, + upperInclusive: CantonTimestamp, + ) extends RangeCriterion { + require( + lowerInclusive <= upperInclusive, + s"Lower bound timestamp $lowerInclusive is after upper bound $upperInclusive", + ) + + override def pretty: Pretty[ByTimestampRange] = prettyOfClass( + param("lower inclusive", _.lowerInclusive), + param("upper inclusive", _.upperInclusive), + ) + } + + /** Encapsulates an event stored in the SequencedEventStore. + */ + sealed trait PossiblyIgnoredSequencedEvent[+Env <: Envelope[_]] + extends HasTraceContext + with PrettyPrinting + with Product + with Serializable { + def timestamp: CantonTimestamp + + def trafficState: Option[SequencedEventTrafficState] + + def counter: SequencerCounter + + def underlyingEventBytes: Array[Byte] + + private[store] def dbType: SequencedEventDbType + + def isIgnored: Boolean + + def underlying: Option[SignedContent[SequencedEvent[Env]]] + + def asIgnoredEvent: IgnoredSequencedEvent[Env] + + def asOrdinaryEvent: PossiblyIgnoredSequencedEvent[Env] + + def toProtoV0: v0.PossiblyIgnoredSequencedEvent = + v0.PossiblyIgnoredSequencedEvent( + counter = counter.toProtoPrimitive, + timestamp = Some(timestamp.toProtoPrimitive), + traceContext = Some(SerializableTraceContext(traceContext).toProtoV0), + isIgnored = isIgnored, + underlying = underlying.map(_.toProtoV1), + ) + } + + /** Encapsulates an ignored event, i.e., an event that should not be processed. + * + * If an ordinary sequenced event `oe` is later converted to an ignored event `ie`, + * the actual event `oe.signedEvent` is retained as `ie.underlying` so that no information gets discarded by ignoring events. + * If an ignored event `ie` is inserted as a placeholder for an event that has not been received, the underlying + * event `ie.underlying` is left empty. 
+ */ + final case class IgnoredSequencedEvent[+Env <: Envelope[_]]( + override val timestamp: CantonTimestamp, + override val counter: SequencerCounter, + override val underlying: Option[SignedContent[SequencedEvent[Env]]], + override val trafficState: Option[SequencedEventTrafficState] = None, + )(override val traceContext: TraceContext) + extends PossiblyIgnoredSequencedEvent[Env] { + + override def underlyingEventBytes: Array[Byte] = Array.empty + + private[store] override def dbType: SequencedEventDbType = + underlying.fold[SequencedEventDbType](SequencedEventDbType.IgnoredEvent)(e => + dbTypeOfEvent(e.content) + ) + + override def isIgnored: Boolean = true + + override def asIgnoredEvent: IgnoredSequencedEvent[Env] = this + + override def asOrdinaryEvent: PossiblyIgnoredSequencedEvent[Env] = underlying match { + case Some(event) => OrdinarySequencedEvent(event, trafficState)(traceContext) + case None => this + } + + override def pretty: Pretty[IgnoredSequencedEvent[Envelope[_]]] = + prettyOfClass( + param("timestamp", _.timestamp), + param("counter", _.counter), + paramIfDefined("underlying", _.underlying), + ) + } + + object IgnoredSequencedEvent { + @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) + def openEnvelopes( + event: IgnoredSequencedEvent[ClosedEnvelope] + )( + protocolVersion: ProtocolVersion, + hashOps: HashOps, + ): Either[ + Traced[EventWithErrors[SequencedEvent[DefaultOpenEnvelope]]], + IgnoredSequencedEvent[DefaultOpenEnvelope], + ] = { + event.underlying match { + case Some(signedEvent) => + SignedContent + .openEnvelopes(signedEvent)(protocolVersion, hashOps) + .fold( + err => Left(Traced(err.copy(isIgnored = true))(event.traceContext)), + evt => Right(event.copy(underlying = Some(evt))(event.traceContext)), + ) + case None => Right(event.asInstanceOf[IgnoredSequencedEvent[DefaultOpenEnvelope]]) + } + } + } + + /** Encapsulates an event received by the sequencer. 
+ * It has been signed by the sequencer and contains a trace context. + */ + final case class OrdinarySequencedEvent[+Env <: Envelope[_]]( + signedEvent: SignedContent[SequencedEvent[Env]], + trafficState: Option[SequencedEventTrafficState], + )( + override val traceContext: TraceContext + ) extends PossiblyIgnoredSequencedEvent[Env] { + + override def timestamp: CantonTimestamp = signedEvent.content.timestamp + + override def counter: SequencerCounter = signedEvent.content.counter + + override def underlyingEventBytes: Array[Byte] = signedEvent.toByteArray + + private[store] override def dbType: SequencedEventDbType = dbTypeOfEvent(signedEvent.content) + + override def isIgnored: Boolean = false + + def isTombstone: Boolean = signedEvent.content.isTombstone + + override def underlying: Some[SignedContent[SequencedEvent[Env]]] = Some(signedEvent) + + override def asIgnoredEvent: IgnoredSequencedEvent[Env] = + IgnoredSequencedEvent(timestamp, counter, Some(signedEvent), trafficState)(traceContext) + + override def asOrdinaryEvent: PossiblyIgnoredSequencedEvent[Env] = this + + override def pretty: Pretty[OrdinarySequencedEvent[Envelope[_]]] = prettyOfClass( + param("signedEvent", _.signedEvent) + ) + } + + object OrdinarySequencedEvent { + def openEnvelopes( + event: OrdinarySequencedEvent[ClosedEnvelope] + )( + protocolVersion: ProtocolVersion, + hashOps: HashOps, + ): Either[ + Traced[EventWithErrors[SequencedEvent[DefaultOpenEnvelope]]], + OrdinarySequencedEvent[DefaultOpenEnvelope], + ] = { + val openSignedEventE = + SignedContent.openEnvelopes(event.signedEvent)(protocolVersion, hashOps) + openSignedEventE.fold( + err => Left(Traced(err)(event.traceContext)), + evt => Right(event.copy(signedEvent = evt)(event.traceContext)), + ) + } + } + + object PossiblyIgnoredSequencedEvent { + + private[store] def dbTypeOfEvent(content: SequencedEvent[_]): SequencedEventDbType = + content match { + case _: DeliverError => SequencedEventDbType.DeliverError + case _: 
Deliver[_] => SequencedEventDbType.Deliver + } + + def fromProtoV0(protocolVersion: ProtocolVersion, hashOps: HashOps)( + possiblyIgnoredSequencedEventP: v0.PossiblyIgnoredSequencedEvent + ): ParsingResult[PossiblyIgnoredProtocolEvent] = { + val v0.PossiblyIgnoredSequencedEvent( + counter, + timestampPO, + traceContextPO, + isIgnored, + underlyingPO, + ) = possiblyIgnoredSequencedEventP + + val sequencerCounter = SequencerCounter(counter) + + for { + underlyingO <- underlyingPO.traverse( + SignedContent + .fromProtoV1(_) + .flatMap( + _.deserializeContent(SequencedEvent.fromByteStringOpen(hashOps, protocolVersion)) + ) + ) + timestamp <- ProtoConverter + .required("timestamp", timestampPO) + .flatMap(CantonTimestamp.fromProtoPrimitive) + traceContext <- ProtoConverter + .required("trace_context", traceContextPO) + .flatMap(SerializableTraceContext.fromProtoV0) + possiblyIgnoredSequencedEvent <- + if (isIgnored) { + Right( + IgnoredSequencedEvent(timestamp, sequencerCounter, underlyingO, None)( + traceContext.unwrap + ) + ) + } else + ProtoConverter + .required("underlying", underlyingO) + // TODO(i13596): This only seems to be used to deserialize time proof events. 
Revisit whether or not we do need the traffic state for that + .map( + OrdinarySequencedEvent(_, Option.empty[SequencedEventTrafficState])( + traceContext.unwrap + ) + ) + } yield possiblyIgnoredSequencedEvent + } + + def openEnvelopes( + event: PossiblyIgnoredSequencedEvent[ClosedEnvelope] + )( + protocolVersion: ProtocolVersion, + hashOps: HashOps, + ): Either[ + Traced[EventWithErrors[SequencedEvent[OpenEnvelope[ProtocolMessage]]]], + PossiblyIgnoredSequencedEvent[OpenEnvelope[ProtocolMessage]], + ] = + event match { + case evt: OrdinarySequencedEvent[_] => + OrdinarySequencedEvent.openEnvelopes(evt)(protocolVersion, hashOps) + case evt: IgnoredSequencedEvent[_] => + IgnoredSequencedEvent.openEnvelopes(evt)(protocolVersion, hashOps) + } + } +} + +sealed trait SequencedEventStoreError extends Product with Serializable + +final case class SequencedEventNotFoundError(criterion: SequencedEventStore.SearchCriterion) + extends SequencedEventStoreError + +final case class SequencedEventRangeOverlapsWithPruning( + criterion: RangeCriterion, + pruningStatus: PruningStatus, + foundEvents: Seq[PossiblyIgnoredSerializedEvent], +) extends SequencedEventStoreError + with PrettyPrinting { + override def pretty: Pretty[SequencedEventRangeOverlapsWithPruning.this.type] = prettyOfClass( + param("criterion", _.criterion), + param("pruning status", _.pruningStatus), + param("found events", _.foundEvents), + ) +} + +final case class ChangeWouldResultInGap(from: SequencerCounter, to: SequencerCounter) + extends SequencedEventStoreError { + override def toString: String = + s"Unable to perform operation, because that would result in a sequencer counter gap between $from and $to." 
+} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/SequencerCounterTrackerStore.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/SequencerCounterTrackerStore.scala new file mode 100644 index 0000000000..a643008637 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/SequencerCounterTrackerStore.scala @@ -0,0 +1,65 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.store + +import com.digitalasset.canton.SequencerCounterDiscriminator +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.lifecycle.{CloseContext, FlagCloseable} +import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.resource.{DbStorage, MemoryStorage, Storage} +import com.digitalasset.canton.store.CursorPrehead.SequencerCounterCursorPrehead +import com.digitalasset.canton.store.db.{ + DbSequencerCounterTrackerStore, + SequencerClientDiscriminator, +} +import com.digitalasset.canton.store.memory.InMemorySequencerCounterTrackerStore +import com.digitalasset.canton.tracing.TraceContext + +import scala.concurrent.{ExecutionContext, Future} + +/** Store for keeping track of the prehead for clean sequencer counters. + * A [[com.digitalasset.canton.SequencerCounter]] becomes clean + * when the corresponding [[com.digitalasset.canton.sequencing.protocol.SequencedEvent]] has been processed + * completely and successfully. + * The prehead of the cursor is advanced only so far that all sequencer counters up to the prehead are clean. + */ +trait SequencerCounterTrackerStore extends FlagCloseable { + protected[store] val cursorStore: CursorPreheadStore[SequencerCounterDiscriminator] + + /** Gets the prehead clean sequencer counter. This sequencer counter and all the ones below are assumed to be clean. 
*/ + def preheadSequencerCounter(implicit + traceContext: TraceContext + ): Future[Option[SequencerCounterCursorPrehead]] = + cursorStore.prehead + + /** Sets the prehead clean sequencer counter to `sequencerCounter` unless it has previously been set to a higher value. */ + def advancePreheadSequencerCounterTo( + sequencerCounter: SequencerCounterCursorPrehead + )(implicit + traceContext: TraceContext, + callerCloseContext: CloseContext, + ): Future[Unit] = + cursorStore.advancePreheadTo(sequencerCounter) + + /** Rewinds the prehead clean sequencer counter to `newPrehead` unless the prehead is already at or before the new `preHead`. */ + def rewindPreheadSequencerCounter( + newPreheadO: Option[SequencerCounterCursorPrehead] + )(implicit + traceContext: TraceContext + ): Future[Unit] = + cursorStore.rewindPreheadTo(newPreheadO) +} + +object SequencerCounterTrackerStore { + def apply( + storage: Storage, + client: SequencerClientDiscriminator, + timeouts: ProcessingTimeout, + loggerFactory: NamedLoggerFactory, + )(implicit ec: ExecutionContext): SequencerCounterTrackerStore = storage match { + case _: MemoryStorage => new InMemorySequencerCounterTrackerStore(loggerFactory, timeouts) + case dbStorage: DbStorage => + new DbSequencerCounterTrackerStore(client, dbStorage, timeouts, loggerFactory) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/SessionKeyStore.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/SessionKeyStore.scala new file mode 100644 index 0000000000..6eddbb15d9 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/SessionKeyStore.scala @@ -0,0 +1,166 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.store + +import cats.data.EitherT +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.config +import com.digitalasset.canton.config.CacheConfigWithTimeout +import com.digitalasset.canton.crypto.* +import com.digitalasset.canton.store.SessionKeyStore.RecipientGroup +import com.digitalasset.canton.topology.ParticipantId +import com.digitalasset.canton.tracing.TraceContext +import com.github.blemale.scaffeine.Cache + +import scala.concurrent.{ExecutionContext, Future} + +//TODO(#15057) Add stats on cache hits/misses +sealed trait SessionKeyStore { + + protected[canton] def getSessionKeyInfoIfPresent( + recipients: RecipientGroup + ): Option[SessionKeyInfo] + + protected[canton] def saveSessionKeyInfo( + recipients: RecipientGroup, + sessionKeyInfo: SessionKeyInfo, + ): Unit + + protected[canton] def getSessionKeyRandomnessIfPresent( + encryptedRandomness: AsymmetricEncrypted[SecureRandomness] + ): Option[SecureRandomness] + + def getSessionKeyRandomness( + privateCrypto: CryptoPrivateApi, + keySizeInBytes: Int, + encryptedRandomness: AsymmetricEncrypted[SecureRandomness], + )(implicit + tc: TraceContext, + ec: ExecutionContext, + ): EitherT[Future, DecryptionError, SecureRandomness] + +} + +object SessionKeyStoreDisabled extends SessionKeyStore { + + protected[canton] def getSessionKeyInfoIfPresent( + recipients: RecipientGroup + ): Option[SessionKeyInfo] = None + + protected[canton] def saveSessionKeyInfo( + recipients: RecipientGroup, + sessionKeyInfo: SessionKeyInfo, + ): Unit = () + + protected[canton] def getSessionKeyRandomnessIfPresent( + encryptedRandomness: AsymmetricEncrypted[SecureRandomness] + ): Option[SecureRandomness] = None + + def getSessionKeyRandomness( + privateCrypto: CryptoPrivateApi, + keySizeInBytes: Int, + encryptedRandomness: AsymmetricEncrypted[SecureRandomness], + )(implicit + tc: TraceContext, + ec: ExecutionContext, + ): EitherT[Future, 
DecryptionError, SecureRandomness] = + privateCrypto + .decrypt(encryptedRandomness)( + SecureRandomness.fromByteString(keySizeInBytes) + ) + +} + +final class SessionKeyStoreWithInMemoryCache(sessionKeysCacheConfig: CacheConfigWithTimeout) + extends SessionKeyStore { + + /** This cache keeps track of the session key information for each recipient group, which is then used to encrypt the randomness that is + * part of the encrypted view messages. + * + * This cache may create interesting eviction strategies during a key roll of a recipient. + * Whether a key is considered revoked or not depends on the snapshot we're picking. + * + * So, consider two concurrent transaction submissions: + * + * - tx1 and tx2 pick a snapshot where the key is still valid + * - tx3 and tx4 pick a snapshot where the key is invalid + * + * However, due to concurrency, they interleave for the encrypted view message factory as tx1, tx3, tx2, tx4 + * - tx1 populates the cache for the recipients with a new session key; + * - tx3 notices that the key is no longer valid, produces a new session key and replaces the old one; + * - tx2 finds the session key from tx3, but considers it invalid because the key is not active. So it creates a new session key and evicts the old one; + * - tx4 installs again a new session key + * + * Since key rolls are rare and everything still remains consistent we accept this as an expected behavior.
+ */ + private lazy val sessionKeysCacheSender: Cache[RecipientGroup, SessionKeyInfo] = + sessionKeysCacheConfig + .buildScaffeine() + .build() + + protected[canton] def getSessionKeyInfoIfPresent( + recipients: RecipientGroup + ): Option[SessionKeyInfo] = + sessionKeysCacheSender.getIfPresent(recipients) + + protected[canton] def saveSessionKeyInfo( + recipients: RecipientGroup, + sessionKeyInfo: SessionKeyInfo, + ): Unit = + sessionKeysCacheSender.put(recipients, sessionKeyInfo) + + /** This cache keeps track of the matching encrypted randomness for the session keys and their corresponding unencrypted value. + * This way we can save on the amount of asymmetric decryption operations. + */ + private lazy val sessionKeysCacheRecipient + : Cache[AsymmetricEncrypted[SecureRandomness], SecureRandomness] = + sessionKeysCacheConfig + .buildScaffeine() + .build() + + protected[canton] def getSessionKeyRandomnessIfPresent( + encryptedRandomness: AsymmetricEncrypted[SecureRandomness] + ): Option[SecureRandomness] = + sessionKeysCacheRecipient.getIfPresent(encryptedRandomness) + + def getSessionKeyRandomness( + privateCrypto: CryptoPrivateApi, + keySizeInBytes: Int, + encryptedRandomness: AsymmetricEncrypted[SecureRandomness], + )(implicit + tc: TraceContext, + ec: ExecutionContext, + ): EitherT[Future, DecryptionError, SecureRandomness] = + sessionKeysCacheRecipient.getIfPresent(encryptedRandomness) match { + case Some(randomness) => EitherT.rightT[Future, DecryptionError](randomness) + case None => + privateCrypto + .decrypt(encryptedRandomness)( + SecureRandomness.fromByteString(keySizeInBytes) + ) + .map { randomness => + /* TODO(#15022): to ensure transparency, in the future, we will probably want to cache not just the + * encrypted randomness for this participant, but for all recipients so that you can also cache the + * check that everyone can decrypt the randomness if they need it.
+ */ + sessionKeysCacheRecipient.put(encryptedRandomness, randomness) + randomness + } + } + +} + +object SessionKeyStore { + + def apply(cacheConfig: CacheConfigWithTimeout): SessionKeyStore = + if (cacheConfig.expireAfterTimeout == config.NonNegativeFiniteDuration.Zero) + SessionKeyStoreDisabled + else new SessionKeyStoreWithInMemoryCache(cacheConfig) + + // Defines a set of recipients and the crypto scheme used to generate the session key for that group + final case class RecipientGroup( + recipients: NonEmpty[Set[ParticipantId]], + cryptoScheme: SymmetricKeyScheme, + ) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/db/DbCursorPreheadStore.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/db/DbCursorPreheadStore.scala new file mode 100644 index 0000000000..bb9b10491c --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/db/DbCursorPreheadStore.scala @@ -0,0 +1,162 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.store.db + +import com.daml.nameof.NameOf.functionFullName +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.data.{CantonTimestamp, Counter} +import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.metrics.TimedLoadGauge +import com.digitalasset.canton.resource.{DbStorage, DbStore, TransactionalStoreUpdate} +import com.digitalasset.canton.store.{CursorPrehead, CursorPreheadStore} +import com.digitalasset.canton.tracing.TraceContext +import com.google.common.annotations.VisibleForTesting + +import scala.annotation.nowarn +import scala.concurrent.{ExecutionContext, Future} + +/** DB storage for a cursor prehead for a domain + * + * @param cursorTable The table name to store the cursor prehead. + * The table must define the following columns: + *
+ * <ul>
+ *   <li>client varchar not null primary key</li>
+ *   <li>prehead_counter bigint not null</li>
+ *   <li>ts bigint not null</li>
+ * </ul>
+ * @param processingTime The metric to be used for DB queries + */ +class DbCursorPreheadStore[Discr]( + client: SequencerClientDiscriminator, + override protected val storage: DbStorage, + cursorTable: String, + processingTime: TimedLoadGauge, + override protected val timeouts: ProcessingTimeout, + override protected val loggerFactory: NamedLoggerFactory, +)(override private[store] implicit val ec: ExecutionContext) + extends CursorPreheadStore[Discr] + with DbStore { + import storage.api.* + + @nowarn("msg=match may not be exhaustive") + override def prehead(implicit + traceContext: TraceContext + ): Future[Option[CursorPrehead[Discr]]] = + processingTime.event { + val preheadQuery = + sql"""select prehead_counter, ts from #$cursorTable where client = $client order by prehead_counter desc #${storage + .limit(2)}""" + .as[(Counter[Discr], CantonTimestamp)] + storage.query(preheadQuery, functionFullName).map { + case Seq() => None + case (preheadCounter, preheadTimestamp) +: rest => + if (rest.nonEmpty) + logger.warn( + s"Found several preheads for $client in $cursorTable instead of at most one; using $preheadCounter as prehead" + ) + Some(CursorPrehead(preheadCounter, preheadTimestamp)) + } + } + + @VisibleForTesting + override private[canton] def overridePreheadUnsafe( + newPrehead: Option[CursorPrehead[Discr]] + )(implicit traceContext: TraceContext): Future[Unit] = processingTime.event { + logger.info(s"Override prehead counter in $cursorTable to $newPrehead") + newPrehead match { + case None => delete() + case Some(CursorPrehead(counter, timestamp)) => + val query = storage.profile match { + case _: DbStorage.Profile.H2 => + sqlu"merge into #$cursorTable (client, prehead_counter, ts) values ($client, $counter, $timestamp)" + case _: DbStorage.Profile.Postgres => + sqlu"""insert into #$cursorTable (client, prehead_counter, ts) values ($client, $counter, $timestamp) + on conflict (client) do update set prehead_counter = $counter, ts = $timestamp""" + case _: 
DbStorage.Profile.Oracle => + sqlu"""merge into #$cursorTable ct + using ( + select + $client client, + $counter counter, + $timestamp ts + from dual + ) val + on (val.client = ct.client) + when matched then + update set ct.prehead_counter = val.counter, ct.ts = val.ts + when not matched then + insert (client, prehead_counter, ts) values (val.client, val.counter, val.ts)""" + } + storage.update_(query, functionFullName) + } + } + + override def advancePreheadToTransactionalStoreUpdate( + newPrehead: CursorPrehead[Discr] + )(implicit traceContext: TraceContext): TransactionalStoreUpdate = { + logger.debug(s"Advancing prehead in $cursorTable to $newPrehead") + val CursorPrehead(counter, timestamp) = newPrehead + val query = storage.profile match { + case _: DbStorage.Profile.H2 => + sqlu""" + merge into #$cursorTable as cursor_table + using dual + on cursor_table.client = $client + when matched and cursor_table.prehead_counter < $counter + then update set cursor_table.prehead_counter = $counter, cursor_table.ts = $timestamp + when not matched then insert (client, prehead_counter, ts) values ($client, $counter, $timestamp) + """ + case _: DbStorage.Profile.Postgres => + sqlu""" + insert into #$cursorTable as cursor_table (client, prehead_counter, ts) + values ($client, $counter, $timestamp) + on conflict (client) do + update set prehead_counter = $counter, ts = $timestamp + where cursor_table.prehead_counter < $counter + """ + case _: DbStorage.Profile.Oracle => + sqlu""" + merge into #$cursorTable cursor_table + using ( + select + $client client + from dual + ) val + on (cursor_table.client = val.client) + when matched then + update set cursor_table.prehead_counter = $counter, cursor_table.ts = $timestamp + where cursor_table.prehead_counter < $counter + when not matched then + insert (client, prehead_counter, ts) values (val.client, $counter, $timestamp) + """ + } + new TransactionalStoreUpdate.DbTransactionalStoreUpdate( + query, + storage, + Some(processingTime), 
+ loggerFactory, + ) + } + + override def rewindPreheadTo( + newPreheadO: Option[CursorPrehead[Discr]] + )(implicit traceContext: TraceContext): Future[Unit] = { + logger.info(s"Rewinding prehead to $newPreheadO") + newPreheadO match { + case None => delete() + case Some(CursorPrehead(counter, timestamp)) => + val query = + sqlu""" + update #$cursorTable + set prehead_counter = $counter, ts = $timestamp + where client = $client and prehead_counter > $counter""" + storage.update_(query, "rewind prehead") + } + } + + private[this] def delete()(implicit traceContext: TraceContext): Future[Unit] = + processingTime.event { + storage.update_(sqlu"""delete from #$cursorTable where client = $client""", functionFullName) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/db/DbExceptions.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/db/DbExceptions.scala new file mode 100644 index 0000000000..ef2fd47184 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/db/DbExceptions.scala @@ -0,0 +1,19 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.store.db + +/** Thrown when a persisted value cannot be deserialized back into a scala type. */ +@SuppressWarnings(Array("org.wartremover.warts.Null")) +class DbDeserializationException(message: String, cause: Throwable = null) + extends RuntimeException(message, cause) + +/** Thrown when a value is persisted into the database. 
*/ +@SuppressWarnings(Array("org.wartremover.warts.Null")) +class DbSerializationException(message: String, cause: Throwable = null) + extends RuntimeException(message, cause) + +/** Thrown when the db has not been properly initialized */ +@SuppressWarnings(Array("org.wartremover.warts.Null")) +class DbUninitializedException(message: String, cause: Throwable = null) + extends RuntimeException(message, cause) diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/db/DbIndexedStringStore.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/db/DbIndexedStringStore.scala new file mode 100644 index 0000000000..6ebed5dc37 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/db/DbIndexedStringStore.scala @@ -0,0 +1,75 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.store.db + +import cats.data.OptionT +import com.daml.nameof.NameOf.functionFullName +import com.digitalasset.canton.config.CantonRequireTypes.String300 +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.resource.{DbStorage, DbStore} +import com.digitalasset.canton.store.{IndexedStringStore, IndexedStringType} + +import scala.concurrent.{ExecutionContext, Future} + +class DbIndexedStringStore( + override protected val storage: DbStorage, + override protected val timeouts: ProcessingTimeout, + override protected val loggerFactory: NamedLoggerFactory, +)(implicit val executionContext: ExecutionContext) + extends IndexedStringStore + with DbStore { + + import com.digitalasset.canton.tracing.TraceContext.Implicits.Empty.* + import storage.api.* + + override def getOrCreateIndex(dbTyp: IndexedStringType, str: String300): Future[Int] = + getIndexForStr(dbTyp.source, str).getOrElseF { + 
insertIgnore(dbTyp.source, str).flatMap { _ => + getIndexForStr(dbTyp.source, str).getOrElse { + noTracingLogger.error(s"static string $str is still missing in db after i just stored it") + throw new IllegalStateException( + s"static string $str is still missing in db after i just stored it" + ) + } + } + } + + private def getIndexForStr(dbType: Int, str: String300): OptionT[Future, Int] = + OptionT( + storage + .query( + sql"select id from static_strings where string = $str and source = $dbType" + .as[Int] + .headOption, + functionFullName, + ) + ) + + private def insertIgnore(dbType: Int, str: String300): Future[Unit] = { + // not sure how to get "last insert id" here in case the row was inserted + // therefore, we're just querying the db again. this is a bit dorky, + // but we'll hardly ever do this, so should be good + val query = storage.profile match { + case _: DbStorage.Profile.Postgres | _: DbStorage.Profile.H2 => + sqlu"insert into static_strings (string, source) values ($str, $dbType) ON CONFLICT DO NOTHING" + case _: DbStorage.Profile.Oracle => + sqlu"""INSERT + /*+ IGNORE_ROW_ON_DUPKEY_INDEX ( static_strings (string, source) ) */ + INTO static_strings (string, source) VALUES ($str,$dbType)""" + } + // and now query it + storage.update_(query, functionFullName) + } + + override def getForIndex(dbTyp: IndexedStringType, idx: Int): Future[Option[String300]] = { + storage + .query( + sql"select string from static_strings where id = $idx and source = ${dbTyp.source}" + .as[String300], + functionFullName, + ) + .map(_.headOption) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/db/DbPrunableByTime.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/db/DbPrunableByTime.scala new file mode 100644 index 0000000000..ab96ab76e1 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/db/DbPrunableByTime.scala @@ -0,0 +1,148 @@ +// Copyright (c) 2023 Digital Asset 
(Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.store.db + +import com.daml.nameof.NameOf.functionFullName +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.metrics.TimedLoadGauge +import com.digitalasset.canton.pruning.{PruningPhase, PruningStatus} +import com.digitalasset.canton.resource.{DbStorage, DbStore} +import com.digitalasset.canton.store.{IndexedDomain, IndexedString, PrunableByTime} +import com.digitalasset.canton.tracing.TraceContext +import slick.jdbc.SetParameter + +import scala.concurrent.{ExecutionContext, Future} + +/** Mixin for a db store that stores the latest point in time when + * pruning has started or finished. + * + * The pruning method of the store must use [[advancePruningTimestamp]] to signal the start and completion + * of each pruning. + */ +trait DbPrunableByTime[PartitionKey] extends PrunableByTime { + this: DbStore => + + protected[this] implicit def setParameterDiscriminator: SetParameter[PartitionKey] + + /** The table name to store the pruning timestamp in. + * The table must define the following fields: + *
+ * <ul>
+ *   <li>[[partitionColumn]] primary key</li>
+ *   <li>`phase` stores the [[com.digitalasset.canton.pruning.PruningPhase]]</li>
+ *   <li>`ts` stores the [[com.digitalasset.canton.data.CantonTimestamp]]</li>
+ * </ul>
+ */ + protected[this] def pruning_status_table: String + + protected[this] def partitionColumn: String + + protected[this] def partitionKey: PartitionKey + + protected[this] implicit val ec: ExecutionContext + + import storage.api.* + + protected val processingTime: TimedLoadGauge + + override def pruningStatus(implicit + traceContext: TraceContext + ): Future[Option[PruningStatus]] = + processingTime.event { + val query = sql""" + select phase, ts, succeeded from #$pruning_status_table + where #$partitionColumn = $partitionKey + """.as[PruningStatus].headOption + storage.query(query, functionFullName) + } + + protected[canton] def advancePruningTimestamp(phase: PruningPhase, timestamp: CantonTimestamp)( + implicit traceContext: TraceContext + ): Future[Unit] = processingTime.event { + + val query = (storage.profile, phase) match { + case (_: DbStorage.Profile.Postgres, PruningPhase.Completed) => + sqlu""" + UPDATE #$pruning_status_table SET phase = CAST($phase as pruning_phase), succeeded = $timestamp + WHERE #$partitionColumn = $partitionKey AND ts = $timestamp + """ + case (_, PruningPhase.Completed) => + sqlu""" + UPDATE #$pruning_status_table SET phase = $phase, succeeded = $timestamp + WHERE #$partitionColumn = $partitionKey AND ts = $timestamp + """ + case (_: DbStorage.Profile.H2, PruningPhase.Started) => + sqlu""" + merge into #$pruning_status_table as pruning_status + using dual + on pruning_status.#$partitionColumn = $partitionKey + when matched and (pruning_status.ts < $timestamp) + then update set pruning_status.phase = $phase, pruning_status.ts = $timestamp + when not matched then insert (#$partitionColumn, phase, ts) values ($partitionKey, $phase, $timestamp) + """ + case (_: DbStorage.Profile.Postgres, PruningPhase.Started) => + sqlu""" + insert into #$pruning_status_table as pruning_status (#$partitionColumn, phase, ts) + values ($partitionKey, CAST($phase as pruning_phase), $timestamp) + on conflict (#$partitionColumn) do + update set phase = 
CAST($phase as pruning_phase), ts = $timestamp + where pruning_status.ts < $timestamp + """ + case (_: DbStorage.Profile.Oracle, PruningPhase.Started) => + sqlu""" + merge into #$pruning_status_table pruning_status + using ( + select + $partitionKey partitionKey, + $phase phase, + $timestamp timestamp + from + dual + ) val + on (pruning_status.#$partitionColumn = val.partitionKey) + when matched then + update set pruning_status.phase = val.phase, pruning_status.ts = val.timestamp + where pruning_status.ts < val.timestamp + when not matched then + insert (#$partitionColumn, phase, ts) values (val.partitionKey, val.phase, val.timestamp) + """ + } + + logger.debug( + s"About to set phase of $pruning_status_table to \"${phase.kind}\" and timestamp to $timestamp" + ) + + for { + rowCount <- storage.update(query, "pruning status upsert") + _ <- + if (logger.underlying.isDebugEnabled && rowCount != 1 && phase == PruningPhase.Started) { + pruningStatus.map { + case Some(previous) if previous.timestamp > timestamp => + logger.debug( + s"Pruning at $timestamp started after another later pruning at ${previous.timestamp}." 
+ ) + case _ => + } + } else Future.successful(()) + } yield { + logger.debug( + s"Finished setting phase of $pruning_status_table to \"${phase.kind}\" and timestamp to $timestamp" + ) + } + } +} + +/** Specialized [[DbPrunableByTime]] that uses the [[com.digitalasset.canton.topology.DomainId]] as discriminator */ +trait DbPrunableByTimeDomain extends DbPrunableByTime[IndexedDomain] { + this: DbStore => + + protected[this] def domainId: IndexedDomain + + override protected[this] def partitionKey: IndexedDomain = domainId + + override protected[this] val partitionColumn = "domain_id" + + override protected[this] implicit val setParameterDiscriminator: SetParameter[IndexedDomain] = + IndexedString.setParameterIndexedString + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/db/DbSequencedEventStore.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/db/DbSequencedEventStore.scala new file mode 100644 index 0000000000..c3d6145ce4 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/store/db/DbSequencedEventStore.scala @@ -0,0 +1,418 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.store.db + +import cats.data.EitherT +import cats.syntax.either.* +import cats.syntax.functor.* +import com.daml.nameof.NameOf.functionFullName +import com.digitalasset.canton.SequencerCounter +import com.digitalasset.canton.config.CantonRequireTypes.String3 +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.lifecycle.CloseContext +import com.digitalasset.canton.logging.* +import com.digitalasset.canton.metrics.TimedLoadGauge +import com.digitalasset.canton.resource.{DbStorage, DbStore} +import com.digitalasset.canton.sequencing.protocol.{ + SequencedEvent, + SequencedEventTrafficState, + SignedContent, +} +import com.digitalasset.canton.sequencing.{OrdinarySerializedEvent, PossiblyIgnoredSerializedEvent} +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.store.* +import com.digitalasset.canton.store.db.DbSequencedEventStore.* +import com.digitalasset.canton.tracing.{SerializableTraceContext, TraceContext} +import com.digitalasset.canton.util.{EitherTUtil, Thereafter} +import com.digitalasset.canton.version.{ProtocolVersion, UntypedVersionedMessage, VersionedMessage} +import slick.jdbc.{GetResult, SetParameter} + +import java.util.concurrent.Semaphore +import scala.concurrent.{ExecutionContext, Future, blocking} + +class DbSequencedEventStore( + override protected val storage: DbStorage, + client: SequencerClientDiscriminator, + protocolVersion: ProtocolVersion, + override protected val timeouts: ProcessingTimeout, + override protected val loggerFactory: NamedLoggerFactory, +)(implicit val ec: ExecutionContext) + extends SequencedEventStore + with DbStore + with DbPrunableByTime[SequencerClientDiscriminator] { + + override protected[this] val partitionKey: SequencerClientDiscriminator = client + + override protected[this] def partitionColumn: String = "client" 
  // Table name used by the pruning machinery to record pruning progress for this store.
  override protected[this] def pruning_status_table: String = "sequenced_event_store_pruning"

  /** Semaphore to prevent concurrent writes to the db.
    * Concurrent calls can be problematic because they may introduce gaps in the stored sequencer counters.
    * The methods [[ignoreEvents]] and [[unignoreEvents]] are not meant to be executed concurrently.
    */
  private val semaphore: Semaphore = new Semaphore(1)

  /** Runs `body` while holding the single permit of [[semaphore]]; the permit is released when the
    * resulting `F[A]` completes (via [[Thereafter]]). `caller` is used only for logging.
    */
  private def withLock[F[_], A](caller: String)(body: => F[A])(implicit
      thereafter: Thereafter[F],
      traceContext: TraceContext,
  ): F[A] = {
    import Thereafter.syntax.*
    // Avoid unnecessary call to blocking, if a permit is available right away.
    if (!semaphore.tryAcquire()) {
      // This should only occur when the caller is ignoring events, so ok to log with info level.
      logger.info(s"Delaying call to $caller, because another write is in progress.")
      blocking(semaphore.acquireUninterruptibly())
    }
    body.thereafter(_ => semaphore.release())
  }

  override protected[this] implicit def setParameterDiscriminator
      : SetParameter[SequencerClientDiscriminator] =
    SequencerClientDiscriminator.setClientDiscriminatorParameter

  import com.digitalasset.canton.store.SequencedEventStore.*
  import storage.api.*
  import storage.converters.*

  override protected val processingTime: TimedLoadGauge =
    storage.metrics.loadGaugeM("sequenced-event-store")

  /** Reads one row of `sequenced_events` (column order: type, sequencer_counter, ts,
    * sequenced_event, trace_context, ignore, traffic columns) and rebuilds the event.
    */
  implicit val getResultPossiblyIgnoredSequencedEvent: GetResult[PossiblyIgnoredSerializedEvent] =
    GetResult { r =>
      val typ = r.<<[SequencedEventDbType]
      val sequencerCounter = r.<<[SequencerCounter]
      val timestamp = r.<<[CantonTimestamp]
      val eventBytes = r.<<[Array[Byte]]
      val traceContext: TraceContext = r.<<[SerializableTraceContext].unwrap
      val ignore = r.<<[Boolean]

      val getTrafficState =
        SequencedEventTrafficState.sequencedEventTrafficStateGetResult(r)

      typ match {
        case SequencedEventDbType.IgnoredEvent =>
          // Ignored placeholder events (appended by ignoreEvents) carry no underlying signed event.
          IgnoredSequencedEvent(timestamp, sequencerCounter, None, getTrafficState)(
            traceContext
          )
        case _ =>
          // Deliver / DeliverError rows carry a serialized, versioned SignedContent payload.
          val signedEvent = ProtoConverter
            .protoParserArray(UntypedVersionedMessage.parseFrom)(eventBytes)
            .map(VersionedMessage.apply)
            .flatMap(SignedContent.fromProtoVersioned(_))
            .flatMap(_.deserializeContent(SequencedEvent.fromByteString))
            .valueOr(err =>
              throw new DbDeserializationException(s"Failed to deserialize sequenced event: $err")
            )
          if (ignore) {
            IgnoredSequencedEvent(
              timestamp,
              sequencerCounter,
              Some(signedEvent),
              getTrafficState,
            )(
              traceContext
            )
          } else {
            OrdinarySequencedEvent(signedEvent, getTrafficState)(
              traceContext
            )
          }
      }
    }

  private implicit val traceContextSetParameter: SetParameter[SerializableTraceContext] =
    SerializableTraceContext.getVersionedSetParameter(protocolVersion)

  /** Stores the given events; rows whose (client, ts) key already exists are left untouched. */
  override def store(
      events: Seq[OrdinarySerializedEvent]
  )(implicit traceContext: TraceContext, externalCloseContext: CloseContext): Future[Unit] = {

    if (events.isEmpty) Future.unit
    else
      processingTime.event {
        withLock(functionFullName) {
          CloseContext.withCombinedContext(closeContext, externalCloseContext, timeouts, logger) {
            combinedCloseContext =>
              storage
                .queryAndUpdate(bulkInsertQuery(events), functionFullName)(
                  traceContext,
                  combinedCloseContext,
                )
                .void
          }
        }
      }
  }

  /** Builds an idempotent bulk insert: MERGE on Oracle, `on conflict do nothing` elsewhere. */
  private def bulkInsertQuery(
      events: Seq[PossiblyIgnoredSerializedEvent]
  )(implicit traceContext: TraceContext): DBIOAction[Unit, NoStream, Effect.All] = {
    val insertSql = storage.profile match {
      case _: DbStorage.Profile.Oracle =>
        """merge /*+ INDEX ( sequenced_events ( ts, client ) ) */
          |into sequenced_events
          |using (select ? client, ? ts from dual) input
          |on (sequenced_events.ts = input.ts and sequenced_events.client = input.client)
          |when not matched then
          |  insert (client, ts, sequenced_event, type, sequencer_counter, trace_context, ignore, extra_traffic_remainder, extra_traffic_consumed)
          |  values (input.client, input.ts, ?, ?, ?, ?, ?, ?, ?)""".stripMargin

      case _ =>
        "insert into sequenced_events (client, ts, sequenced_event, type, sequencer_counter, trace_context, ignore, extra_traffic_remainder, extra_traffic_consumed) " +
          "values (?, ?, ?, ?, ?, ?, ?, ?, ?) " +
          "on conflict do nothing"
    }
    DbStorage.bulkOperation_(insertSql, events, storage.profile) { pp => event =>
      pp >> partitionKey
      pp >> event.timestamp
      pp >> event.underlyingEventBytes
      pp >> event.dbType
      pp >> event.counter
      pp >> SerializableTraceContext(event.traceContext)
      pp >> event.isIgnored
      pp >> event.trafficState.map(_.extraTrafficRemainder)
      pp >> event.trafficState.map(_.extraTrafficConsumed)
    }
  }

  override def find(criterion: SequencedEventStore.SearchCriterion)(implicit
      traceContext: TraceContext
  ): EitherT[Future, SequencedEventNotFoundError, PossiblyIgnoredSerializedEvent] =
    processingTime.eitherTEvent {
      val query = criterion match {
        case ByTimestamp(timestamp) =>
          // The implementation assumes that timestamps on sequenced events increase monotonically with the sequencer counter.
          // It therefore is fine to take the first event that we find.
          sql"""select type, sequencer_counter, ts, sequenced_event, trace_context, ignore, extra_traffic_remainder, extra_traffic_consumed from sequenced_events
                where client = $partitionKey and ts = $timestamp"""
        case LatestUpto(inclusive) =>
          sql"""select type, sequencer_counter, ts, sequenced_event, trace_context, ignore, extra_traffic_remainder, extra_traffic_consumed from sequenced_events
                where client = $partitionKey and ts <= $inclusive
                order by ts desc #${storage.limit(1)}"""
      }

      storage
        .querySingle(query.as[PossiblyIgnoredSerializedEvent].headOption, functionFullName)
        .toRight(SequencedEventNotFoundError(criterion))
    }

  override def findRange(criterion: SequencedEventStore.RangeCriterion, limit: Option[Int])(implicit
      traceContext: TraceContext
  ): EitherT[Future, SequencedEventRangeOverlapsWithPruning, Seq[PossiblyIgnoredSerializedEvent]] =
    EitherT(processingTime.event {
      criterion match {
        case ByTimestampRange(lowerInclusive, upperInclusive) =>
          for {
            events <- storage.query(
              sql"""select type, sequencer_counter, ts, sequenced_event, trace_context, ignore, extra_traffic_remainder, extra_traffic_consumed from sequenced_events
                    where client = $partitionKey and $lowerInclusive <= ts and ts <= $upperInclusive
                    order by ts #${limit.fold("")(storage.limit(_))}"""
                .as[PossiblyIgnoredSerializedEvent],
              functionFullName,
            )
            // check for pruning after we've read the events so that we certainly catch the case
            // if pruning is started while we're reading (as we're not using snapshot isolation here)
            pruningO <- pruningStatus
          } yield pruningO match {
            case Some(pruningStatus) if pruningStatus.timestamp >= lowerInclusive =>
              Left(SequencedEventRangeOverlapsWithPruning(criterion, pruningStatus, events))
            case _ =>
              Right(events)
          }
      }
    })

  override def sequencedEvents(
      limit: Option[Int] = None
  )(implicit traceContext: TraceContext): Future[Seq[PossiblyIgnoredSerializedEvent]] = {
    processingTime.event {
      storage.query(
        sql"""select type, sequencer_counter, ts, sequenced_event, trace_context, ignore, extra_traffic_remainder, extra_traffic_consumed from sequenced_events
              where client = $partitionKey
              order by ts #${limit.fold("")(storage.limit(_))}"""
          .as[PossiblyIgnoredSerializedEvent],
        functionFullName,
      )
    }
  }

  override protected[canton] def doPrune(
      untilInclusive: CantonTimestamp,
      lastPruning: Option[CantonTimestamp],
  )(implicit traceContext: TraceContext): Future[Unit] =
    processingTime.event {
      val query =
        sqlu"delete from sequenced_events where client = $partitionKey and ts <= $untilInclusive"
      storage
        .update(query, functionFullName)
        .map { nrPruned =>
          logger.info(
            s"Pruned at least $nrPruned entries from the sequenced event store of client $partitionKey older or equal to $untilInclusive"
          )
        }
    }

  /** Marks the given counter range as ignored, appending empty placeholder events where needed.
    * Serialized with other writes via [[withLock]] to avoid introducing counter gaps.
    */
  override def ignoreEvents(fromInclusive: SequencerCounter, untilInclusive: SequencerCounter)(
      implicit traceContext: TraceContext
  ): EitherT[Future, ChangeWouldResultInGap, Unit] =
    withLock(functionFullName) {
      for {
        _ <- appendEmptyIgnoredEvents(fromInclusive, untilInclusive)
        _ <- EitherT.right(setIgnoreStatus(fromInclusive, untilInclusive, ignore = true))
      } yield ()
    }

  /** Appends empty ignored events for counters beyond the last stored event, failing with
    * [[ChangeWouldResultInGap]] if the requested range would leave counters unaccounted for.
    */
  private def appendEmptyIgnoredEvents(
      fromInclusive: SequencerCounter,
      untilInclusive: SequencerCounter,
  )(implicit
      traceContext: TraceContext
  ): EitherT[Future, ChangeWouldResultInGap, Unit] =
    processingTime.eitherTEvent {
      for {
        lastSequencerCounterAndTimestampO <- EitherT.right(
          storage.query(
            sql"""select sequencer_counter, ts from sequenced_events where client = $partitionKey
                  order by sequencer_counter desc #${storage.limit(1)}"""
              .as[(SequencerCounter, CantonTimestamp)]
              .headOption,
            functionFullName,
          )
        )

        (firstSc, firstTs) = lastSequencerCounterAndTimestampO match {
          case Some((lastSc, lastTs)) => (lastSc + 1, lastTs.immediateSuccessor)
          case None =>
            // Starting with MinValue.immediateSuccessor, because elsewhere we assume that MinValue is a strict lower bound on event timestamps.
            (fromInclusive, CantonTimestamp.MinValue.immediateSuccessor)
        }

        _ <- EitherTUtil.condUnitET[Future](
          fromInclusive <= firstSc || fromInclusive > untilInclusive,
          ChangeWouldResultInGap(firstSc, fromInclusive - 1),
        )

        events = ((firstSc max fromInclusive) to untilInclusive).map { sc =>
          val ts = firstTs.addMicros(sc - firstSc)
          IgnoredSequencedEvent(ts, sc, None, None)(traceContext)
        }

        _ <- EitherT.right(storage.queryAndUpdate(bulkInsertQuery(events), functionFullName))
      } yield ()
    }

  /** Sets the `ignore` flag for all events in the given counter range. */
  private def setIgnoreStatus(
      fromInclusive: SequencerCounter,
      toInclusive: SequencerCounter,
      ignore: Boolean,
  )(implicit
      traceContext: TraceContext
  ): Future[Unit] = processingTime.event {
    storage.update_(
      sqlu"update sequenced_events set ignore = $ignore where client = $partitionKey and $fromInclusive <= sequencer_counter and sequencer_counter <= $toInclusive",
      functionFullName,
    )
  }

  /** Reverts [[ignoreEvents]] for the given range; serialized with other writes via [[withLock]]. */
  override def unignoreEvents(from: SequencerCounter, to: SequencerCounter)(implicit
      traceContext: TraceContext
  ): EitherT[Future, ChangeWouldResultInGap, Unit] =
    withLock(functionFullName) {
      for {
        _ <- deleteEmptyIgnoredEvents(from, to)
        _ <- EitherT.right(setIgnoreStatus(from, to, ignore = false))
      } yield ()
    }

  /** Deletes empty ignored events in the range, failing with [[ChangeWouldResultInGap]] if the
    * deletion would leave a hole in the stored sequencer counters.
    */
  private def deleteEmptyIgnoredEvents(from: SequencerCounter, to: SequencerCounter)(implicit
      traceContext: TraceContext
  ): EitherT[Future, ChangeWouldResultInGap, Unit] =
    processingTime.eitherTEvent {
      for {
        lastNonEmptyEventSequencerCounter <- EitherT.right(
          storage.query(
            sql"""select sequencer_counter from sequenced_events
                  where client = $partitionKey and type != ${SequencedEventDbType.IgnoredEvent}
                  order by sequencer_counter desc #${storage.limit(1)}"""
              .as[SequencerCounter]
              .headOption,
            functionFullName,
          )
        )

        fromEffective = lastNonEmptyEventSequencerCounter.fold(from)(c => (c + 1) max from)

        lastSequencerCounter <- EitherT.right(
          storage.query(
            sql"""select sequencer_counter from sequenced_events
                  where client = $partitionKey
                  order by sequencer_counter desc #${storage.limit(1)}"""
              .as[SequencerCounter]
              .headOption,
            functionFullName,
          )
        )

        _ <- EitherTUtil.condUnitET[Future](
          lastSequencerCounter.forall(_ <= to) || fromEffective > to,
          ChangeWouldResultInGap(fromEffective, to),
        )

        _ <- EitherT.right(
          storage.update(
            sqlu"""delete from sequenced_events
                   where client = $partitionKey and type = ${SequencedEventDbType.IgnoredEvent}
                   and $fromEffective <= sequencer_counter and sequencer_counter <= $to""",
            functionFullName,
          )
        )
      } yield ()
    }

  private[canton] override def delete(
      from: SequencerCounter
  )(implicit traceContext: TraceContext): Future[Unit] =
    processingTime.event {
      storage.update_(
        sqlu"delete from sequenced_events where client = $partitionKey and sequencer_counter >= $from",
        functionFullName,
      )
    }
}

object DbSequencedEventStore {

  /** Discriminator stored in the `type` column of `sequenced_events`. */
  sealed trait SequencedEventDbType {
    val name: String3
  }

  object SequencedEventDbType {

    case object Deliver extends SequencedEventDbType {
      override val name: String3 = String3.tryCreate("del")
    }

    case object DeliverError extends SequencedEventDbType {
      override val name: String3 = String3.tryCreate("err")
    }

    case object IgnoredEvent extends SequencedEventDbType {
      override val name: String3 = String3.tryCreate("ign")
    }

    implicit val setParameterSequencedEventType: SetParameter[SequencedEventDbType] = (v, pp) =>
      pp >> v.name

    implicit val getResultSequencedEventType: GetResult[SequencedEventDbType] = GetResult(r =>
      r.nextString() match {
        case Deliver.name.str => Deliver
        case DeliverError.name.str => DeliverError
        case IgnoredEvent.name.str => IgnoredEvent
        case unknown =>
          throw new DbDeserializationException(s"Unknown sequenced event type [$unknown]")
      }
    )
  }
}
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.store.db

import com.digitalasset.canton.SequencerCounterDiscriminator
import com.digitalasset.canton.config.ProcessingTimeout
import com.digitalasset.canton.lifecycle.Lifecycle
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.resource.DbStorage
import com.digitalasset.canton.store.SequencerCounterTrackerStore

import scala.concurrent.ExecutionContext

/** DB-backed [[SequencerCounterTrackerStore]] that delegates all cursor persistence to a
  * [[DbCursorPreheadStore]] over the `head_sequencer_counters` table.
  */
class DbSequencerCounterTrackerStore(
    client: SequencerClientDiscriminator,
    storage: DbStorage,
    override protected val timeouts: ProcessingTimeout,
    override protected val loggerFactory: NamedLoggerFactory,
)(implicit ec: ExecutionContext)
    extends SequencerCounterTrackerStore
    with NamedLogging {

  import DbSequencerCounterTrackerStore.cursorTable

  // All reads and writes of the tracked counter go through this prehead store.
  override protected[store] val cursorStore =
    new DbCursorPreheadStore[SequencerCounterDiscriminator](
      client,
      storage,
      cursorTable,
      storage.metrics.loadGaugeM("sequencer-counter-tracker-store"),
      timeouts,
      loggerFactory,
    )

  override def onClosed(): Unit = Lifecycle.close(cursorStore)(logger)
}

object DbSequencerCounterTrackerStore {
  // Name of the table holding the tracker's cursor prehead.
  val cursorTable = "head_sequencer_counters"
}
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.store.db

import cats.syntax.either.*
import com.digitalasset.canton.config.RequireTypes.{NonNegativeLong, PositiveInt, PositiveLong}
import slick.jdbc.{GetResult, SetParameter}

/** Slick (de)serialization codecs for the refined numeric types of `RequireTypes`.
  *
  * All deserializers throw [[DbDeserializationException]] on values violating the refinement;
  * for consistency, every failure message now includes the underlying creation error
  * (previously the positive variants dropped it).
  */
object RequiredTypesCodec {

  /** @throws DbDeserializationException if `l` is zero or negative */
  def positiveLongDBDeserializer(l: Long): PositiveLong =
    PositiveLong.create(l).valueOr { err =>
      throw new DbDeserializationException(
        s"$l cannot be deserialized to a positive long: $err"
      )
    }

  /** @throws DbDeserializationException if `i` is zero or negative */
  def positiveIntDBDeserializer(i: Int): PositiveInt =
    PositiveInt.create(i).valueOr { err =>
      throw new DbDeserializationException(
        s"$i cannot be deserialized to a positive int: $err"
      )
    }

  implicit val positiveLongSetResult: SetParameter[PositiveLong] = (v: PositiveLong, pp) =>
    pp >> v.value

  implicit val positiveIntSetResult: SetParameter[PositiveInt] = (v: PositiveInt, pp) =>
    pp >> v.value

  implicit val positiveLongGetResult: GetResult[PositiveLong] =
    GetResult.GetLong.andThen(positiveLongDBDeserializer)

  implicit val positiveIntGetResult: GetResult[PositiveInt] =
    GetResult.GetInt.andThen(positiveIntDBDeserializer)

  /** @throws DbDeserializationException if `l` is negative */
  def nonNegativeLongDBDeserializer(l: Long): NonNegativeLong =
    NonNegativeLong.create(l).valueOr { err =>
      throw new DbDeserializationException(
        s"$l cannot be deserialized to a non negative long: $err"
      )
    }

  implicit val nonNegativeLongSetResult: SetParameter[NonNegativeLong] = (v: NonNegativeLong, pp) =>
    pp >> v.value

  implicit val nonNegativeLongGetResult: GetResult[NonNegativeLong] =
    GetResult.GetLong.andThen(nonNegativeLongDBDeserializer)

  implicit val nonNegativeLongOptionGetResult: GetResult[Option[NonNegativeLong]] =
    GetResult.GetLongOption.andThen(_.map(nonNegativeLongDBDeserializer))
}

// File: com/digitalasset/canton/store/db/SequencerClientDiscriminator.scala
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.store.db

import com.digitalasset.canton.store.{IndexedDomain, IndexedMember, IndexedStringStore}
import com.digitalasset.canton.topology.{DomainId, DomainMember}
import com.digitalasset.canton.util.NoCopy
import slick.jdbc.SetParameter

import scala.concurrent.{ExecutionContext, Future}
/** We typically have a database per node but there can be many owners of a SequencerClient within
  * that node.
  *
  * For a domain the mediator and topology manager may have their own SequencerClient instances.
  * For a participant each domain the participant connects to will have its own SequencerClient.
  * We use this discriminator to allow storing state for all sequencer clients in the same table.
  *
  * To ensure that we truly use different "static_string" indexes, we'll require the
  * indexes to be generated by code in this file so that we can discriminate the client data.
  */
sealed trait SequencerClientDiscriminator extends NoCopy {

  /** indexed use within the database */
  def index: Int
}

object SequencerClientDiscriminator {

  /** Discriminator derived from a domain id (e.g. a participant's per-domain client). */
  final case class DomainDiscriminator(domainId: DomainId, override val index: Int)
      extends NoCopy
      with SequencerClientDiscriminator

  /** Discriminator derived from a domain member (e.g. mediator or topology manager). */
  final case class DomainMemberDiscriminator(member: DomainMember, override val index: Int)
      extends NoCopy
      with SequencerClientDiscriminator

  // Only the integer index is persisted; the wrapped id/member is informational.
  implicit val setClientDiscriminatorParameter: SetParameter[SequencerClientDiscriminator] =
    (v, pp) => pp.setInt(v.index)

  /** Allocates (or looks up) the index for `member` via the indexed-string store. */
  def fromDomainMember(member: DomainMember, indexedStringStore: IndexedStringStore)(implicit
      ec: ExecutionContext
  ): Future[SequencerClientDiscriminator] =
    IndexedMember
      .indexed(indexedStringStore)(member)
      .map(indexed => DomainMemberDiscriminator(member, indexed.index))

  /** Reuses the index already assigned to the domain id. */
  def fromIndexedDomainId(domainId: IndexedDomain): DomainDiscriminator =
    DomainDiscriminator(domainId.item, domainId.index)
}
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.store.memory

import com.digitalasset.canton.concurrent.DirectExecutionContext
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.resource.TransactionalStoreUpdate
import com.digitalasset.canton.store.{CursorPrehead, CursorPreheadStore}
import com.digitalasset.canton.tracing.TraceContext
import com.google.common.annotations.VisibleForTesting

import java.util.concurrent.atomic.AtomicReference
import scala.concurrent.{ExecutionContext, Future}

/** In-memory [[CursorPreheadStore]]: the prehead lives in a single [[AtomicReference]] and
  * all updates are applied atomically, so concurrent advances/rewinds never lose the extremum.
  */
class InMemoryCursorPreheadStore[Discr](protected val loggerFactory: NamedLoggerFactory)
    extends CursorPreheadStore[Discr]
    with NamedLogging {

  override private[store] implicit val ec: ExecutionContext =
    DirectExecutionContext(noTracingLogger)

  // Current prehead; None means no prehead has been set yet.
  private val preheadRef = new AtomicReference[Option[CursorPrehead[Discr]]](None)

  override def prehead(implicit
      traceContext: TraceContext
  ): Future[Option[CursorPrehead[Discr]]] =
    Future.successful(preheadRef.get())

  @VisibleForTesting
  private[canton] override def overridePreheadUnsafe(newPrehead: Option[CursorPrehead[Discr]])(
      implicit traceContext: TraceContext
  ): Future[Unit] = {
    // Unconditional overwrite, bypassing the monotonicity rules of advance/rewind.
    preheadRef.set(newPrehead)
    Future.unit
  }

  override def advancePreheadToTransactionalStoreUpdate(
      newPrehead: CursorPrehead[Discr]
  )(implicit traceContext: TraceContext): TransactionalStoreUpdate =
    TransactionalStoreUpdate.InMemoryTransactionalStoreUpdate {
      // Only ever move the prehead forward; a concurrent larger value wins.
      val _ = preheadRef.getAndUpdate {
        case Some(existing) if existing.counter >= newPrehead.counter => Some(existing)
        case _ => Some(newPrehead)
      }
    }

  override def rewindPreheadTo(
      newPreheadO: Option[CursorPrehead[Discr]]
  )(implicit traceContext: TraceContext): Future[Unit] = {
    logger.info(s"Rewinding prehead to $newPreheadO")
    newPreheadO match {
      case None => preheadRef.set(None)
      case Some(newPrehead) =>
        // Only ever move the prehead backward; smaller (or absent) preheads stay untouched.
        val _ = preheadRef.getAndUpdate {
          case Some(existing) if existing.counter > newPrehead.counter => Some(newPrehead)
          case other => other
        }
    }
    Future.unit
  }

  override def close(): Unit = ()
}

// File: com/digitalasset/canton/store/memory/InMemoryIndexedStringStore.scala
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.store.memory

import com.digitalasset.canton.DiscardOps
import com.digitalasset.canton.config.CantonRequireTypes.String300
import com.digitalasset.canton.store.{IndexedStringStore, IndexedStringType}

import scala.collection.concurrent.TrieMap
import scala.collection.mutable.ArrayBuffer
import scala.concurrent.{Future, blocking}
/** In memory version of an indexed string store.
  *
  * @param minIndex the minimum value of assigned indices (for testing purposes)
  * @param maxIndex the maximum value of assigned indices (for testing purposes)
  */
class InMemoryIndexedStringStore(val minIndex: Int, val maxIndex: Int) extends IndexedStringStore {

  // Maps (string, string-type discriminator) to its assigned index.
  private val indexByKey = TrieMap[(String300, Int), Int]()
  // Keys in assignment order; position + minIndex == assigned index.
  private val assignedKeys = ArrayBuffer[(String300, Int)]()

  override def getOrCreateIndex(dbTyp: IndexedStringType, str: String300): Future[Int] =
    Future.successful(getOrCreateIndexForTesting(dbTyp, str))

  /** @throws java.lang.IllegalArgumentException if a new index is created and the new index would exceed `maxIndex`
    */
  def getOrCreateIndexForTesting(dbTyp: IndexedStringType, str: String300): Int = blocking {
    synchronized {
      val key = (str, dbTyp.source)
      indexByKey.getOrElse(
        key, {
          // Not yet assigned: the next index is determined by the number of assignments so far.
          val idx = assignedKeys.length + minIndex
          require(idx <= maxIndex, s"New index $idx would exceed the maximum index $maxIndex.")
          assignedKeys.append(key).discard
          indexByKey.put(key, idx).discard
          idx
        },
      )
    }
  }

  override def getForIndex(dbTyp: IndexedStringType, idx: Int): Future[Option[String300]] =
    Future.successful {
      blocking {
        synchronized {
          val position = idx - minIndex
          if (position >= 0 && assignedKeys.lengthCompare(position) > 0) {
            val (str, source) = assignedKeys(position)
            // Only answer if the stored entry belongs to the requested string type.
            Option.when(source == dbTyp.source)(str)
          } else None
        }
      }
    }

  override def close(): Unit = ()
}

object InMemoryIndexedStringStore {
  // Start with 1 by default to have same behavior as the db backed store.
  def apply(): InMemoryIndexedStringStore = new InMemoryIndexedStringStore(1, Int.MaxValue)
}

// File: com/digitalasset/canton/store/memory/InMemoryPrunableByTime.scala
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.store.memory

import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.logging.NamedLogging
import com.digitalasset.canton.pruning.{PruningPhase, PruningStatus}
import com.digitalasset.canton.store.PrunableByTime
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.OptionUtil

import java.util.concurrent.atomic.AtomicReference
import scala.concurrent.Future
/** Mixin for an in-memory store that provides a thread-safe storage slot for the latest point in
  * time when pruning has started or finished.
  *
  * The pruning method of the store must use [[advancePruningTimestamp]] to signal the start and
  * completion of each pruning.
  */
trait InMemoryPrunableByTime extends PrunableByTime { this: NamedLogging =>

  // Latest known pruning status; None until the first pruning is signalled.
  protected[this] val pruningStatusF: AtomicReference[Option[PruningStatus]] =
    new AtomicReference[Option[PruningStatus]](None)

  override def pruningStatus(implicit
      traceContext: TraceContext
  ): Future[Option[PruningStatus]] =
    Future.successful(pruningStatusF.get)

  protected[canton] def advancePruningTimestamp(phase: PruningPhase, timestamp: CantonTimestamp)(
      implicit traceContext: TraceContext
  ): Future[Unit] = Future.successful {
    val next = PruningStatus(
      phase,
      timestamp,
      Option.when(phase == PruningPhase.Completed)(timestamp),
    )
    // Merge atomically with the previous status, keeping the maximum of the two.
    val previousO = pruningStatusF.getAndAccumulate(
      Some(next),
      OptionUtil.mergeWith(_, _)(Ordering[PruningStatus].max),
    )
    if (logger.underlying.isDebugEnabled && phase == PruningPhase.Started) {
      // Out-of-order start: a later pruning had already been recorded.
      previousO.filter(_.timestamp > timestamp).foreach { previous =>
        logger.debug(
          s"Pruning at $timestamp started after another later pruning at ${previous.timestamp}."
        )
      }
    }
  }
}
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.store.memory

import cats.data.EitherT
import cats.syntax.either.*
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.sequencing.protocol.MessageId
import com.digitalasset.canton.store.SavePendingSendError.MessageIdAlreadyTracked
import com.digitalasset.canton.store.{SavePendingSendError, SendTrackerStore}
import com.digitalasset.canton.tracing.TraceContext

import scala.collection.concurrent.TrieMap
import scala.concurrent.{ExecutionContext, Future}

/** In-memory [[SendTrackerStore]] keeping the pending sends in a concurrent map. */
class InMemorySendTrackerStore(implicit executionContext: ExecutionContext)
    extends SendTrackerStore {

  // message id -> max sequencing time of the in-flight send
  private val pendingSends = new TrieMap[MessageId, CantonTimestamp]()

  override def fetchPendingSends(implicit
      traceContext: TraceContext
  ): Future[Map[MessageId, CantonTimestamp]] =
    Future.successful(pendingSends.toMap)

  override def savePendingSend(messageId: MessageId, maxSequencingTime: CantonTimestamp)(implicit
      traceContext: TraceContext
  ): EitherT[Future, SavePendingSendError, Unit] = {
    // putIfAbsent returns the previous value iff the message id was already tracked
    val outcome = pendingSends.putIfAbsent(messageId, maxSequencingTime) match {
      case None => Either.right(())
      case Some(_) => Either.left(MessageIdAlreadyTracked: SavePendingSendError)
    }
    outcome.toEitherT[Future]
  }

  override def removePendingSend(
      messageId: MessageId
  )(implicit traceContext: TraceContext): Future[Unit] = {
    val _ = pendingSends.remove(messageId)
    Future.unit
  }

  override def close(): Unit = ()
}
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.store.memory

import cats.data.EitherT
import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.SequencerCounter
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.lifecycle.CloseContext
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.sequencing.{OrdinarySerializedEvent, PossiblyIgnoredSerializedEvent}
import com.digitalasset.canton.store.SequencedEventStore.*
import com.digitalasset.canton.store.{
  ChangeWouldResultInGap,
  SequencedEventNotFoundError,
  SequencedEventRangeOverlapsWithPruning,
  SequencedEventStore,
}
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.ShowUtil.*

import scala.collection.mutable
import scala.concurrent.{ExecutionContext, Future, blocking}

/** In memory implementation of a [[SequencedEventStore]].
  *
  * All accesses to the two sorted maps synchronize on [[lock]].
  */
class InMemorySequencedEventStore(protected val loggerFactory: NamedLoggerFactory)(implicit
    val ec: ExecutionContext
) extends SequencedEventStore
    with NamedLogging
    with InMemoryPrunableByTime {

  private val lock = new Object()

  /** Invariant:
    * The sequenced event stored at timestamp `ts` has timestamp `ts`.
    */
  private val eventByTimestamp: mutable.SortedMap[CantonTimestamp, PossiblyIgnoredSerializedEvent] =
    mutable.SortedMap.empty

  /** Invariants:
    * - The value set equals the key set of `eventsByTimestamp`.
    * - `eventsByTimestamp(timestampOfCounter(sc))` has sequencer counter `sc`
    */
  private val timestampOfCounter: mutable.SortedMap[SequencerCounter, CantonTimestamp] =
    mutable.SortedMap.empty

  /** Stores the events; existing entries for a timestamp or counter are left untouched. */
  def store(
      events: Seq[OrdinarySerializedEvent]
  )(implicit traceContext: TraceContext, closeContext: CloseContext): Future[Unit] =
    NonEmpty.from(events).fold(Future.unit) { events =>
      logger.debug(
        show"Storing delivery events from ${events.head1.timestamp} to ${events.last1.timestamp}."
      )

      blocking(lock.synchronized {
        events.foreach { e =>
          eventByTimestamp.getOrElseUpdate(e.timestamp, e).discard
          timestampOfCounter.getOrElseUpdate(e.counter, e.timestamp).discard
        }
      })
      Future.unit
    }

  override def find(criterion: SequencedEventStore.SearchCriterion)(implicit
      traceContext: TraceContext
  ): EitherT[Future, SequencedEventNotFoundError, PossiblyIgnoredSerializedEvent] = {

    logger.debug(s"Looking to retrieve delivery event $criterion")
    blocking(lock.synchronized {
      val resO = criterion match {
        case ByTimestamp(timestamp) =>
          eventByTimestamp.get(timestamp)
        case LatestUpto(inclusive) =>
          eventByTimestamp.rangeTo(inclusive).lastOption.map { case (_, event) => event }
      }

      EitherT(Future.successful(resO.toRight(SequencedEventNotFoundError(criterion))))
    })
  }

  override def findRange(criterion: RangeCriterion, limit: Option[Int])(implicit
      traceContext: TraceContext
  ): EitherT[Future, SequencedEventRangeOverlapsWithPruning, Seq[
    PossiblyIgnoredSerializedEvent
  ]] = {
    logger.debug(s"Looking to retrieve delivery event $criterion")
    blocking(lock.synchronized {
      criterion match {
        case ByTimestampRange(lowerInclusive, upperInclusive) =>
          val valuesInRangeIterable =
            eventByTimestamp.rangeFrom(lowerInclusive).rangeTo(upperInclusive).values
          // Copy the elements, as the returned iterator will otherwise explode if the underlying collection is modified.
          val result = limit.fold(valuesInRangeIterable)(valuesInRangeIterable.take).toList

          pruningStatusF.get match {
            case Some(pruningStatus) if pruningStatus.timestamp >= lowerInclusive =>
              EitherT.leftT[Future, Seq[PossiblyIgnoredSerializedEvent]](
                SequencedEventRangeOverlapsWithPruning(criterion, pruningStatus, result)
              )
            case _ =>
              EitherT.rightT[Future, SequencedEventRangeOverlapsWithPruning](result)
          }
      }
    })
  }

  override def sequencedEvents(
      limit: Option[Int] = None
  )(implicit traceContext: TraceContext): Future[Seq[PossiblyIgnoredSerializedEvent]] =
    blocking(lock.synchronized {
      // Always copy the elements, as the returned iterator will otherwise explode if the underlying collection is modified.
      Future.successful(eventByTimestamp.values.take(limit.getOrElse(Int.MaxValue)).toList)
    })

  override def doPrune(
      beforeAndIncluding: CantonTimestamp,
      lastPruning: Option[CantonTimestamp],
  )(implicit traceContext: TraceContext): Future[Unit] = Future.successful {
    blocking(lock.synchronized {
      // Snapshot the range before removing: removing entries while iterating a live range view
      // of the same mutable.SortedMap is not guaranteed to be safe.
      eventByTimestamp.rangeTo(beforeAndIncluding).toList.foreach { case (ts, e) =>
        eventByTimestamp.remove(ts).discard
        timestampOfCounter.remove(e.counter).discard
      }
    })
  }

  override def ignoreEvents(from: SequencerCounter, to: SequencerCounter)(implicit
      traceContext: TraceContext
  ): EitherT[Future, ChangeWouldResultInGap, Unit] =
    EitherT.fromEither {
      blocking(lock.synchronized {
        for {
          _ <- appendEmptyIgnoredEvents(from, to)
        } yield {
          setIgnoreStatus(from, to, ignore = true)
        }
      })
    }

  /** Appends empty ignored events for counters past the last stored event; fails with
    * [[ChangeWouldResultInGap]] if the requested range would leave counters unaccounted for.
    * Must be called while holding [[lock]].
    */
  private def appendEmptyIgnoredEvents(from: SequencerCounter, to: SequencerCounter)(implicit
      traceContext: TraceContext
  ): Either[ChangeWouldResultInGap, Unit] = {
    val lastScAndTs = timestampOfCounter.lastOption

    val (firstSc, firstTs) = lastScAndTs match {
      case Some((lastSc, lastTs)) => (lastSc + 1, lastTs.immediateSuccessor)
      case None =>
        // Starting with MinValue.immediateSuccessor, because elsewhere we assume that MinValue is a strict lower bound on event timestamps.
        (from, CantonTimestamp.MinValue.immediateSuccessor)
    }

    if (from <= firstSc) {
      val timestamps = (firstSc to to).map { sc =>
        val ts = firstTs.addMicros(sc - firstSc)
        sc -> ts
      }.toMap
      timestampOfCounter.addAll(timestamps)

      val events = timestamps.map { case (sc, ts) =>
        ts -> IgnoredSequencedEvent(ts, sc, None, None)(traceContext)
      }
      eventByTimestamp.addAll(events)

      Right(())
    } else if (from > to) {
      Right(())
    } else {
      Left(ChangeWouldResultInGap(firstSc, from - 1))
    }
  }

  /** Flips the ignore flag of all events in the counter range. Must be called while holding [[lock]]. */
  private def setIgnoreStatus(from: SequencerCounter, to: SequencerCounter, ignore: Boolean): Unit =
    if (from <= to) {
      val timestamps = timestampOfCounter.rangeFrom(from).rangeTo(to).values

      val newEvents = timestamps.map { ts =>
        val oldEvent = eventByTimestamp(ts)
        val newEvent = if (ignore) oldEvent.asIgnoredEvent else oldEvent.asOrdinaryEvent
        ts -> newEvent
      }.toMap

      eventByTimestamp.addAll(newEvents)
    }

  override def unignoreEvents(from: SequencerCounter, to: SequencerCounter)(implicit
      traceContext: TraceContext
  ): EitherT[Future, ChangeWouldResultInGap, Unit] =
    EitherT.fromEither {
      blocking(lock.synchronized {
        for {
          _ <- deleteEmptyIgnoredEvents(from, to)
        } yield setIgnoreStatus(from, to, ignore = false)
      })
    }

  /** Removes empty ignored events in the range; fails with [[ChangeWouldResultInGap]] if the
    * deletion would leave a hole in the stored counters. Must be called while holding [[lock]].
    */
  private def deleteEmptyIgnoredEvents(
      from: SequencerCounter,
      to: SequencerCounter,
  ): Either[ChangeWouldResultInGap, Unit] = {
    val lastNonEmptyEventSc =
      timestampOfCounter
        .filter { case (_, ts) => eventByTimestamp(ts).underlying.isDefined }
        .lastOption
        .map { case (sc, _) => sc }

    val fromEffective = lastNonEmptyEventSc.fold(from)(c => (c + 1).max(from))

    val lastSc = timestampOfCounter.lastOption.map { case (sc, _) => sc }

    if (fromEffective <= to) {
      if (lastSc.forall(_ <= to)) {
        // Snapshot before removing; see doPrune.
        timestampOfCounter.rangeFrom(fromEffective).rangeTo(to).toList.foreach { case (sc, ts) =>
          eventByTimestamp.remove(ts).discard
          timestampOfCounter.remove(sc).discard
        }
        Right(())
      } else {
        Left(ChangeWouldResultInGap(fromEffective, to))
      }
    } else {
      Right(())
    }
  }

  private[canton] override def delete(
      from: SequencerCounter
  )(implicit traceContext: TraceContext): Future[Unit] = {
    // Fix: take the lock like every other mutator (the original omitted it), and snapshot the
    // range before removing entries (removal while iterating a live range view is unsafe).
    blocking(lock.synchronized {
      timestampOfCounter.rangeFrom(from).toList.foreach { case (sc, ts) =>
        timestampOfCounter.remove(sc).discard
        eventByTimestamp.remove(ts).discard
      }
    })
    Future.unit
  }

  override def close(): Unit = ()
}
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.store.memory + +import com.digitalasset.canton.SequencerCounterDiscriminator +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.store.SequencerCounterTrackerStore + +class InMemorySequencerCounterTrackerStore( + override val loggerFactory: NamedLoggerFactory, + override val timeouts: ProcessingTimeout, +) extends SequencerCounterTrackerStore + with NamedLogging { + override protected[store] val cursorStore = + new InMemoryCursorPreheadStore[SequencerCounterDiscriminator](loggerFactory) + + override def onClosed(): Unit = () +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/time/Clock.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/time/Clock.scala new file mode 100644 index 0000000000..3bf7aee643 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/time/Clock.scala @@ -0,0 +1,518 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.time + +import cats.data.EitherT +import com.daml.error.{ErrorCategory, ErrorCode, Explanation, Resolution} +import com.digitalasset.canton.ProtoDeserializationError +import com.digitalasset.canton.concurrent.Threading +import com.digitalasset.canton.config.{ClientConfig, ProcessingTimeout} +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.error.CantonError +import com.digitalasset.canton.error.CantonErrorGroups.ClockErrorGroup +import com.digitalasset.canton.lifecycle.UnlessShutdown.AbortedDueToShutdown +import com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, Lifecycle, UnlessShutdown} +import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.networking.grpc.ClientChannelBuilder +import com.digitalasset.canton.time.Clock.SystemClockRunningBackwards +import com.digitalasset.canton.topology.admin.v0.InitializationServiceGrpc +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.ShowUtil.* +import com.digitalasset.canton.util.{ErrorUtil, PriorityBlockingQueueUtil} +import com.google.common.annotations.VisibleForTesting +import com.google.protobuf.empty.Empty + +import java.time.{Clock as JClock, Duration, Instant} +import java.util.concurrent.atomic.{AtomicBoolean, AtomicLong, AtomicReference} +import java.util.concurrent.{Callable, PriorityBlockingQueue, TimeUnit} +import scala.annotation.tailrec +import scala.concurrent.{ExecutionContextExecutor, Future, Promise} +import scala.util.{Random, Try} + +/** A clock returning the current time, but with a twist: it always + * returns unique timestamps. If two calls are made to the same clock + * instance at the same time (according to the resolution of this + * clock), one of the calls will block, until it can return a unique + * value. + * + * All public functions are thread-safe. 
+ */ +abstract class Clock() extends TimeProvider with AutoCloseable with NamedLogging { + + protected val last = new AtomicReference[CantonTimestamp](CantonTimestamp.Epoch) + private val backwardsClockAlerted = new AtomicReference[CantonTimestamp](CantonTimestamp.Epoch) + + /** Potentially non-monotonic system clock + * + * Never use Instant.now, use the clock (as we also support sim-clock). If you need to ensure + * that the clock is monotonically increasing, use the [[uniqueTime]] method instead. + */ + def now: CantonTimestamp + + def isSimClock: Boolean = this match { + case _: SimClock => true + case _ => false + } + protected def warnIfClockRunsBackwards: Boolean = false + + protected case class Queued(action: CantonTimestamp => Unit, timestamp: CantonTimestamp) { + + val promise = Promise[UnlessShutdown[Unit]]() + + def run(now: CantonTimestamp): Unit = + promise.complete(Try(UnlessShutdown.Outcome(action(now)))) + + } + + protected def addToQueue(queue: Queued): Unit + + /** thread safe weakly monotonic time: each timestamp will be either equal or increasing + * May go backwards across restarts. + */ + final def monotonicTime(): CantonTimestamp = internalMonotonicTime(0) + + /** thread safe strongly monotonic increasing time: each timestamp will be unique + * May go backwards across restarts. + */ + final def uniqueTime(): CantonTimestamp = internalMonotonicTime(1) + + private def internalMonotonicTime(minSpacingMicros: Long): CantonTimestamp = { + // `now` may block, so we call it before entering the `updateAndGet` block below. 
+ val nowSnapshot = now + last.updateAndGet { oldTs => + if (oldTs.isBefore(nowSnapshot)) + nowSnapshot + else { + // emit warning if clock is running backwards + if ( + warnIfClockRunsBackwards && // turn of warning for simclock, as access to now and last might be racy + oldTs.isAfter(nowSnapshot.plusSeconds(1L)) && backwardsClockAlerted + .get() + .isBefore(nowSnapshot.minusSeconds(30)) + ) { + import TraceContext.Implicits.Empty.* + backwardsClockAlerted.set(nowSnapshot) + SystemClockRunningBackwards.Error(nowSnapshot, oldTs).discard + } + if (minSpacingMicros > 0) + oldTs.addMicros(minSpacingMicros) + else oldTs + } + } + } + + protected val tasks = + new PriorityBlockingQueue[Queued]( + PriorityBlockingQueueUtil.DefaultInitialCapacity, + (o1: Queued, o2: Queued) => o1.timestamp.compareTo(o2.timestamp), + ) + + /** Thread-safely schedule an action to be executed in the future + * + * If the provided `delta` is not positive the action skips queueing and is executed immediately. + * + * Same as other schedule method, except it expects a differential time amount + */ + def scheduleAfter( + action: CantonTimestamp => Unit, + delta: Duration, + ): FutureUnlessShutdown[Unit] = + scheduleAt(action, now.add(delta)) + + /** Thread-safely schedule an action to be executed in the future + * actions need not execute in the order of their timestamps. + * + * If the provided timestamp is before `now`, the action skips queueing and is executed immediately. 
+ * + * @param action action to run at the given timestamp (passing in the timestamp for when the task was scheduled) + * @param timestamp timestamp when to run the task + * @return a future for the given task + */ + def scheduleAt( + action: CantonTimestamp => Unit, + timestamp: CantonTimestamp, + ): FutureUnlessShutdown[Unit] = { + val queued = Queued(action, timestamp) + if (!now.isBefore(timestamp)) { + queued.run(now) + } else { + addToQueue(queued) + } + FutureUnlessShutdown(queued.promise.future) + } + + // flush the task queue, stopping once we hit a task in the future + @tailrec + private def doFlush(): Option[CantonTimestamp] = { + val queued = Option(tasks.poll()) + queued match { + // if no task present, do nothing + case None => None + case Some(item) => + // if task is present but in the future, put it back + val currentTime = now + if (item.timestamp > currentTime) { + tasks.add(item) + // If the clock was advanced concurrently while this call was checking the task's time against now + // then the task will not `run` until the next call to `flush`. So if we see that the time was advanced + // rerun `flush()`. + if (now >= item.timestamp) doFlush() + else Some(item.timestamp) + } else { + // otherwise execute task and iterate + item.run(currentTime) + doFlush() + } + } + } + + protected def flush(): Option[CantonTimestamp] = doFlush() + + protected def failTasks(): Unit = { + @tailrec def go(): Unit = + Option(tasks.poll()) match { + case None => () + case Some(item) => + item.promise.success(AbortedDueToShutdown) + go() + } + go() + } + + override def nowInMicrosecondsSinceEpoch: Long = uniqueTime().underlying.micros +} + +object Clock extends ClockErrorGroup { + + @Explanation("""This error is emitted if the unique time generation detects that the host system clock is lagging behind + |the unique time source by more than a second. 
This can occur if the system processes more than 2e6 events per second (unlikely) + |or when the underlying host system clock is running backwards.""") + @Resolution( + """Inspect your host system. Generally, the unique time source is not negatively affected by a clock moving backwards + |and will keep functioning. Therefore, this message is just a warning about something strange being detected.""" + ) + object SystemClockRunningBackwards + extends ErrorCode( + id = "SYSTEM_CLOCK_RUNNING_BACKWARDS", + ErrorCategory.BackgroundProcessDegradationWarning, + ) { + final case class Error(now: CantonTimestamp, oldUniqueTime: CantonTimestamp)(implicit + val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = + show"WallClock's system clock seems to be moving backwards: now=$now vs uniqueTs=$oldUniqueTime", + throwableO = None, + ) + } + +} + +trait TickTock { + def now: Instant +} + +object TickTock { + object Native extends TickTock { + private val jclock = JClock.systemUTC() + def now: Instant = jclock.instant() + } + class RandomSkew(maxSkewMillis: Int) extends TickTock { + + private val changeSkewMillis = Math.max(maxSkewMillis / 10, 1) + + private val jclock = JClock.systemUTC() + private val random = new Random(0) + private val skew = new AtomicLong( + (random.nextInt(2 * maxSkewMillis + 1) - maxSkewMillis).toLong + ) + private val last = new AtomicLong(0) + + private def updateSkew(current: Long): Long = { + val upd = random.nextInt(2 * changeSkewMillis + 1) - changeSkewMillis + val next = current + upd + if (next > maxSkewMillis) maxSkewMillis.toLong + else if (next < -maxSkewMillis) -maxSkewMillis.toLong + else next + } + + private def updateLast(current: Long): Long = { + val nextSkew = skew.updateAndGet(updateSkew) + val instant = jclock.instant().toEpochMilli + Math.max(instant + nextSkew, current + 1) + } + + def now: Instant = Instant.ofEpochMilli(last.updateAndGet(updateLast)) + + } +} + +class WallClock( + timeouts: 
ProcessingTimeout, + override val loggerFactory: NamedLoggerFactory, + tickTock: TickTock = TickTock.Native, +) extends Clock + with NamedLogging { + + last.set(CantonTimestamp.assertFromInstant(tickTock.now)) + + def now: CantonTimestamp = CantonTimestamp.assertFromInstant(tickTock.now) + override protected def warnIfClockRunsBackwards: Boolean = true + + // Keeping a dedicated scheduler, as it may have to run long running tasks. + // Once all tasks are guaranteed to be "light", the environment scheduler can be used. + private val scheduler = Threading.singleThreadScheduledExecutor( + loggerFactory.threadName + "-wallclock", + noTracingLogger, + ) + private val running = new AtomicBoolean(true) + + override def close(): Unit = { + import com.digitalasset.canton.concurrent.* + if (running.getAndSet(false)) { + Lifecycle.close( + () => failTasks(), + () => ExecutorServiceExtensions(scheduler)(logger, timeouts).close("clock"), + )(logger) + } + } + + override protected def addToQueue(queued: Queued): Unit = { + val head = Option(tasks.peek()) + val scheduleNew = head match { + case Some(item) => item.timestamp.isAfter(queued.timestamp) + case None => true + } + tasks.add(queued) + if (scheduleNew) { + scheduleNext(queued.timestamp) + } + } + + private val nextFlush = new AtomicReference[Option[CantonTimestamp]](None) + // will schedule a new flush at the given time + private def scheduleNext(timestamp: CantonTimestamp): Unit = { + if (running.get()) { + // update next flush reference if this timestamp is before the current scheduled + val newTimestamp = Some(timestamp) + def updateCondition(current: Option[CantonTimestamp]): Boolean = + current.forall(_ > timestamp) + val current = nextFlush.getAndUpdate { old => + if (updateCondition(old)) newTimestamp else old + } + val needsSchedule = updateCondition(current) + if (needsSchedule) { + // add one ms as we will process all tasks up to now() which means that if we use ms precision, + // we need to set it to the next 
ms such that we include all tasks within the same ms + val delta = Math.max(Duration.between(now.toInstant, timestamp.toInstant).toMillis, 1) + 1 + val _ = scheduler.schedule( + new Runnable() { + override def run(): Unit = { + // mark that this flush has been executed before starting the flush, + // (if something else is queued after our flush but before re-scheduling, it will + // succeed in scheduling instead of this thread). + // we only set it to None if nextFlush matches this one + nextFlush.compareAndSet(newTimestamp, None) + flush().foreach(scheduleNext) + } + }, + delta, + TimeUnit.MILLISECONDS, + ) + } + } + } + +} + +class SimClock( + start: CantonTimestamp = CantonTimestamp.Epoch, + val loggerFactory: NamedLoggerFactory, +) extends Clock + with NamedLogging { + + private val value = new AtomicReference[CantonTimestamp](start) + last.set(start) + + def now: CantonTimestamp = value.get() + + override def flush(): Option[CantonTimestamp] = super.flush() + + def advanceTo(timestamp: CantonTimestamp, doFlush: Boolean = true)(implicit + traceContext: TraceContext + ): Unit = { + ErrorUtil.requireArgument( + now.isBefore(timestamp) || now == timestamp, + s"advanceTo failed with time going backwards: current timestamp is $now and request is $timestamp", + ) + logger.info(s"Advancing sim clock to $timestamp") + value.updateAndGet(_.max(timestamp)) + if (doFlush) { + flush().discard[Option[CantonTimestamp]] + } + } + + def advance(duration: Duration)(implicit traceContext: TraceContext): Unit = { + ErrorUtil.requireArgument(!duration.isNegative, show"Duration must not be negative: $duration.") + logger.info(s"Advancing sim clock by $duration") + value.updateAndGet(_.add(duration)) + flush().discard[Option[CantonTimestamp]] + } + + override def close(): Unit = {} + + override protected def addToQueue(queue: Queued): Unit = { + val _ = tasks.add(queue) + } + + def reset(): Unit = { + failTasks() + value.set(start) + last.set(start) + } + + override def toString: 
String = s"SimClock($now)" + + @VisibleForTesting + def numberOfScheduledTasks: Int = tasks.size() +} + +class RemoteClock( + config: ClientConfig, + timeouts: ProcessingTimeout, + val loggerFactory: NamedLoggerFactory, +)(implicit val ec: ExecutionContextExecutor) + extends Clock + with NamedLogging { + + // same as wall-clock: we might use the normal execution context if the tasks are guaranteed to be light + private val scheduler = Threading.singleThreadScheduledExecutor( + loggerFactory.threadName + "-remoteclock", + noTracingLogger, + ) + private val channel = ClientChannelBuilder.createChannelToTrustedServer(config) + private val service = InitializationServiceGrpc.stub(channel) + private val running = new AtomicBoolean(true) + private val updating = new AtomicReference[Option[CantonTimestamp]](None) + + backgroundUpdate() + + private def backgroundUpdate(): Unit = { + if (running.get()) { + update().discard + + val _ = scheduler.schedule( + new Callable[Unit] { + override def call(): Unit = backgroundUpdate() + }, + 500, + TimeUnit.MILLISECONDS, + ) + } + } + + private def update(): CantonTimestamp = { + // the update method is invoked on every call to now() + // in a remote sim-clock setting, we don't know when we need to flush() + // therefore, we try to flush whenever a timestamp is requested. + // however, we need to avoid recursion if `clock.now` is invoked from within the flush() + // therefore, we use an atomic reference to lock the access to the flush and while + // the system is flushing, we just keep on returning the stored timestamp + val ret = updating.get() match { + // on an access to `now` while we are updating, just return the cached timestamp + case Some(tm) => tm + case None => + // fetch current timestamp + val tm = getRemoteTime + // see if something has been racing with us. if so, use other timestamp + updating.getAndUpdate { + case None => Some(tm) + case Some(racyTm) => Some(racyTm) + } match { + // no race, flush! 
+ case None => + flush().discard + updating.set(None) + tm + // on a race, return racy timestamp + case Some(racyTm) => racyTm + } + } + ret + } + + override def now: CantonTimestamp = update() + + @tailrec + private def getRemoteTime: CantonTimestamp = { + val req = for { + pbTimestamp <- EitherT.right[ProtoDeserializationError](service.currentTime(Empty())) + timestamp <- EitherT.fromEither[Future](CantonTimestamp.fromProtoPrimitive(pbTimestamp)) + } yield timestamp + import TraceContext.Implicits.Empty.* + timeouts.network.await("fetching remote time")(req.value) match { + case Right(tm) => tm + case Left(err) => + // we are forgiving here. a background process might start faster than the foreground process + // so the grpc call might fail because the API is not online. but as we are doing testing only, + // we don't make a big fuss about it, just emit a log and retry + noTracingLogger.info( + s"Failed to fetch remote time from ${config.port.unwrap}: ${err}. Will try again" + ) + Threading.sleep(500) + getRemoteTime + } + } + + override protected def addToQueue(queue: Queued): Unit = { + val _ = tasks.add(queue) + } + + override def close(): Unit = + if (running.getAndSet(false)) { + // stopping the scheduler before the channel, so we don't get a failed call on shutdown + scheduler.shutdown() + Lifecycle.toCloseableChannel(channel, logger, "channel to remote clock server").close() + } +} + +/** This implementation allows us to control many independent sim clocks at the same time. + * Possible race conditions that might happen with concurrent start/stop of clocks are not + * being addressed but are currently unlikely to happen. 
+ * @param currentClocks a function that returns all current running sim clocks + * @param start start time of this clock + */ +class DelegatingSimClock( + currentClocks: () => Seq[SimClock], + val start: CantonTimestamp = CantonTimestamp.Epoch, + loggerFactory: NamedLoggerFactory, +) extends SimClock(start, loggerFactory) { + + override def advanceTo(timestamp: CantonTimestamp, doFlush: Boolean = true)(implicit + traceContext: TraceContext + ): Unit = ErrorUtil.withThrowableLogging { + super.advanceTo(timestamp, doFlush) + currentClocks().foreach(_.advanceTo(now, doFlush = false)) + // avoid race conditions between nodes by first adjusting the time and then flushing the tasks + if (doFlush) + currentClocks().foreach(_.flush().discard) + } + + override def advance(duration: Duration)(implicit traceContext: TraceContext): Unit = + ErrorUtil.withThrowableLogging { + super.advance(duration) + // avoid race conditions between nodes by first adjusting the time and then flushing the tasks + currentClocks().foreach(_.advanceTo(now, doFlush = false)) + currentClocks().foreach(_.flush().discard) + } + + override def close(): Unit = { + super.close() + currentClocks().foreach(_.close()) + } + + override def reset(): Unit = { + super.reset() + currentClocks().foreach(_.reset()) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/time/DomainTimeTracker.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/time/DomainTimeTracker.scala new file mode 100644 index 0000000000..0bd60a4755 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/time/DomainTimeTracker.scala @@ -0,0 +1,490 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.time + +import cats.syntax.option.* +import com.daml.nameof.NameOf.functionFullName +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.config.{DomainTimeTrackerConfig, ProcessingTimeout} +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.lifecycle.{FlagCloseable, FutureUnlessShutdown, UnlessShutdown} +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.sequencing.OrdinaryApplicationHandler +import com.digitalasset.canton.sequencing.client.SequencerClient +import com.digitalasset.canton.sequencing.protocol.Envelope +import com.digitalasset.canton.store.SequencedEventStore.OrdinarySequencedEvent +import com.digitalasset.canton.time.DomainTimeTracker.* +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.tracing.TraceContext.withNewTraceContext +import com.digitalasset.canton.util.Thereafter.syntax.* +import com.digitalasset.canton.util.* +import com.digitalasset.canton.version.ProtocolVersion +import com.google.common.annotations.VisibleForTesting + +import java.util.concurrent.PriorityBlockingQueue +import java.util.concurrent.atomic.AtomicReference +import scala.concurrent.{ExecutionContext, Future, Promise, blocking} + +/** Provides a variety of methods for tracking time on the domain. + * - fetchTime and fetchTimeProof allows for proactively asking for a recent time or time proof. + * - requestTick asks the tracker to ensure that an event is witnessed for the given time or greater (useful for timeouts). + * - awaitTick will return a future to wait for the given time being reached on the target domain. + * + * We currently assume that the domain and our host are roughly synchronized + * and typically won't expect to see a time on a domain until we have passed that point on our local clock. 
+ * We then wait for `observationLatency` past the timestamp we are expecting to elapse on our local clock + * as transmission of an event with that timestamp will still take some time to arrive at our host. + * This avoids frequently asking for times before we've reached the timestamps we're looking for locally. + * + * We also take into account a `patienceDuration` that will cause us to defer asking for a time if we + * have recently seen events for the domain. This is particularly useful if we are significantly behind and + * reading many old events from the domain. + * + * If no activity is happening on the domain we will try to ensure that we have observed an event at least once + * during the `minObservationDuration`. + */ +class DomainTimeTracker( + config: DomainTimeTrackerConfig, + clock: Clock, + timeRequestSubmitter: TimeProofRequestSubmitter, + protected val timeouts: ProcessingTimeout, + protected val loggerFactory: NamedLoggerFactory, +)(implicit executionContext: ExecutionContext) + extends NamedLogging + with FlagCloseable + with HasFlushFuture { + + // timestamps that we are waiting to observe held in an ascending order queue + // modifications to pendingTicks must be made while holding the `lock` + private val pendingTicks: PriorityBlockingQueue[AwaitingTick] = + new PriorityBlockingQueue[AwaitingTick]( + PriorityBlockingQueueUtil.DefaultInitialCapacity, + AwaitingTick.ordering, + ) + + /** Ensures that changes to [[timestampRef]] and [[pendingTicks]] happen atomically */ + private val lock: AnyRef = new Object + + private def withLock[A](fn: => A): A = { + blocking { + lock.synchronized { fn } + } + } + + // the maximum timestamp we can support waiting for without causing an overflow + private val maxPendingTick = CantonTimestamp.MaxValue.minus(config.observationLatency.asJava) + + private val timestampRef: AtomicReference[LatestAndNext[CantonTimestamp]] = + new AtomicReference[LatestAndNext[CantonTimestamp]](LatestAndNext.empty) + + private val 
timeProofRef: AtomicReference[LatestAndNext[TimeProof]] = + new AtomicReference[LatestAndNext[TimeProof]](LatestAndNext.empty) + + // kick off the scheduling to ensure we see timestamps at least occasionally + ensureMinObservationDuration() + + /** Fetch the latest timestamp we have observed from the domain. + * Note this isn't restored on startup so will be empty until the first event after starting is seen. + */ + def latestTime: Option[CantonTimestamp] = timestampRef.get().latest.map(_.value) + + /** Fetches a recent domain timestamp. + * If the latest received event has been received within the given `freshnessBound` (measured on the participant clock) this domain timestamp + * will be immediately returned. + * If a sufficiently fresh timestamp is unavailable then a request for a time proof will be made, however + * the returned future will be resolved by the first event after this call (which may not necessarily be + * the response to our time proof request). + * + * @return The future completes with the domain's timestamp of the event. + * So if the participant's local clock is ahead of the domain clock, + * the timestamp may be earlier than now minus the freshness bound. + */ + def fetchTime(freshnessBound: NonNegativeFiniteDuration = NonNegativeFiniteDuration.Zero)(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[CantonTimestamp] = + fetch(freshnessBound, timestampRef, requiresTimeProof = false) + + /** Similar to `fetchTime` but will only return time proof. */ + def fetchTimeProof(freshnessBound: NonNegativeFiniteDuration = NonNegativeFiniteDuration.Zero)( + implicit traceContext: TraceContext + ): FutureUnlessShutdown[TimeProof] = + fetch(freshnessBound, timeProofRef, requiresTimeProof = true) + + /** Register that we want to observe a domain time. + * The tracker will attempt to make sure that we observe a sequenced event with this timestamp or greater. 
+ * If "immediately" is configured and the clock is a SimClock, a new time proof will be fetched. + * + * The maximum timestamp that we support waiting for is [[data.CantonTimestamp.MaxValue]] minus the configured + * observation latency. If a greater value is provided a warning will be logged but no error will be + * thrown or returned. + */ + def requestTick(ts: CantonTimestamp, immediately: Boolean = false)(implicit + traceContext: TraceContext + ): Unit = + requestTicks(Seq(ts), immediately) + + /** Register that we want to observe domain times. + * The tracker will attempt to make sure that we observe a sequenced event with the given timestamps or greater. + * If "immediately" is configured and the clock is a SimClock, a new time proof will be fetched. + * + * The maximum timestamp that we support waiting for is [[data.CantonTimestamp.MaxValue]] minus the configured + * observation latency. If a greater value is provided a warning will be logged but no error will be + * thrown or returned. + */ + def requestTicks(timestamps: Seq[CantonTimestamp], immediately: Boolean = false)(implicit + traceContext: TraceContext + ): Unit = { + val (toRequest, tooLarge) = timestamps.partition(_ < maxPendingTick) + + NonEmpty.from(tooLarge).foreach { tooLarge => + val first = tooLarge.min1 + val last = tooLarge.max1 + logger.warn( + s"Ignoring request for ${tooLarge.size} ticks from $first to $last as they are too large" + ) + } + + if (toRequest.nonEmpty) { + withLock { + toRequest.foreach { tick => + pendingTicks.put(new AwaitingTick(tick)) + } + } + maybeScheduleUpdate(immediately) + } + } + + /** Waits for an event with a timestamp greater or equal to `ts` to be observed from the domain. + * If we have already witnessed an event with a timestamp equal or exceeding the given `ts` then `None` + * will be returned. 
+ */ + def awaitTick( + ts: CantonTimestamp + )(implicit traceContext: TraceContext): Option[Future[CantonTimestamp]] = { + val latest = timestampRef.get().latest + if (latest.exists(_.value >= ts)) { + logger.debug(s"No await time for ${ts} as we are already at $latest") + None + } else { + logger.debug(s"Await time for ${ts} as we are at ${latest.map(_.value)} ") + // wait for this timestamp to be observed + val promise = Promise[CantonTimestamp]() + withLock { + pendingTicks.put(new AwaitingTick(ts, promise.some)) + } + maybeScheduleUpdate() + promise.future.some + } + } + + /** Create a [[sequencing.OrdinaryApplicationHandler]] for updating this time tracker */ + def wrapHandler[Env <: Envelope[_]]( + handler: OrdinaryApplicationHandler[Env] + ): OrdinaryApplicationHandler[Env] = handler.replace { tracedEvents => + tracedEvents.withTraceContext { implicit batchTraceContext => events => + update(events) + + // call the wrapped handler + handler(tracedEvents) + } + } + + /** Inform the domain time tracker about the first message the sequencer client resubscribes to from the sequencer. + * This is never considered a time proof event. 
+ */ + def subscriptionResumesAfter( + timestamp: CantonTimestamp + )(implicit traceContext: TraceContext): Unit = { + withLock { + logger.debug(s"Initializing domain time tracker for resubscription at $timestamp") + updateTimestampRef(timestamp) + removeTicks(timestamp) + } + } + + @VisibleForTesting + private[time] def update(events: Seq[OrdinarySequencedEvent[Envelope[_]]])(implicit + batchTraceContext: TraceContext + ): Unit = { + withLock { + def updateOne(event: OrdinarySequencedEvent[Envelope[_]]): Unit = { + updateTimestampRef(event.timestamp) + TimeProof.fromEventO(event).foreach { proof => + val oldTimeProof = timeProofRef.getAndSet(LatestAndNext(received(proof).some, None)) + oldTimeProof.next.foreach(_.trySuccess(UnlessShutdown.Outcome(proof))) + timeRequestSubmitter.handleTimeProof(proof) + } + } + + // currently all actions from events are synchronous and do not return errors so this simple processing is safe. + // for timestamps we could just take the latest event in batch, however as we're also looking for time proofs + // we supply every event sequentially. + // this could likely be optimised to just process the latest time proof and timestamp from the batch if required. 
+ events.foreach(updateOne) + events.lastOption.foreach(event => removeTicks(event.timestamp)) + } + maybeScheduleUpdate() + } + + /** Must only be used inside [[withLock]] */ + private def updateTimestampRef(newTimestamp: CantonTimestamp): Unit = { + val oldTimestamp = + timestampRef.getAndSet(LatestAndNext(received(newTimestamp).some, None)) + oldTimestamp.next.foreach(_.trySuccess(UnlessShutdown.Outcome(newTimestamp))) + } + + /** Must only be used inside [[withLock]] */ + @SuppressWarnings(Array("org.wartremover.warts.While")) + private def removeTicks(ts: CantonTimestamp): Unit = { + // remove pending ticks up to and including this timestamp + while (Option(pendingTicks.peek()).exists(_.ts <= ts)) { + val removed = pendingTicks.poll() + // complete any futures waiting for them + removed.complete() + } + } + + private def fetch[A]( + freshnessBound: NonNegativeFiniteDuration, + latestAndNextRef: AtomicReference[LatestAndNext[A]], + requiresTimeProof: Boolean, + )(implicit traceContext: TraceContext): FutureUnlessShutdown[A] = + performUnlessClosing(functionFullName) { + val now = clock.now + val receivedWithin = now.minus(freshnessBound.unwrap) + + val (future, needUpdate) = withLock { + val newState = latestAndNextRef.updateAndGet { latestAndNext => + latestAndNext.latest match { + case Some(Received(_value, receivedAt)) if receivedAt >= receivedWithin => + latestAndNext + case _latest => latestAndNext.withNextSet + } + } + newState.latest match { + case Some(Received(value, receivedAt)) if receivedAt >= receivedWithin => + FutureUnlessShutdown.pure(value) -> false + case _ => + val promise = newState.next.getOrElse( + ErrorUtil.internalError( + new IllegalStateException("Should have set to a promise in prior block") + ) + ) + + // if we're looking for a time proof then just request one; no need to call `maybeScheduleUpdate()` + // as the TimeProofRequestSubmitter itself retries if it doesn't get one soon enough. 
+ // otherwise if looking for a timestamp we don't care what domain time we're looking for (just the next), + // so just register a pending tick for the earliest point. + // we use MinValue rather than Epoch so it will still be considered far before "now" when initially started + // using the simclock. + if (requiresTimeProof) timeRequestSubmitter.fetchTimeProof() + else pendingTicks.put(new AwaitingTick(CantonTimestamp.MinValue)) + FutureUnlessShutdown(promise.future) -> !requiresTimeProof + } + } + if (needUpdate) maybeScheduleUpdate() + future + }.onShutdown(FutureUnlessShutdown.abortedDueToShutdown) + + /** When we expect to observe the earliest timestamp in local time. */ + @VisibleForTesting + private[time] def earliestExpectedObservationTime(): Option[CantonTimestamp] = + Option(withLock(pendingTicks.peek())).map(_.ts.add(config.observationLatency.asJava)) + + /** Local time of when we'd like to see the next event produced. + * If we are waiting to observe a timestamp, this value will be the greater (see note below) of: + * - the local time of when we'd like to see the earliest tick + * - the time we last received an event offset plus the configured patience duration + * + * This allows doing nothing for a long period if the timestamp we're looking at is far in the future. + * However if the domain is far behind but regularly producing events we will wait until we haven't + * witnessed an event for the patience duration. + * + * Note: for sim clock, we always take the earliestExpectedObservationTime. The reason is that progressing the + * clock may lead to sudden big differences between local clock and timestamps on sequencer messages + * which lead to some check that decides whether a time proof should be requested not being done. 
+ * + * The issue arises in the following case: + * - Check is scheduled at t1 + * - Time is progressed at t3 > t1 + * - An event is received with sequencing time t2, with t1 < t2 < t3 + * - Then, the max would lead to t3 which skips the request for a time proof + */ + private def nextScheduledCheck()(implicit traceContext: TraceContext): Option[CantonTimestamp] = { + // if we're not waiting for an event, then we don't need to see one + // Only request an event if the time tracker has observed a time; + // otherwise the submission may fail because the node does not have any signing keys registered + earliestExpectedObservationTime().flatMap { earliestExpectedObservationTime => + val latest = timestampRef.get().latest + if (latest.isEmpty) { + logger.debug( + s"Not scheduling a next check at $earliestExpectedObservationTime because no timestamp has been observed from the domain" + ) + } + + val timeFromReceivedEvent = latest.map(_.receivedAt.add(config.patienceDuration.asJava)) + + clock match { + case _: SimClock => latest.map(_ => earliestExpectedObservationTime) + case _ => timeFromReceivedEvent.map(_.max(earliestExpectedObservationTime)) + } + } + } + + /** we're unable to cancel an update once scheduled, so if we decide to schedule an earlier update than an update already + * scheduled we update this to the earlier value and then check this value when the scheduled task is run + */ + private val nextScheduledUpdate: AtomicReference[Option[CantonTimestamp]] = + new AtomicReference[Option[CantonTimestamp]](None) + + /** After [[pendingTicks]] or [[timestampRef]] have been updated, call this to determine whether a scheduled update is required. + * It will be scheduled if there isn't an existing or earlier update pending and + * the time tracker has observed at least some timestamp or if "immediately" is true and the clock is a SimClock.
+ */ + private def maybeScheduleUpdate( + immediately: Boolean = false + )(implicit traceContext: TraceContext): Unit = { + + def updateNow(): Unit = { + // Fine to repeatedly call without guards as the submitter will make no more than one request in-flight at once + // The next call to update will complete the promise in `timestampRef.get().next`. + timeRequestSubmitter.fetchTimeProof() + } + if (clock.isSimClock && immediately) updateNow() + else { + nextScheduledCheck() foreach { updateBy => + // if we've already surpassed when we wanted to see a time, just ask for one + // means that we're waiting on a timestamp and we're not receiving regular updates + val now = clock.now + if (updateBy <= now) updateNow() + else { + def updateCondition(current: Option[CantonTimestamp]): Boolean = + !current.exists(ts => ts > now && ts <= updateBy) + + val current = nextScheduledUpdate.getAndUpdate { current => + if (updateCondition(current)) updateBy.some else current + } + if (updateCondition(current)) { + // schedule next update + val nextF = + clock.scheduleAt(_ => maybeScheduleUpdate(immediately = false), updateBy).unwrap + addToFlushAndLogError(s"scheduled update at $updateBy")(nextF) + } + } + } + } + } + + private def received[A](value: A) = Received(value, receivedAt = clock.now) + + @VisibleForTesting + protected[time] def flush(): Future[Unit] = doFlush() + + override def onClosed(): Unit = { + Seq(timeProofRef, timestampRef).foreach { ref => + ref.get().next.foreach(_.trySuccess(UnlessShutdown.AbortedDueToShutdown)) + } + timeRequestSubmitter.close() + } + + /** In the absence of any real activity on the domain we will infrequently request a time. + * Short of being aware of a relatively recent domain time, it will allow features like sequencer pruning + * to keep a relatively recent acknowledgment point for the member even if they're not doing anything. 
+ */ + private def ensureMinObservationDuration(): Unit = withNewTraceContext { implicit traceContext => + val minObservationDuration = config.minObservationDuration.asJava + def performUpdate(expectedUpdateBy: CantonTimestamp): Unit = + performUnlessClosing(functionFullName) { + val lastObserved = timestampRef.get().latest.map(_.receivedAt) + + // did we see an event within the observation window + if (lastObserved.exists(_ >= expectedUpdateBy.minus(minObservationDuration))) { + // we did + scheduleNextUpdate() + } else { + // we didn't so ask for a time + logger.debug( + s"The minimum observation duration $minObservationDuration has elapsed since last observing the domain time (${lastObserved.map(_.toString).getOrElse("never")}) so will request a proof of time" + ) + FutureUtil.doNotAwait( + // fetchTime shouldn't fail (if anything it will never complete due to infinite retries or closing) + // but ensure schedule is called regardless + fetchTime() + .thereafter(_ => scheduleNextUpdate()) + .onShutdown(logger.debug("Stopped fetch time due to shutdown")), + "Failed to fetch a time to ensure the minimum observation duration", + ) + } + }.onShutdown(()) + + def scheduleNextUpdate(): Unit = + performUnlessClosing(functionFullName) { + val latestTimestamp = timestampRef.get().latest.fold(clock.now)(_.receivedAt) + val expectUpdateBy = latestTimestamp.add(minObservationDuration).immediateSuccessor + + val _ = clock.scheduleAt(performUpdate, expectUpdateBy) + }.onShutdown(()) + + scheduleNextUpdate() + } + +} + +object DomainTimeTracker { + + private class AwaitingTick( + val ts: CantonTimestamp, + promiseO: Option[Promise[CantonTimestamp]] = None, + ) { + def complete(): Unit = promiseO.foreach(_.trySuccess(ts)) + } + private object AwaitingTick { + implicit val ordering: Ordering[AwaitingTick] = Ordering.by(_.ts) + } + + /** Keep track of a value, and when we received said value, measured on the participant's clock */ + final case class Received[+A](value: A, 
receivedAt: CantonTimestamp) + + /** Keep track of the latest value received and a promise to complete when the next one arrives + * It is not a case class so that equality is object identity (equality on promises is anyway object identity). + */ + class LatestAndNext[A]( + val latest: Option[Received[A]], + val next: Option[Promise[UnlessShutdown[A]]], + ) { + def withNextSet: LatestAndNext[A] = + next.fold(LatestAndNext(latest, Promise[UnlessShutdown[A]]().some))(_ => this) + } + object LatestAndNext { + def apply[A]( + latest: Option[Received[A]], + next: Option[Promise[UnlessShutdown[A]]], + ): LatestAndNext[A] = + new LatestAndNext(latest, next) + def empty[A]: LatestAndNext[A] = LatestAndNext(None, None) + } + + def apply( + config: DomainTimeTrackerConfig, + clock: Clock, + sequencerClient: SequencerClient, + protocolVersion: ProtocolVersion, + timeouts: ProcessingTimeout, + loggerFactory: NamedLoggerFactory, + )(implicit executionContext: ExecutionContext): DomainTimeTracker = + new DomainTimeTracker( + config, + clock, + TimeProofRequestSubmitter( + config.timeRequest, + clock, + sequencerClient, + protocolVersion, + timeouts, + loggerFactory, + ), + timeouts, + loggerFactory, + ) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/time/PeriodicAction.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/time/PeriodicAction.scala new file mode 100644 index 0000000000..bb964ecfbf --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/time/PeriodicAction.scala @@ -0,0 +1,35 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.time + +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.lifecycle.FlagCloseable +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.tracing.TraceContext + +import scala.concurrent.{ExecutionContext, Future} + +class PeriodicAction( + clock: Clock, + interval: NonNegativeFiniteDuration, + protected val loggerFactory: NamedLoggerFactory, + protected val timeouts: ProcessingTimeout, + description: String, +)(check: TraceContext => Future[_])(implicit + executionContext: ExecutionContext +) extends NamedLogging + with FlagCloseable { + + TraceContext.withNewTraceContext(setupNextCheck()(_)) + + private def runCheck()(implicit traceContext: TraceContext): Unit = + performUnlessClosingF(s"run-$description")(check(traceContext)) + .onComplete(_ => setupNextCheck()) + + private def setupNextCheck()(implicit traceContext: TraceContext): Unit = + performUnlessClosing(s"setup-$description") { + val _ = clock.scheduleAfter(_ => runCheck(), interval.duration) + }.discard + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/time/RefinedDurations.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/time/RefinedDurations.scala new file mode 100644 index 0000000000..f189dd7513 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/time/RefinedDurations.scala @@ -0,0 +1,285 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.time + +import cats.syntax.either.* +import com.digitalasset.canton.ProtoDeserializationError.ValueConversionError +import com.digitalasset.canton.checked +import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveNumeric} +import com.digitalasset.canton.config.{ + NonNegativeFiniteDuration as NonNegativeFiniteDurationConfig, + PositiveDurationSeconds as PositiveDurationSecondsConfig, + PositiveFiniteDuration as PositiveFiniteDurationConfig, +} +import com.digitalasset.canton.data.{CantonTimestamp, CantonTimestampSecond} +import com.digitalasset.canton.logging.TracedLogger +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.{DurationConverter, ParsingResult} +import com.digitalasset.canton.store.db.DbDeserializationException +import com.digitalasset.canton.tracing.TraceContext +import com.google.protobuf.duration.Duration as PbDuration +import io.circe.Encoder +import io.scalaland.chimney.Transformer +import slick.jdbc.{GetResult, SetParameter} + +import java.time.Duration +import java.util.concurrent.TimeUnit +import scala.concurrent.duration.FiniteDuration +import scala.jdk.DurationConverters.* + +sealed trait RefinedDuration extends Ordered[RefinedDuration] { + def duration: Duration + def unwrap: Duration = duration + + def toProtoPrimitive: com.google.protobuf.duration.Duration = + DurationConverter.toProtoPrimitive(duration) + + def toScala: FiniteDuration = duration.toScala + + override def compare(that: RefinedDuration): Int = this.duration.compareTo(that.duration) +} + +trait RefinedDurationCompanion[RD <: RefinedDuration] { + + /** Factory method for creating the [[RefinedDuration]] from a [[java.time.Duration]] + * @throws java.lang.IllegalArgumentException if the duration does not satisfy the refinement predicate + */ + 
def tryCreate(duration: Duration): RD = + create(duration).valueOr(err => throw new IllegalArgumentException(err)) + + def create(duration: Duration): Either[String, RD] + + def between(from: CantonTimestamp, to: CantonTimestamp): Either[String, RD] = + create(Duration.between(from.toInstant, to.toInstant)) + + def between(from: CantonTimestampSecond, to: CantonTimestampSecond): Either[String, RD] = + create(Duration.between(from.toInstant, to.toInstant)) + + implicit val orderingRefinedDuration: Ordering[RD] = Ordering.by(_.duration) + + def fromProtoPrimitive( + field: String + )(durationP: PbDuration): ParsingResult[RD] = + for { + duration <- DurationConverter.fromProtoPrimitive(durationP) + refinedDuration <- create(duration).leftMap(err => ValueConversionError(field, err)) + } yield refinedDuration + + def fromProtoPrimitiveO( + field: String + )(durationPO: Option[PbDuration]): ParsingResult[RD] = + for { + durationP <- ProtoConverter.required(field, durationPO) + refinedDuration <- fromProtoPrimitive(field)(durationP) + } yield refinedDuration + + def fromBytes(field: String)(bytes: Array[Byte]): ParsingResult[RD] = + for { + durationP <- ProtoConverter.protoParserArray(PbDuration.parseFrom)(bytes) + res <- fromProtoPrimitive(field)(durationP) + } yield res + + private def getResultFromBytes(bytes: Array[Byte]) = + fromBytes("database field")(bytes).valueOr(err => + throw new DbDeserializationException(s"Failed to deserialize the duration: $err") + ) + + // JSON encoding using circe + implicit val refinedDurationEncoder: Encoder[RD] = + Encoder.forProduct1("duration")(_.duration) + + implicit val getResultRefinedDuration: GetResult[RD] = + GetResult(r => getResultFromBytes(r.nextBytes())) + + implicit val getResultRefinedDurationOption: GetResult[Option[RD]] = + GetResult(r => r.nextBytesOption().map(getResultFromBytes)) + + implicit def setParameterRefinedDuration(implicit + setParameterByteArray: SetParameter[Array[Byte]] + ): SetParameter[RD] = + (d, pp) 
=> pp >> d.toProtoPrimitive.toByteArray + + implicit def setParameterRefinedDurationOption(implicit + setParameterByteArrayO: SetParameter[Option[Array[Byte]]] + ): SetParameter[Option[RD]] = (d, pp) => pp >> d.map(_.toProtoPrimitive.toByteArray) + + def tryOfMicros(micros: Long): RD = + tryCreate(Duration.ofSeconds(micros / 1000000).withNanos((micros % 1000000L).toInt * 1000)) + + def tryOfMillis(millis: Long): RD = tryCreate(Duration.ofMillis(millis)) + + def tryOfSeconds(secs: Long): RD = tryCreate(Duration.ofSeconds(secs)) + + def tryOfMinutes(minutes: Long): RD = tryCreate(Duration.ofMinutes(minutes)) + + def tryOfHours(hours: Long): RD = tryCreate(Duration.ofHours(hours)) + + def tryOfDays(days: Long): RD = tryCreate(Duration.ofDays(days)) +} + +final case class PositiveFiniteDuration private (duration: Duration) + extends RefinedDuration + with PrettyPrinting { + require(!duration.isNegative && !duration.isZero, s"Duration $duration must be positive") + + override def pretty: Pretty[PositiveFiniteDuration] = prettyOfParam(_.duration) + + /** Returns the duration in seconds truncated to the size of Int, returns as a maximum Int.MaxValue. + * + * Usage: On the database/jdbc level many timeouts require to be specified in seconds as an integer, not a long.
+ */ + def toSecondsTruncated( + logger: TracedLogger + )(implicit traceContext: TraceContext): PositiveNumeric[Int] = { + val seconds = duration.getSeconds + + val result = if (seconds > Int.MaxValue) { + logger.info(s"Truncating $duration to integer") + Int.MaxValue + } else + seconds.toInt + + // Result must be positive due to assertion on duration + checked(PositiveNumeric.tryCreate(result)) + } + + def toConfig: PositiveFiniteDurationConfig = checked( + PositiveFiniteDurationConfig.tryFromJavaDuration(duration) + ) +} + +object PositiveFiniteDuration extends RefinedDurationCompanion[PositiveFiniteDuration] { + override def create(duration: Duration): Either[String, PositiveFiniteDuration] = + Either.cond( + !duration.isNegative && !duration.isZero, + PositiveFiniteDuration(duration), + s"Duration should be positive, found: $duration", + ) +} + +final case class NonNegativeFiniteDuration private (duration: Duration) + extends RefinedDuration + with PrettyPrinting { + require(!duration.isNegative, s"Duration $duration must not be negative") + + override def pretty: Pretty[NonNegativeFiniteDuration] = prettyOfParam(_.duration) + + def +(other: NonNegativeFiniteDuration): NonNegativeFiniteDuration = NonNegativeFiniteDuration( + duration.plus(other.duration) + ) + + def *(multiplicand: NonNegativeInt): NonNegativeFiniteDuration = NonNegativeFiniteDuration( + duration.multipliedBy(multiplicand.value.toLong) + ) + + def /(divisor: NonNegativeInt): NonNegativeFiniteDuration = NonNegativeFiniteDuration( + duration.dividedBy(divisor.value.toLong) + ) + + def toConfig: NonNegativeFiniteDurationConfig = checked( + NonNegativeFiniteDurationConfig.tryFromJavaDuration(duration) + ) +} + +object NonNegativeFiniteDuration extends RefinedDurationCompanion[NonNegativeFiniteDuration] { + implicit val forgetRefinementJDuration: Transformer[NonNegativeFiniteDuration, Duration] = + _.duration + implicit val forgetRefinementFDuration: Transformer[NonNegativeFiniteDuration, 
FiniteDuration] = + _.toScala + + implicit val toNonNegativeDurationConfig + : Transformer[NonNegativeFiniteDuration, NonNegativeFiniteDurationConfig] = _.toConfig + + val Zero: NonNegativeFiniteDuration = NonNegativeFiniteDuration(Duration.ZERO) + + override def create(duration: Duration): Either[String, NonNegativeFiniteDuration] = + Either.cond( + !duration.isNegative, + NonNegativeFiniteDuration(duration), + s"Duration should be non-negative, found: $duration", + ) + + def apply(duration: PositiveSeconds): NonNegativeFiniteDuration = checked( + NonNegativeFiniteDuration.tryCreate(duration.duration) + ) +} + +final case class NonNegativeSeconds private (duration: Duration) + extends RefinedDuration + with PrettyPrinting { + require(!duration.isNegative, s"Duration $duration must not be negative") + require(duration.getNano == 0, s"Duration $duration must be rounded to the second") + + override def pretty: Pretty[NonNegativeSeconds.this.type] = prettyOfParam(_.duration) +} + +object NonNegativeSeconds extends RefinedDurationCompanion[NonNegativeSeconds] { + val Zero: NonNegativeSeconds = NonNegativeSeconds(Duration.ZERO) + + override def create(duration: Duration): Either[String, NonNegativeSeconds] = + Either.cond( + !duration.isNegative && duration.getNano == 0, + NonNegativeSeconds(duration), + s"Duration should be non-negative and rounded to the second, found: $duration", + ) +} + +final case class PositiveSeconds private (duration: Duration) + extends RefinedDuration + with PrettyPrinting { + require(!duration.isNegative && !duration.isZero, s"Duration $duration must be positive") + require(duration.getNano == 0, s"Duration $duration must be rounded to the second") + + override def pretty: Pretty[PositiveSeconds.this.type] = prettyOfParam(_.duration) + + def toConfig: PositiveDurationSecondsConfig = checked( + PositiveDurationSecondsConfig.tryFromJavaDuration(duration) + ) + + def toFiniteDuration: FiniteDuration = + FiniteDuration(duration.toNanos, 
TimeUnit.NANOSECONDS).toCoarsest + + def add(i: NonNegativeSeconds): PositiveSeconds = { + val newDuration = duration.plus(i.duration) + checked(PositiveSeconds(newDuration)) + } +} + +object PositiveSeconds extends RefinedDurationCompanion[PositiveSeconds] { + implicit val toPositiveSecondsConfig + : Transformer[PositiveSeconds, PositiveDurationSecondsConfig] = + _.toConfig + + implicit val getResultPositiveSeconds: GetResult[PositiveSeconds] = + GetResult(r => tryOfSeconds(r.nextLong())) + + implicit def setParameterPositiveSeconds(implicit + setParameterLong: SetParameter[Long] + ): SetParameter[PositiveSeconds] = + (d, pp) => pp >> d.duration.getSeconds + + override def create(duration: Duration): Either[String, PositiveSeconds] = + Either.cond( + !duration.isNegative && !duration.isZero && duration.getNano == 0, + PositiveSeconds(duration), + s"Duration should be positive and rounded to the second, found: $duration", + ) +} + +object EnrichedDurations { + import com.digitalasset.canton.config + + implicit class RichNonNegativeFiniteDurationConfig(duration: config.NonNegativeFiniteDuration) { + def toInternal: NonNegativeFiniteDuration = checked( + NonNegativeFiniteDuration.tryCreate(duration.asJava) + ) + } + + implicit class RichPositiveFiniteDurationConfig(duration: config.PositiveFiniteDuration) { + def toInternal: PositiveFiniteDuration = checked( + PositiveFiniteDuration.tryCreate(duration.asJava) + ) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/time/TimeAwaiter.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/time/TimeAwaiter.scala new file mode 100644 index 0000000000..4d1629bd21 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/time/TimeAwaiter.scala @@ -0,0 +1,118 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.time + +import com.digitalasset.canton.DiscardOps +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.lifecycle.{ + FutureUnlessShutdown, + PerformUnlessClosing, + UnlessShutdown, +} +import com.digitalasset.canton.logging.NamedLogging +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.ErrorUtil + +import java.util.{ConcurrentModificationException, PriorityQueue} +import scala.annotation.tailrec +import scala.concurrent.{Future, Promise, blocking} +import scala.jdk.CollectionConverters.* + +/** Utility to implement a time awaiter + * + * Note, you need to invoke expireOnShutdown onClosed + */ +trait TimeAwaiter { + + this: PerformUnlessClosing & NamedLogging => + + private abstract class Awaiting[T] { + val promise: Promise[T] = Promise[T]() + def shutdown(): Boolean + def success(): Unit + } + private class General extends Awaiting[Unit] { + override def shutdown(): Boolean = false + override def success(): Unit = promise.success(()) + } + private class ShutdownAware extends Awaiting[UnlessShutdown[Unit]] { + override def shutdown(): Boolean = { + promise.trySuccess(UnlessShutdown.AbortedDueToShutdown).discard + true + } + override def success(): Unit = promise.trySuccess(UnlessShutdown.unit).discard + } + + protected def expireTimeAwaiter(): Unit = + blocking(awaitTimestampFuturesLock.synchronized { + awaitTimestampFutures.iterator().asScala.foreach(_._2.shutdown().discard[Boolean]) + }) + + protected def currentKnownTime: CantonTimestamp + + protected def awaitKnownTimestamp(timestamp: CantonTimestamp): Option[Future[Unit]] = { + awaitKnownTimestampGen(timestamp, new General()).map(_.promise.future) + } + + protected def awaitKnownTimestampUS( + timestamp: CantonTimestamp + )(implicit traceContext: TraceContext): Option[FutureUnlessShutdown[Unit]] = + performUnlessClosing(s"await known timestamp at $timestamp") { + 
awaitKnownTimestampGen(timestamp, new ShutdownAware()) + }.map(_.map(awaiter => FutureUnlessShutdown(awaiter.promise.future))) + .onShutdown(Some(FutureUnlessShutdown.abortedDueToShutdown)) + + private def awaitKnownTimestampGen[T]( + timestamp: CantonTimestamp, + create: => Awaiting[T], + ): Option[Awaiting[T]] = { + val current = currentKnownTime + if (current >= timestamp) None + else { + val awaiter = create + blocking(awaitTimestampFuturesLock.synchronized { + awaitTimestampFutures.offer(timestamp -> awaiter).discard + }) + // If the timestamp has been advanced while we're inserting into the priority queue, + // make sure that we're completing the future. + val newCurrent = currentKnownTime + if (newCurrent >= timestamp) notifyAwaitedFutures(newCurrent) + Some(awaiter) + } + } + + /** Queue of timestamps that are being awaited on, ordered by timestamp. + * Access is synchronized via [[awaitTimestampFuturesLock]]. + */ + private val awaitTimestampFutures: PriorityQueue[(CantonTimestamp, Awaiting[_])] = + new PriorityQueue[(CantonTimestamp, Awaiting[_])]( + Ordering.by[(CantonTimestamp, Awaiting[_]), CantonTimestamp](_._1) + ) + private val awaitTimestampFuturesLock: AnyRef = new Object() + + protected def notifyAwaitedFutures(upToInclusive: CantonTimestamp): Unit = { + @tailrec def go(): Unit = Option(awaitTimestampFutures.peek()) match { + case Some(peeked @ (timestamp, awaiter)) if timestamp <= upToInclusive => + val polled = awaitTimestampFutures.poll() + // Thanks to the synchronization, the priority queue cannot be modified concurrently, + // but let's be paranoid and check. + if (peeked ne polled) { + import com.digitalasset.canton.tracing.TraceContext.Implicits.Empty.* + ErrorUtil.internalError( + new ConcurrentModificationException( + s"Insufficient synchronization in time awaiter. 
Peek returned $peeked, polled returned $polled" + ) + ) + } + awaiter.success() + go() + case _ => + } + + blocking(awaitTimestampFuturesLock.synchronized { + go() + }) + } + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/time/TimeProof.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/time/TimeProof.scala new file mode 100644 index 0000000000..413d87e02b --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/time/TimeProof.scala @@ -0,0 +1,162 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.time + +import cats.data.EitherT +import cats.syntax.either.* +import cats.syntax.option.* +import com.digitalasset.canton.ProtoDeserializationError +import com.digitalasset.canton.config.CantonRequireTypes.String73 +import com.digitalasset.canton.crypto.HashOps +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.sequencing.OrdinaryProtocolEvent +import com.digitalasset.canton.sequencing.client.{SendAsyncClientError, SequencerClient} +import com.digitalasset.canton.sequencing.protocol.* +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.serialization.{HasCryptographicEvidence, ProtoConverter} +import com.digitalasset.canton.store.SequencedEventStore.{ + IgnoredSequencedEvent, + OrdinarySequencedEvent, + PossiblyIgnoredSequencedEvent, +} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.version.ProtocolVersion +import com.google.common.annotations.VisibleForTesting +import com.google.protobuf.ByteString + +import java.util.UUID +import scala.concurrent.Future + +/** Wrapper for a sequenced event that has the correct properties to act as a time proof: + * - a deliver event with 
no envelopes + * - has a message id that suggests it was requested as a time proof (this is practically unnecessary but will act + * as a safeguard against future sequenced event changes) + * @param event the signed content wrapper containing the event + * @param deliver the time proof event itself. this must be the event content signedEvent wrapper. + */ +final case class TimeProof private ( + private val event: OrdinarySequencedEvent[Envelope[_]], + private val deliver: Deliver[Nothing], +) extends PrettyPrinting + with HasCryptographicEvidence { + def timestamp: CantonTimestamp = deliver.timestamp + + def traceContext: TraceContext = event.traceContext + + override def pretty: Pretty[TimeProof.this.type] = prettyOfClass( + unnamedParam(_.timestamp) + ) + + def toProtoV0: v0.TimeProof = v0.TimeProof(Some(event.toProtoV0)) + + override def getCryptographicEvidence: ByteString = deliver.getCryptographicEvidence +} + +object TimeProof { + + private def apply( + event: OrdinarySequencedEvent[Envelope[_]], + deliver: Deliver[Nothing], + ): TimeProof = { + require( + event.signedEvent.content eq deliver, + "Time proof event must be the content of the provided signed sequencer event", + ) + new TimeProof(event, deliver) + } + + def fromProtoV0( + protocolVersion: ProtocolVersion, + hashOps: HashOps, + )(timeProofP: v0.TimeProof): ParsingResult[TimeProof] = { + val v0.TimeProof(eventPO) = timeProofP + for { + possiblyIgnoredProtocolEvent <- ProtoConverter + .required("event", eventPO) + .flatMap(PossiblyIgnoredSequencedEvent.fromProtoV0(protocolVersion, hashOps)) + event <- possiblyIgnoredProtocolEvent match { + case ordinary: OrdinaryProtocolEvent => Right(ordinary) + case _: IgnoredSequencedEvent[_] => + Left(ProtoDeserializationError.OtherError("Event is ignored, but must be ordinary.")) + } + timeProof <- fromEvent(event).leftMap(ProtoDeserializationError.OtherError) + } yield timeProof + } + + @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) + def 
fromEvent(event: OrdinarySequencedEvent[Envelope[_]]): Either[String, TimeProof] = + for { + deliver <- PartialFunction + .condOpt(event.signedEvent.content) { case deliver: Deliver[_] => deliver } + .toRight("Time Proof must be a deliver event") + _ <- validateDeliver(deliver) + // is now safe to cast to a `Deliver[Nothing]` as we've validated it has no envelopes + emptyDeliver = deliver.asInstanceOf[Deliver[Nothing]] + } yield new TimeProof(event, emptyDeliver) + + private def validateDeliver(deliver: Deliver[Envelope[_]]): Either[String, Unit] = { + for { + _ <- Either.cond( + isTimeEventBatch(deliver.batch), + (), + "Time Proof event should have no envelopes", + ) + _ <- Either.cond( + deliver.messageIdO.exists(isTimeEventMessageId), + (), + "Time Proof event should have an expected message id", + ) + } yield () + } + + /** Return a wrapped [[TimeProof]] if the given `event` has the correct properties. */ + def fromEventO(event: OrdinarySequencedEvent[Envelope[_]]): Option[TimeProof] = + fromEvent(event).toOption + + /** Is the event a time proof */ + def isTimeProofDeliver(deliver: Deliver[Envelope[_]]): Boolean = + validateDeliver(deliver).isRight + + /** Does the submission request look like a request to create a time event */ + def isTimeProofSubmission(submission: SubmissionRequest): Boolean = + isTimeEventMessageId(submission.messageId) && isTimeEventBatch(submission.batch) + + /** Send placed alongside the validation logic for a time proof to help ensure it remains consistent */ + def sendRequest( + client: SequencerClient, + protocolVersion: ProtocolVersion, + )(implicit traceContext: TraceContext): EitherT[Future, SendAsyncClientError, Unit] = + client.sendAsyncUnauthenticatedOrNot( + // we intentionally ask for an empty event to be sequenced to observe the time. + // this means we can safely share this event without mentioning other recipients. 
+ batch = Batch.empty(protocolVersion), + // as we typically won't know the domain time at the point of doing this request (hence doing the request for the time...), + // we can't pick a known good domain time for the max sequencing time. + // if we were to guess it we may get it wrong and then in the event of no activity on the domain for our recipient, + // we'd then never actually learn of the time. + // so instead we just use the maximum value allowed. + maxSequencingTime = CantonTimestamp.MaxValue, + messageId = mkTimeProofRequestMessageId, + ) + + /** Use a constant prefix for a message which would permit the sequencer to track how many + * time request events it is receiving. + */ + val timeEventMessageIdPrefix = "tick-" + private def isTimeEventMessageId(messageId: MessageId): Boolean = + messageId.unwrap.startsWith(timeEventMessageIdPrefix) + private def isTimeEventBatch(batch: Batch[_]): Boolean = + batch.envelopes.isEmpty // should be entirely empty + + /** Make a unique message id for a time event submission request. + * Currently adding a short prefix for debugging at the sequencer so floods of time requests will be observable. + */ + @VisibleForTesting + def mkTimeProofRequestMessageId: MessageId = + MessageId( + String73(s"$timeEventMessageIdPrefix${UUID.randomUUID()}")("time-proof-message-id".some) + ) + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/time/TimeProofRequestSubmitter.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/time/TimeProofRequestSubmitter.scala new file mode 100644 index 0000000000..768243a62f --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/time/TimeProofRequestSubmitter.scala @@ -0,0 +1,161 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.time + +import cats.data.EitherT +import com.daml.nameof.NameOf.functionFullName +import com.digitalasset.canton.config.{ProcessingTimeout, TimeProofRequestConfig} +import com.digitalasset.canton.lifecycle.FlagCloseable +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.sequencing.client.{SendAsyncClientError, SequencerClient} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.retry.RetryUtil.AllExnRetryable +import com.digitalasset.canton.util.retry.{Backoff, Success} +import com.digitalasset.canton.util.{FutureUtil, HasFlushFuture, retry} +import com.digitalasset.canton.version.ProtocolVersion +import com.google.common.annotations.VisibleForTesting + +import java.util.concurrent.atomic.AtomicReference +import scala.concurrent.{ExecutionContext, Future} + +/** Use [[fetchTimeProof]] to fetch a time proof we observe from the sequencer via [[handleTimeProof]]. + * Will batch fetch calls so there is only a single request occurring at any point. + * + * The submission of this request to the sequencer is slightly more involved than usual as we do not rely at all + * on domain time as this component is primarily used when the domain time is likely unknown or stale. + * Instead we use the the local node clock for retries. + * + * Future optimizations: + * - Most scenarios don't need a time event specifically and instead just need any event to cause a "tick". + * In these cases we could short circuit and cancel a pending request when receiving any event with a timestamp. + * However this would only optimize our retry loop so the distinction doesn't currently feel anywhere near worthwhile. + */ +trait TimeProofRequestSubmitter extends AutoCloseable { + + /** The [[TimeProofRequestSubmitter]] will attempt to produce a time proof by calling send on the domain sequencer. 
+ * It will stop requesting a time proof with the first time proof it witnesses (not necessarily the one + * it requested). + * Ensures that only a single request is in progress at a time regardless of how many times it is called. + * Is safe to call frequently without causing many requests to the sequencer. + * If the component is shutdown it stops requesting a time proof. + */ + def fetchTimeProof()(implicit traceContext: TraceContext): Unit + + /** Update state based on time proof events observed from the sequencer */ + def handleTimeProof(proof: TimeProof): Unit +} + +private[time] class TimeProofRequestSubmitterImpl( + config: TimeProofRequestConfig, + sendRequest: TraceContext => EitherT[Future, SendAsyncClientError, Unit], + clock: Clock, + protected val timeouts: ProcessingTimeout, + protected val loggerFactory: NamedLoggerFactory, +)(implicit executionContext: ExecutionContext) + extends TimeProofRequestSubmitter + with NamedLogging + with FlagCloseable + with HasFlushFuture { + import com.digitalasset.canton.time.TimeProofRequestSubmitterImpl.* + + private val currentRequestToken: AtomicReference[Token] = + new AtomicReference[Token](NoCurrentRequest) + + override def fetchTimeProof()(implicit traceContext: TraceContext): Unit = { + val newToken = new Object + if (currentRequestToken.compareAndSet(NoCurrentRequest, newToken)) { + sendRequestIfPending(newToken) + } + } + + override def handleTimeProof(proof: TimeProof): Unit = { + val token = currentRequestToken.getAndSet(NoCurrentRequest) + if (token != NoCurrentRequest) { + logger.debug(s"Received $proof")(proof.traceContext) + } + } + + private def sendRequestIfPending(token: Token)(implicit traceContext: TraceContext): Unit = { + def stillPending: Boolean = !isClosing && currentRequestToken.get() == token + + /* Make the request or short circuit if we're no longer waiting a time event */ + def mkRequest(): Future[Either[SendAsyncClientError, Unit]] = + performUnlessClosingF(functionFullName) { + if 
(stillPending) { + logger.debug("Sending time request") + sendRequest(traceContext).value + } else Future.successful(Right(())) + }.onShutdown(Right(())) + + def eventuallySendRequest(): Unit = { + performUnlessClosing("unless closing, sendRequestIfPending") { + addToFlushAndLogError( + s"sendRequestIfPending scheduled ${config.maxSequencingDelay} after ${clock.now}" + ) { + { + import Success.either + val retrySendTimeRequest = Backoff( + logger, + this, + retry.Forever, + config.initialRetryDelay.underlying, + config.maxRetryDelay.underlying, + "request current time", + ) + + retrySendTimeRequest(mkRequest(), AllExnRetryable) map { _ => + // if we still care about the outcome (we could have witnessed a recent time while sending the request), + // then schedule retrying a new request. + // this will short circuit if a new timestamp is not needed at that point. + if (stillPending) { + // intentionally don't wait for future + FutureUtil.doNotAwait( + clock + .scheduleAfter( + _ => eventuallySendRequest(), + config.maxSequencingDelay.asJava, + ) + .onShutdown(()), + "requesting current domain time", + ) + } + } + } + } + }.onShutdown( + // using instead of discard to highlight that this change goes with reducing activity during shutdown + () + ) + } + + // initial kick off + eventuallySendRequest() + } + + @VisibleForTesting + protected[time] def flush(): Future[Unit] = doFlush() +} + +object TimeProofRequestSubmitter { + def apply( + config: TimeProofRequestConfig, + clock: Clock, + sequencerClient: SequencerClient, + protocolVersion: ProtocolVersion, + timeouts: ProcessingTimeout, + loggerFactory: NamedLoggerFactory, + )(implicit executionContext: ExecutionContext): TimeProofRequestSubmitter = + new TimeProofRequestSubmitterImpl( + config, + TimeProof.sendRequest(sequencerClient, protocolVersion)(_), + clock, + timeouts, + loggerFactory, + ) +} + +object TimeProofRequestSubmitterImpl { + private[TimeProofRequestSubmitterImpl] type Token = AnyRef + 
private[TimeProofRequestSubmitterImpl] val NoCurrentRequest: Token = new Object +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/DomainOutboxQueue.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/DomainOutboxQueue.scala new file mode 100644 index 0000000000..e91041da77 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/DomainOutboxQueue.scala @@ -0,0 +1,69 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology + +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.topology.transaction.SignedTopologyTransactionX.GenericSignedTopologyTransactionX +import com.digitalasset.canton.tracing.TraceContext + +import scala.concurrent.blocking + +/** The [[DomainOutboxQueue]] connects a [[DomainTopologyManagerX]] and a `DomainOutboxX`. + * The topology manager enqueues transactions that the domain outbox will pick up and send + * to the domain to be sequenced and distributed to the nodes in the domain. + * + * On the one hand, [[com.digitalasset.canton.topology.DomainOutboxQueue#enqueue]] may be called at any point to add + * more topology transactions to the queue. On the other hand, each invocation of + * [[com.digitalasset.canton.topology.DomainOutboxQueue#dequeue]] must be followed by either + * [[com.digitalasset.canton.topology.DomainOutboxQueue#requeue]] or + * [[com.digitalasset.canton.topology.DomainOutboxQueue#completeCycle]], before + * [[com.digitalasset.canton.topology.DomainOutboxQueue#dequeue]] is called again. 
+ */ +class DomainOutboxQueue(val loggerFactory: NamedLoggerFactory) extends NamedLogging { + + private val unsentQueue = new scala.collection.mutable.Queue[GenericSignedTopologyTransactionX] + private val pendingQueue = new scala.collection.mutable.Queue[GenericSignedTopologyTransactionX] + + /** To be called by the topology manager whenever new topology transactions have been validated. + */ + def enqueue( + txs: Seq[GenericSignedTopologyTransactionX] + )(implicit traceContext: TraceContext): Unit = blocking(synchronized { + logger.debug(s"enqueuing: $txs") + unsentQueue.enqueueAll(txs).discard + }) + + def size(): Int = blocking(synchronized(unsentQueue.size)) + + /** Marks up to `limit` transactions as pending and returns those transactions. + * @param limit batch size + * @return the topology transactions that have been marked as pending. + */ + def dequeue(limit: Int): Seq[GenericSignedTopologyTransactionX] = blocking(synchronized { + logger.debug("dequeuing")(TraceContext.todo) + require( + pendingQueue.isEmpty, + s"tried to dequeue while pending wasn't empty: ${pendingQueue.toSeq}", + ) + pendingQueue.enqueueAll(unsentQueue.take(limit)) + unsentQueue.remove(0, limit) + pendingQueue.toSeq + }) + + /** Marks the currently pending transactions as unsent and adds them to the front of the queue in the same order. + */ + def requeue(): Unit = blocking(synchronized { + logger.debug(s"requeuing $pendingQueue")(TraceContext.todo) + unsentQueue.prependAll(pendingQueue) + pendingQueue.clear() + }) + + /** Clears the currently pending transactions. 
+ */ + def completeCycle(): Unit = blocking(synchronized { + logger.debug("completeCycle")(TraceContext.todo) + pendingQueue.clear() + }) + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/Identifier.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/Identifier.scala new file mode 100644 index 0000000000..ec9a63df44 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/Identifier.scala @@ -0,0 +1,185 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology + +import cats.Order +import cats.implicits.* +import com.digitalasset.canton.ProtoDeserializationError.ValueConversionError +import com.digitalasset.canton.config.CantonRequireTypes.{ + LengthLimitedStringWrapper, + String185, + String255, + String68, +} +import com.digitalasset.canton.crypto.Fingerprint +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.store.db.DbDeserializationException +import com.digitalasset.canton.util.ShowUtil.* +import com.digitalasset.canton.{LfPartyId, checked} +import io.circe.Encoder +import slick.jdbc.{GetResult, SetParameter} + +/** utility class to ensure that strings conform to LF specification minus our internal delimiter */ +object SafeSimpleString { + + val delimiter = "::" + + def fromProtoPrimitive(str: String): Either[String, String] = { + for { + _ <- LfPartyId.fromString(str) + opt <- Either.cond( + !str.contains(delimiter), + str, + s"String contains reserved delimiter `$delimiter`.", + ) + } yield opt + } + +} + +/** An identifier such as a random or a readable string + */ +final case class Identifier private (protected val str: String185) + extends LengthLimitedStringWrapper + with PrettyPrinting { + def 
toLengthLimitedString: String185 = str + + override def pretty: Pretty[Identifier] = prettyOfString(_.unwrap) +} + +object Identifier { + def create(str: String): Either[String, Identifier] = + for { + idString <- SafeSimpleString.fromProtoPrimitive(str) + string185 <- String185.create(idString) + } yield new Identifier(string185) + + def tryCreate(str: String): Identifier = + create(str).valueOr(err => throw new IllegalArgumentException(s"Invalid identifier $str: $err")) + + def fromProtoPrimitive(str: String): Either[String, Identifier] = create(str) + + implicit val getResultIdentifier: GetResult[Identifier] = GetResult { r => + Identifier + .fromProtoPrimitive(r.nextString()) + .valueOr(err => + throw new DbDeserializationException(s"Failed to deserialize Identifier: $err") + ) + } + + implicit val setParameterIdentifier: SetParameter[Identifier] = (v, pp) => + pp >> v.toLengthLimitedString + implicit val setParameterIdentifierOption: SetParameter[Option[Identifier]] = (v, pp) => + pp >> v.map(_.toLengthLimitedString) + + implicit val namespaceOrder: Order[Identifier] = Order.by[Identifier, String](_.unwrap) + + implicit val domainAliasEncoder: Encoder[Identifier] = + Encoder.encodeString.contramap[Identifier](_.unwrap) + +} + +object Namespace { + implicit val setParameterNamespace: SetParameter[Namespace] = (v, pp) => + pp >> v.toLengthLimitedString + implicit val namespaceOrder: Order[Namespace] = Order.by[Namespace, String](_.unwrap) + implicit val setParameterOptionNamespace: SetParameter[Option[Namespace]] = (v, pp) => + pp >> v.map(_.toLengthLimitedString) +} + +// architecture-handbook-entry-begin: UniqueIdentifier +/** A namespace spanned by the fingerprint of a pub-key + * + * This is based on the assumption that the fingerprint is unique to the public-key + */ +final case class Namespace(fingerprint: Fingerprint) extends PrettyPrinting { + def unwrap: String = fingerprint.unwrap + def toProtoPrimitive: String = fingerprint.toProtoPrimitive + def 
toLengthLimitedString: String68 = fingerprint.toLengthLimitedString + override def pretty: Pretty[Namespace] = prettyOfParam(_.fingerprint) +} + +/** a unique identifier within a namespace + * Based on the Ledger API PartyIds/LedgerStrings being limited to 255 characters, we allocate + * - 64 + 4 characters to the namespace/fingerprint (essentially SHA256 with extra bytes), + * - 2 characters as delimiters, and + * - the last 185 characters for the Identifier. + */ +final case class UniqueIdentifier(id: Identifier, namespace: Namespace) extends PrettyPrinting { +// architecture-handbook-entry-end: UniqueIdentifier + def toProtoPrimitive: String = + id.toProtoPrimitive + SafeSimpleString.delimiter + namespace.toProtoPrimitive + + def toLengthLimitedString: String255 = checked(String255.tryCreate(toProtoPrimitive)) + + // utility to filter UIDs using prefixes obtained via UniqueIdentifier.splitFilter() below + def matchesPrefixes(idPrefix: String, nsPrefix: String): Boolean = + id.toProtoPrimitive.startsWith(idPrefix) && namespace.toProtoPrimitive.startsWith(nsPrefix) + + override def pretty: Pretty[this.type] = + prettyOfString(uid => uid.id.show + SafeSimpleString.delimiter + uid.namespace.show) +} + +object UniqueIdentifier { + + def tryCreate(id: String, fingerprint: String): UniqueIdentifier = + UniqueIdentifier(Identifier.tryCreate(id), Namespace(Fingerprint.tryCreate(fingerprint))) + + def tryFromProtoPrimitive(str: String): UniqueIdentifier = + fromProtoPrimitive_(str).valueOr(e => throw new IllegalArgumentException(e)) + + def fromProtoPrimitive_(str: String): Either[String, UniqueIdentifier] = { + val pos = str.indexOf(SafeSimpleString.delimiter) + if (pos > 0) { + val s1 = str.substring(0, pos) + val s2 = str.substring(pos + 2) + for { + idf <- Identifier + .fromProtoPrimitive(s1) + .leftMap(x => s"Identifier decoding of `${str.limit(200)}` failed with: $x") + fp <- Fingerprint + .fromProtoPrimitive(s2) + .leftMap(x => s"Fingerprint decoding of 
`${str.limit(200)}` failed with: $x") + } yield UniqueIdentifier(idf, Namespace(fp)) + } else if (pos == 0) { + Left(s"Invalid unique identifier `$str` with empty identifier.") + } else if (str.isEmpty) { + Left(s"Empty string is not a valid unique identifier.") + } else { + Left(s"Invalid unique identifier `$str` with missing namespace.") + } + } + + def fromProtoPrimitive( + uid: String, + fieldName: String, + ): ParsingResult[UniqueIdentifier] = + fromProtoPrimitive_(uid).leftMap(ValueConversionError(fieldName, _)) + + // slick instance for deserializing unique identifiers + // not an implicit because we shouldn't ever need to parse a raw unique identifier + val getResult: GetResult[UniqueIdentifier] = GetResult(r => deserializeFromDb(r.nextString())) + val getResultO: GetResult[Option[UniqueIdentifier]] = + GetResult(r => r.nextStringOption().map(deserializeFromDb)) + implicit val setParameterUid: SetParameter[UniqueIdentifier] = (v, pp) => + pp >> v.toLengthLimitedString + + /** @throws com.digitalasset.canton.store.db.DbDeserializationException if the string is not a valid unqiue identifier */ + def deserializeFromDb(uid: String): UniqueIdentifier = + fromProtoPrimitive_(uid).valueOr(err => + throw new DbDeserializationException(s"Failed to parse a unique ID $uid: $err") + ) + + /** Split an uid filter into the two subparts */ + def splitFilter(filter: String, append: String = ""): (String, String) = { + val items = filter.split(SafeSimpleString.delimiter) + val prefix = items(0) + if (items.lengthCompare(1) > 0) { + val suffix = items(1) + (prefix ++ append, suffix ++ append) + } else (prefix ++ append, append) + } + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/KeyCollection.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/KeyCollection.scala new file mode 100644 index 0000000000..0b4ed05b42 --- /dev/null +++ 
b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/KeyCollection.scala @@ -0,0 +1,36 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology + +import com.digitalasset.canton.crypto.{EncryptionPublicKey, KeyPurpose, PublicKey, SigningPublicKey} + +final case class KeyCollection( + signingKeys: Seq[SigningPublicKey], + encryptionKeys: Seq[EncryptionPublicKey], +) { + + def forPurpose(purpose: KeyPurpose): Seq[PublicKey] = purpose match { + case KeyPurpose.Signing => signingKeys + case KeyPurpose.Encryption => encryptionKeys + } + + def hasBothKeys(): Boolean = signingKeys.nonEmpty && encryptionKeys.nonEmpty + + def addTo(key: PublicKey): KeyCollection = (key: @unchecked) match { + case sigKey: SigningPublicKey => copy(signingKeys = signingKeys :+ sigKey) + case encKey: EncryptionPublicKey => copy(encryptionKeys = encryptionKeys :+ encKey) + } + + def removeFrom(key: PublicKey): KeyCollection = + (key: @unchecked) match { + case _: SigningPublicKey => copy(signingKeys = signingKeys.filter(_.id != key.id)) + case _: EncryptionPublicKey => copy(encryptionKeys = encryptionKeys.filter(_.id != key.id)) + } +} + +object KeyCollection { + + val empty: KeyCollection = KeyCollection(Seq(), Seq()) + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/MediatorRef.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/MediatorRef.scala new file mode 100644 index 0000000000..b948873f6b --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/MediatorRef.scala @@ -0,0 +1,73 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology + +import com.digitalasset.canton.ProtoDeserializationError.InvariantViolation +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.sequencing.protocol.{MediatorsOfDomain, MemberRecipient, Recipient} +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult + +/** This class represents a union of [[MediatorId]] and [[com.digitalasset.canton.sequencing.protocol.MediatorsOfDomain]]. + * It is used throughout the protocol to represent target/source mediator or mediator group in case of a BFT domain + */ +sealed trait MediatorRef extends PrettyPrinting with Product with Serializable { + def toProtoPrimitive: String + + def toRecipient: Recipient + + def isGroup: Boolean = false + + def isSingle: Boolean = false +} + +object MediatorRef { + final case class Single(mediatorId: MediatorId) extends MediatorRef { + override def toProtoPrimitive: String = mediatorId.toProtoPrimitive + + override def toRecipient: Recipient = MemberRecipient(mediatorId) + + override def isSingle: Boolean = true + + override def pretty: Pretty[Single] = prettyOfParam(_.mediatorId) + } + + final case class Group(mediatorsOfDomain: MediatorsOfDomain) extends MediatorRef { + override def toProtoPrimitive: String = mediatorsOfDomain.toProtoPrimitive + + override def toRecipient: Recipient = mediatorsOfDomain + + override def isGroup: Boolean = true + + override def pretty: Pretty[Group] = prettyOfParam(_.mediatorsOfDomain) + } + + def apply(mediatorId: MediatorId): MediatorRef = Single(mediatorId) + + def apply(mediatorsOfDomain: MediatorsOfDomain): MediatorRef = Group(mediatorsOfDomain) + + def apply(mediatorGroup: MediatorGroup): MediatorRef = { + val MediatorGroup(_, active, _, threshold) = mediatorGroup + if (active.sizeIs == 1 && threshold.value == 1) { + MediatorRef(active(0)) + } else { + MediatorRef(MediatorsOfDomain(mediatorGroup.index)) + 
} + } + + def fromProtoPrimitive( + recipient: String, + fieldName: String, + ): ParsingResult[MediatorRef] = { + Recipient.fromProtoPrimitive(recipient, fieldName).flatMap { + case MemberRecipient(mediatorId: MediatorId) => Right(MediatorRef(mediatorId)) + case mod @ MediatorsOfDomain(_) => Right(MediatorRef(mod)) + case _ => + Left( + InvariantViolation( + s"MediatorRecipient only allows MED or MOD recipients for the field $fieldName, found: $recipient" + ) + ) + } + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/Member.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/Member.scala new file mode 100644 index 0000000000..1f78c807a3 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/Member.scala @@ -0,0 +1,448 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology + +import cats.kernel.Order +import cats.syntax.either.* +import com.daml.ledger.javaapi.data.Party +import com.digitalasset.canton.ProtoDeserializationError.ValueConversionError +import com.digitalasset.canton.config.CantonRequireTypes.{String255, String3, String300} +import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} +import com.digitalasset.canton.crypto.RandomOps +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.store.db.DbDeserializationException +import com.digitalasset.canton.topology.MediatorGroup.MediatorGroupIndex +import com.digitalasset.canton.util.HexString +import com.digitalasset.canton.{LedgerParticipantId, LfPartyId, ProtoDeserializationError} +import com.google.common.annotations.VisibleForTesting +import io.circe.Encoder +import slick.jdbc.{GetResult, PositionedParameters, SetParameter} 
+ +/** Top level trait representing an identity within the system */ +sealed trait Identity extends Product with Serializable with PrettyPrinting { + def uid: UniqueIdentifier + + def toProtoPrimitive: String = uid.toProtoPrimitive + + /** returns the string representation used in console filters (maps to the uid) */ + def filterString: String = uid.toProtoPrimitive + + override def pretty: Pretty[this.type] = prettyOfParam(_.uid) +} + +sealed trait NodeIdentity extends Identity { + def member: Member +} + +sealed trait MemberCode { + + def threeLetterId: String3 + + def toProtoPrimitive: String = threeLetterId.unwrap + +} + +object MemberCode { + + def fromProtoPrimitive_(code: String): Either[String, MemberCode] = + String3.create(code).flatMap { + case MediatorId.Code.threeLetterId => Right(MediatorId.Code) + case DomainTopologyManagerId.Code.threeLetterId => Right(DomainTopologyManagerId.Code) + case ParticipantId.Code.threeLetterId => Right(ParticipantId.Code) + case SequencerId.Code.threeLetterId => Right(SequencerId.Code) + case UnauthenticatedMemberId.Code.threeLetterId => Right(UnauthenticatedMemberId.Code) + case _ => Left(s"Unknown three letter type $code") + } + + def fromProtoPrimitive( + code: String, + field: String, + ): ParsingResult[MemberCode] = + fromProtoPrimitive_(code).leftMap(ValueConversionError(field, _)) + +} + +/** A member in a domain such as a participant and or domain entities + * + * A member can be addressed and talked to on the transaction level + * through the sequencer. 
+ */ +sealed trait Member extends Identity with Product with Serializable { + + def code: MemberCode + + def isAuthenticated: Boolean + + override def toProtoPrimitive: String = toLengthLimitedString.unwrap + + def toLengthLimitedString: String300 = + String300.tryCreate( + s"${code.threeLetterId.unwrap}${SafeSimpleString.delimiter}${uid.toProtoPrimitive}" + ) + + override def pretty: Pretty[Member] = + prettyOfString(inst => + inst.code.threeLetterId.unwrap + SafeSimpleString.delimiter + inst.uid.show + ) +} + +object Member { + + def fromProtoPrimitive_(member: String): Either[String, Member] = { + // The first three letters of the string identify the type of member + val (typ, uidS) = member.splitAt(3) + + def mapToType(code: MemberCode, uid: UniqueIdentifier): Either[String, Member] = { + code match { + case MediatorId.Code => Right(MediatorId(uid)) + case DomainTopologyManagerId.Code => Right(DomainTopologyManagerId(uid)) + case ParticipantId.Code => Right(ParticipantId(uid)) + case SequencerId.Code => Right(SequencerId(uid)) + case UnauthenticatedMemberId.Code => Right(UnauthenticatedMemberId(uid)) + } + } + + // expecting COD:: + val dlen = SafeSimpleString.delimiter.length + + for { + _ <- Either.cond( + member.length > 3 + (2 * dlen), + (), + s"Invalid member `$member`, expecting ::id::fingerprint.", + ) + _ <- Either.cond( + member.substring(3, 3 + dlen) == SafeSimpleString.delimiter, + (), + s"Expected delimiter ${SafeSimpleString.delimiter} after three letter code of `$member`", + ) + code <- MemberCode.fromProtoPrimitive_(typ) + uid <- UniqueIdentifier.fromProtoPrimitive_(uidS.substring(dlen)) + member <- mapToType(code, uid) + } yield member + } + + def fromProtoPrimitive( + member: String, + fieldName: String, + ): ParsingResult[Member] = + fromProtoPrimitive_(member).leftMap(ValueConversionError(fieldName, _)) + + // Use the same ordering as for what we use in the database + implicit val memberOrdering: Ordering[Member] = 
Ordering.by(_.toLengthLimitedString.unwrap) + + /** Instances for slick to set and get members. + * Not exposed by default as other types derived from [[Member]] have their own persistence schemes ([[ParticipantId]]). + */ + object DbStorageImplicits { + implicit val setParameterMember: SetParameter[Member] = (v: Member, pp) => + pp >> v.toLengthLimitedString + + implicit val getResultMember: GetResult[Member] = GetResult(r => { + Member + .fromProtoPrimitive_(r.nextString()) + .valueOr(err => throw new DbDeserializationException(err)) + }) + } +} + +sealed trait AuthenticatedMember extends Member { + override def code: AuthenticatedMemberCode + override def isAuthenticated: Boolean = true +} + +sealed trait AuthenticatedMemberCode extends MemberCode + +final case class UnauthenticatedMemberId(uid: UniqueIdentifier) extends Member { + override def code: MemberCode = UnauthenticatedMemberId.Code + override def isAuthenticated: Boolean = false +} + +object UnauthenticatedMemberId { + object Code extends MemberCode { + val threeLetterId: String3 = String3.tryCreate("UNM") + } + + private val RandomIdentifierNumberOfBytes = 20 + + def tryCreate(namespace: Namespace)(randomOps: RandomOps): UnauthenticatedMemberId = + UnauthenticatedMemberId( + UniqueIdentifier.tryCreate( + HexString.toHexString(randomOps.generateRandomByteString(RandomIdentifierNumberOfBytes)), + namespace.fingerprint.unwrap, + ) + ) +} + +final case class DomainId(uid: UniqueIdentifier) extends NodeIdentity { + def unwrap: UniqueIdentifier = uid + def toLengthLimitedString: String255 = uid.toLengthLimitedString + + // The member and member of a domain identity is the domain topology manager + override def member: Member = DomainTopologyManagerId(uid) +} + +object DomainId { + + implicit val orderDomainId: Order[DomainId] = Order.by[DomainId, String](_.toProtoPrimitive) + implicit val domainIdEncoder: Encoder[DomainId] = + Encoder.encodeString.contramap(_.unwrap.toProtoPrimitive) + + // Instances for 
slick (db) queries + implicit val getResultDomainId: GetResult[DomainId] = + UniqueIdentifier.getResult.andThen(DomainId(_)) + + implicit val getResultDomainIdO: GetResult[Option[DomainId]] = + UniqueIdentifier.getResultO.andThen(_.map(DomainId(_))) + + implicit val setParameterDomainId: SetParameter[DomainId] = + (d: DomainId, pp: PositionedParameters) => pp >> d.toLengthLimitedString + implicit val setParameterDomainIdO: SetParameter[Option[DomainId]] = + (d: Option[DomainId], pp: PositionedParameters) => pp >> d.map(_.toLengthLimitedString) + + def fromProtoPrimitive( + proto: String, + fieldName: String, + ): ParsingResult[DomainId] = + UniqueIdentifier.fromProtoPrimitive(proto, fieldName).map(DomainId(_)) + + def tryFromString(str: String): DomainId = DomainId(UniqueIdentifier.tryFromProtoPrimitive(str)) + + def fromString(str: String): Either[String, DomainId] = + UniqueIdentifier.fromProtoPrimitive_(str).map(DomainId(_)) + +} + +/** A participant identifier */ +final case class ParticipantId(uid: UniqueIdentifier) + extends AuthenticatedMember + with NodeIdentity { + + override def code: AuthenticatedMemberCode = ParticipantId.Code + + def adminParty: PartyId = PartyId(uid) + def toLf: LedgerParticipantId = LedgerParticipantId.assertFromString(uid.toProtoPrimitive) + + override def member: Member = this +} + +object ParticipantId { + object Code extends AuthenticatedMemberCode { + val threeLetterId: String3 = String3.tryCreate("PAR") + } + def apply(identifier: Identifier, namespace: Namespace): ParticipantId = + ParticipantId(UniqueIdentifier(identifier, namespace)) + + /** create a participant from a string + * + * used in testing + */ + @VisibleForTesting + def apply(addr: String): ParticipantId = { + ParticipantId(UniqueIdentifier.tryCreate(addr, "default")) + } + + implicit val ordering: Ordering[ParticipantId] = Ordering.by(_.uid.toProtoPrimitive) + + def fromProtoPrimitive( + proto: String, + fieldName: String, + ): ParsingResult[ParticipantId] = + 
Member.fromProtoPrimitive(proto, fieldName).flatMap { + case x: ParticipantId => Right(x) + case y => + Left( + ProtoDeserializationError + .ValueDeserializationError(fieldName, s"Value $y is not of type `ParticipantId`") + ) + } + + def fromLfParticipant(lfParticipant: LedgerParticipantId): Either[String, ParticipantId] = + UniqueIdentifier.fromProtoPrimitive_(lfParticipant).map(ParticipantId(_)) + + def tryFromLfParticipant(lfParticipant: LedgerParticipantId): ParticipantId = + fromLfParticipant(lfParticipant).fold( + e => throw new IllegalArgumentException(e), + Predef.identity, + ) + + def tryFromProtoPrimitive(str: String): ParticipantId = + fromProtoPrimitive(str, "").fold( + err => throw new IllegalArgumentException(err.message), + identity, + ) + + // Instances for slick (db) queries + implicit val getResultParticipantId: GetResult[ParticipantId] = + UniqueIdentifier.getResult.andThen(ParticipantId(_)) + implicit val setParameterParticipantId: SetParameter[ParticipantId] = + (p: ParticipantId, pp: PositionedParameters) => pp >> p.uid.toLengthLimitedString +} + +/** A party identifier based on a unique identifier + */ +final case class PartyId(uid: UniqueIdentifier) extends Identity { + + def toLf: LfPartyId = LfPartyId.assertFromString(uid.toProtoPrimitive) + + def toParty: Party = new Party(toLf) +} + +object PartyId { + + implicit val ordering: Ordering[PartyId] = Ordering.by(x => x.toProtoPrimitive) + implicit val getResultPartyId: GetResult[PartyId] = + UniqueIdentifier.getResult.andThen(PartyId(_)) + implicit val setParameterPartyId: SetParameter[PartyId] = + (p: PartyId, pp: PositionedParameters) => pp >> p.uid.toLengthLimitedString + + def apply(identifier: Identifier, namespace: Namespace): PartyId = + PartyId(UniqueIdentifier(identifier, namespace)) + + def fromLfParty(lfParty: LfPartyId): Either[String, PartyId] = + UniqueIdentifier.fromProtoPrimitive_(lfParty).map(PartyId(_)) + + def tryFromLfParty(lfParty: LfPartyId): PartyId = + 
fromLfParty(lfParty) match { + case Right(partyId) => partyId + case Left(e) => throw new IllegalArgumentException(e) + } + + def fromProtoPrimitive(str: String, fieldName: String): ParsingResult[PartyId] = (for { + lfPartyId <- LfPartyId.fromString(str) + partyId <- fromLfParty(lfPartyId) + } yield partyId).leftMap(ValueConversionError(fieldName, _)) + + def tryFromProtoPrimitive(str: String): PartyId = PartyId( + UniqueIdentifier.tryFromProtoPrimitive(str) + ) + +} + +sealed trait DomainMember extends AuthenticatedMember + +object DomainMember { + + /** List domain members for the given id, optionally including the sequencer. * */ + def list(id: DomainId, includeSequencer: Boolean): Set[DomainMember] = { + // TODO(i7992) remove static mediator id + val baseMembers = Set[DomainMember](DomainTopologyManagerId(id), MediatorId(id)) + if (includeSequencer) baseMembers + SequencerId(id) + else baseMembers + } + + /** List all domain members always including the sequencer. */ + def listAll(id: DomainId): Set[DomainMember] = list(id, includeSequencer = true) + +} + +/** @param index uniquely identifies the group, just like [[MediatorId]] for single mediators. 
+ * @param active the active mediators belonging to the group + * @param passive the passive mediators belonging to the group + * @param threshold the minimum size of a quorum + */ +final case class MediatorGroup( + index: MediatorGroupIndex, + active: Seq[MediatorId], + passive: Seq[MediatorId], + threshold: PositiveInt, +) { + def isActive: Boolean = active.size >= threshold.value + + def all: Seq[MediatorId] = active ++ passive +} + +object MediatorGroup { + type MediatorGroupIndex = NonNegativeInt +} + +final case class MediatorId(uid: UniqueIdentifier) extends DomainMember with NodeIdentity { + override def code: AuthenticatedMemberCode = MediatorId.Code + + override def member: Member = this +} + +object MediatorId { + object Code extends AuthenticatedMemberCode { + val threeLetterId = String3.tryCreate("MED") + } + + def apply(identifier: Identifier, namespace: Namespace): MediatorId = + MediatorId(UniqueIdentifier(identifier, namespace)) + + def apply(domainId: DomainId): MediatorId = MediatorId(domainId.unwrap) + + def fromProtoPrimitive( + mediatorId: String, + fieldName: String, + ): ParsingResult[MediatorId] = Member.fromProtoPrimitive(mediatorId, fieldName).flatMap { + case medId: MediatorId => Right(medId) + case _ => + Left( + ProtoDeserializationError + .ValueDeserializationError(fieldName, s"Value `$mediatorId` is not of type MediatorId") + ) + } + +} + +/** The domain topology manager id + * + * The domain manager is the topology manager of the domain. The read side + * of the domain manager is the IdentityProvidingService. 
+ */ +final case class DomainTopologyManagerId(uid: UniqueIdentifier) extends DomainMember { + override def code: AuthenticatedMemberCode = DomainTopologyManagerId.Code + lazy val domainId: DomainId = DomainId(uid) +} + +object DomainTopologyManagerId { + + object Code extends AuthenticatedMemberCode { + val threeLetterId = String3.tryCreate("DOM") + } + + def apply(identifier: Identifier, namespace: Namespace): DomainTopologyManagerId = + DomainTopologyManagerId(UniqueIdentifier(identifier, namespace)) + + def apply(domainId: DomainId): DomainTopologyManagerId = DomainTopologyManagerId(domainId.unwrap) +} + +final case class SequencerGroup( + active: Seq[SequencerId], + passive: Seq[SequencerId], + threshold: PositiveInt, +) + +final case class SequencerId(uid: UniqueIdentifier) extends DomainMember with NodeIdentity { + override def code: AuthenticatedMemberCode = SequencerId.Code + + override def member: Member = this +} + +object SequencerId { + + object Code extends AuthenticatedMemberCode { + val threeLetterId = String3.tryCreate("SEQ") + } + + def apply(identifier: Identifier, namespace: Namespace): SequencerId = + SequencerId(UniqueIdentifier(identifier, namespace)) + + def apply(domainId: DomainId): SequencerId = SequencerId(domainId.unwrap) + + def fromProtoPrimitive( + proto: String, + fieldName: String, + ): ParsingResult[SequencerId] = + Member.fromProtoPrimitive(proto, fieldName).flatMap { + case x: SequencerId => Right(x) + case y => + Left( + ProtoDeserializationError + .ValueDeserializationError(fieldName, s"Value $y is not of type `SequencerId`") + ) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManager.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManager.scala new file mode 100644 index 0000000000..e8a6a75b53 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManager.scala @@ -0,0 +1,634 @@ +// 
Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology + +import cats.data.EitherT +import cats.syntax.either.* +import cats.syntax.parallel.* +import com.digitalasset.canton.concurrent.FutureSupervisor +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.crypto.* +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.data.CantonTimestamp.now +import com.digitalasset.canton.error.* +import com.digitalasset.canton.lifecycle.{ + AsyncOrSyncCloseable, + FlagCloseableAsync, + FutureUnlessShutdown, + SyncCloseable, +} +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.protocol.DynamicDomainParameters +import com.digitalasset.canton.time.Clock +import com.digitalasset.canton.topology.TopologyManagerError.IncreaseOfLedgerTimeRecordTimeTolerance +import com.digitalasset.canton.topology.processing.{ + EffectiveTime, + IncomingTopologyTransactionAuthorizationValidator, + SequencedTime, + SnapshotAuthorizationValidator, +} +import com.digitalasset.canton.topology.store.* +import com.digitalasset.canton.topology.transaction.* +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.FutureInstances.* +import com.digitalasset.canton.util.ShowUtil.* +import com.digitalasset.canton.util.{MonadUtil, SimpleExecutionQueue} +import com.digitalasset.canton.version.ProtocolVersion + +import scala.concurrent.{ExecutionContext, Future} + +abstract class TopologyManager[E <: CantonError]( + val clock: Clock, + val crypto: Crypto, + protected val store: TopologyStore[TopologyStoreId], + timeouts: ProcessingTimeout, + protocolVersion: ProtocolVersion, + protected val loggerFactory: NamedLoggerFactory, + futureSupervisor: FutureSupervisor, +)(implicit ec: ExecutionContext) + extends TopologyManagerStatus + with 
NamedLogging + with FlagCloseableAsync { + + protected val validator = + new IncomingTopologyTransactionAuthorizationValidator( + crypto.pureCrypto, + store, + None, + loggerFactory.append("role", "manager"), + ) + + /** returns the current queue size (how many changes are being processed) */ + override def queueSize: Int = sequentialQueue.queueSize + + protected def checkTransactionNotAddedBefore( + transaction: SignedTopologyTransaction[TopologyChangeOp] + )(implicit traceContext: TraceContext): EitherT[Future, TopologyManagerError, Unit] = { + val ret = store + .exists(transaction) + .map(x => + Either.cond( + !x, + (), + TopologyManagerError.DuplicateTransaction + .Failure(transaction.transaction, transaction.key.fingerprint), + ) + ) + EitherT(ret) + } + + protected def checkRemovalRefersToExisingTx( + transaction: SignedTopologyTransaction[TopologyChangeOp] + )(implicit traceContext: TraceContext): EitherT[Future, TopologyManagerError, Unit] = + if ( + transaction.operation == TopologyChangeOp.Add || transaction.operation == TopologyChangeOp.Replace + ) + EitherT.rightT(()) + else { + for { + active <- EitherT.right( + store.findPositiveTransactionsForMapping(transaction.transaction.element.mapping) + ) + filtered = active.find(sit => sit.transaction.element == transaction.transaction.element) + _ <- EitherT.cond[Future]( + filtered.nonEmpty, + (), + TopologyManagerError.NoCorrespondingActiveTxToRevoke.Element( + transaction.transaction.element + ): TopologyManagerError, + ) + } yield () + } + + protected def keyRevocationIsNotDangerous( + owner: Member, + key: PublicKey, + elementId: TopologyElementId, + force: Boolean, + )(implicit traceContext: TraceContext): EitherT[Future, TopologyManagerError, Unit] = { + lazy val removingLastKeyMustBeForcedError: TopologyManagerError = + TopologyManagerError.RemovingLastKeyMustBeForced.Failure(key.fingerprint, key.purpose) + + for { + txs <- EitherT.right( + store.findPositiveTransactions( + // Use the max timestamp so 
that we get the head state + CantonTimestamp.MaxValue, + asOfInclusive = true, + includeSecondary = false, + types = Seq(DomainTopologyTransactionType.OwnerToKeyMapping), + filterUid = Some(Seq(owner.uid)), + filterNamespace = None, + ) + ) + remaining = txs.toIdentityState.collect { + case TopologyStateUpdateElement(id, OwnerToKeyMapping(`owner`, remainingKey)) + if id != elementId && key.purpose == remainingKey.purpose => + key + } + _ <- + if (force && remaining.isEmpty) { + logger.info(s"Transaction will forcefully remove last ${key.purpose} of $owner") + EitherT.rightT[Future, TopologyManagerError](()) + } else EitherT.cond[Future](remaining.nonEmpty, (), removingLastKeyMustBeForcedError) + } yield () + } + + protected def keyRevocationDelegationIsNotDangerous( + transaction: SignedTopologyTransaction[TopologyChangeOp], + namespace: Namespace, + targetKey: SigningPublicKey, + force: Boolean, + removeFromCache: ( + SnapshotAuthorizationValidator, + StoredTopologyTransactions[TopologyChangeOp], + ) => EitherT[FutureUnlessShutdown, TopologyManagerError, Unit], + )(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, TopologyManagerError, Unit] = { + + lazy val unauthorizedTransaction: TopologyManagerError = + TopologyManagerError.UnauthorizedTransaction.Failure() + + lazy val removingKeyWithDanglingTransactionsMustBeForcedError: TopologyManagerError = + TopologyManagerError.RemovingKeyWithDanglingTransactionsMustBeForced + .Failure(targetKey.fingerprint, targetKey.purpose) + + val validatorSnap = + new SnapshotAuthorizationValidator(now(), store, timeouts, loggerFactory, futureSupervisor) + + for { + // step1: check if transaction is authorized + authorized <- EitherT.right[TopologyManagerError](validatorSnap.authorizedBy(transaction)) + _ <- + // not authorized + if (authorized.isEmpty) + EitherT.leftT[FutureUnlessShutdown, Unit](unauthorizedTransaction: TopologyManagerError) + // authorized + else if (!force) { + for { + // step2: find 
transaction that is going to be removed + storedTxsToRemove <- EitherT + .right[TopologyManagerError]( + store.findStoredNoSignature(transaction.transaction.reverse) + ) + .mapK(FutureUnlessShutdown.outcomeK) + + // step3: remove namespace delegation transaction from cache store + _ <- storedTxsToRemove.parTraverse { storedTxToRemove => + { + val wrapStoredTx = + new StoredTopologyTransactions[TopologyChangeOp](Seq(storedTxToRemove)) + removeFromCache(validatorSnap, wrapStoredTx) + } + } + + // step4: retrieve all transactions (possibly related with this namespace) + // TODO(i9809): this is risky for a big number of parties (i.e. 1M) + txs <- EitherT + .right( + store.findPositiveTransactions( + CantonTimestamp.MaxValue, + asOfInclusive = true, + includeSecondary = true, + types = DomainTopologyTransactionType.all, + filterUid = None, + filterNamespace = Some(Seq(namespace)), + ) + ) + .mapK(FutureUnlessShutdown.outcomeK) + + // step5: check if these transactions are still valid + _ <- txs.combine.result.parTraverse { txToCheck => + EitherT( + validatorSnap + .authorizedBy(txToCheck.transaction) + .map(res => + Either + .cond(res.nonEmpty, (), removingKeyWithDanglingTransactionsMustBeForcedError) + ) + ) + } + } yield () + } else + EitherT.rightT[FutureUnlessShutdown, TopologyManagerError](()) + } yield () + } + + protected def transactionIsNotDangerous( + transaction: SignedTopologyTransaction[TopologyChangeOp], + force: Boolean, + )(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, TopologyManagerError, Unit] = + if (transaction.transaction.op == TopologyChangeOp.Add) + EitherT.rightT(()) + else { + transaction.transaction.element.mapping match { + case OwnerToKeyMapping(owner, key) => + keyRevocationIsNotDangerous(owner, key, transaction.transaction.element.id, force) + .mapK(FutureUnlessShutdown.outcomeK) + case NamespaceDelegation(namespace, targetKey, _) => + keyRevocationDelegationIsNotDangerous( + transaction, + namespace, + 
targetKey, + force, + { (validatorSnap, transaction) => + EitherT.right[TopologyManagerError]( + validatorSnap + .removeNamespaceDelegationFromCache(namespace, transaction) + ) + }, + ) + case IdentifierDelegation(uniqueKey, targetKey) => + keyRevocationDelegationIsNotDangerous( + transaction, + uniqueKey.namespace, + targetKey, + force, + { (validatorSnap, transaction) => + EitherT.right[TopologyManagerError]( + validatorSnap + .removeIdentifierDelegationFromCache(uniqueKey, transaction) + ) + }, + ) + case DomainParametersChange(_, newDomainParameters) if !force => + checkLedgerTimeRecordTimeToleranceNotIncreasing(newDomainParameters).mapK( + FutureUnlessShutdown.outcomeK + ) + case _ => EitherT.rightT(()) + } + } + + def signedMappingAlreadyExists( + mapping: TopologyMapping, + signingKey: Fingerprint, + )(implicit traceContext: TraceContext): Future[Boolean] = + for { + txs <- store.findPositiveTransactionsForMapping(mapping) + mappings = txs.map(x => (x.transaction.element.mapping, x.key.fingerprint)) + } yield mappings.contains((mapping, signingKey)) + + protected def checkMappingOfTxDoesNotExistYet( + transaction: SignedTopologyTransaction[TopologyChangeOp], + allowDuplicateMappings: Boolean, + )(implicit traceContext: TraceContext): EitherT[Future, TopologyManagerError, Unit] = + if (allowDuplicateMappings || transaction.transaction.op != TopologyChangeOp.Add) { + EitherT.rightT(()) + } else { + (for { + exists <- EitherT.right( + signedMappingAlreadyExists( + transaction.transaction.element.mapping, + transaction.key.fingerprint, + ) + ) + _ <- EitherT.cond[Future]( + !exists, + (), + TopologyManagerError.MappingAlreadyExists.Failure( + transaction.transaction.element, + transaction.key.fingerprint, + ): TopologyManagerError, + ) + } yield ()): EitherT[Future, TopologyManagerError, Unit] + } + + private def checkLedgerTimeRecordTimeToleranceNotIncreasing( + newDomainParameters: DynamicDomainParameters + )(implicit traceContext: TraceContext): 
EitherT[Future, TopologyManagerError, Unit] = { + // See i9028 for a detailed design. + + EitherT(for { + headTransactions <- store.headTransactions + } yield { + val domainParameters = headTransactions.toTopologyState + .collectFirst { case DomainGovernanceElement(DomainParametersChange(_, domainParameters)) => + domainParameters + } + .getOrElse(DynamicDomainParameters.initialValues(clock, protocolVersion)) + Either.cond( + domainParameters.ledgerTimeRecordTimeTolerance >= newDomainParameters.ledgerTimeRecordTimeTolerance, + (), + IncreaseOfLedgerTimeRecordTimeTolerance.TemporarilyInsecure( + domainParameters.ledgerTimeRecordTimeTolerance, + newDomainParameters.ledgerTimeRecordTimeTolerance, + ), + ) + }) + } + + protected def checkNewTransaction( + transaction: SignedTopologyTransaction[TopologyChangeOp], + force: Boolean, + )(implicit + traceContext: TraceContext + ): EitherT[Future, E, Unit] + + protected def build[Op <: TopologyChangeOp]( + transaction: TopologyTransaction[Op], + signingKey: Option[Fingerprint], + protocolVersion: ProtocolVersion, + )(implicit + traceContext: TraceContext + ): EitherT[Future, E, SignedTopologyTransaction[Op]] = { + for { + // find signing key + key <- signingKey match { + case Some(key) => EitherT.rightT[Future, E](key) + case None => signingKeyForTransactionF(transaction) + } + // fetch public key + pubkey <- crypto.cryptoPublicStore + .signingKey(key) + .leftMap(x => wrapError(TopologyManagerError.InternalError.CryptoPublicError(x))) + .subflatMap(_.toRight(wrapError(TopologyManagerError.PublicKeyNotInStore.Failure(key)))) + // create signed transaction + signed <- SignedTopologyTransaction + .create( + transaction, + pubkey, + crypto.pureCrypto, + crypto.privateCrypto, + protocolVersion, + ) + .leftMap { + case SigningError.UnknownSigningKey(keyId) => + wrapError(TopologyManagerError.SecretKeyNotInStore.Failure(keyId)) + case err => wrapError(TopologyManagerError.InternalError.TopologySigningError(err)) + } + } yield 
signed + } + + /** Authorizes a new topology transaction by signing it and adding it to the topology state + * + * @param transaction the transaction to be signed and added + * @param signingKey the key which should be used to sign + * @param protocolVersion the protocol version corresponding to the transaction + * @param force force dangerous operations, such as removing the last signing key of a participant + * @param replaceExisting if true and the transaction op is add, then we'll replace existing active mappings before adding the new + * @return the domain state (initialized or not initialized) or an error code of why the addition failed + */ + def authorize[Op <: TopologyChangeOp]( + transaction: TopologyTransaction[Op], + signingKey: Option[Fingerprint], + protocolVersion: ProtocolVersion, + force: Boolean = false, + replaceExisting: Boolean = false, + )(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, E, SignedTopologyTransaction[Op]] = { + sequentialQueue.executeEUS( + { + logger.debug(show"Attempting to authorize ${transaction.element.mapping} with $signingKey") + for { + signed <- build(transaction, signingKey, protocolVersion).mapK( + FutureUnlessShutdown.outcomeK + ) + _ <- process(signed, force, replaceExisting, allowDuplicateMappings = false) + } yield signed + }, + "authorize transaction", + ) + } + + protected def signingKeyForTransactionF( + transaction: TopologyTransaction[TopologyChangeOp] + )(implicit traceContext: TraceContext): EitherT[Future, E, Fingerprint] = { + for { + // need to execute signing key finding sequentially, as the validator is expecting incremental in-memory updates + // to the "authorization graph" + keys <- EitherT.right( + validator.getValidSigningKeysForMapping(clock.uniqueTime(), transaction.element.mapping) + ) + fingerprint <- findSigningKey(keys).leftMap(wrapError) + } yield fingerprint + } + + private def findSigningKey( + keys: Seq[Fingerprint] + )(implicit traceContext: TraceContext): 
EitherT[Future, TopologyManagerError, Fingerprint] = + keys.reverse.toList + .parFilterA(fingerprint => + crypto.cryptoPrivateStore + .existsSigningKey(fingerprint) + ) + .map(x => x.headOption) + .leftMap[TopologyManagerError](x => TopologyManagerError.InternalError.CryptoPrivateError(x)) + .subflatMap(_.toRight(TopologyManagerError.NoAppropriateSigningKeyInStore.Failure(keys))) + + def add( + transaction: SignedTopologyTransaction[TopologyChangeOp], + force: Boolean = false, + replaceExisting: Boolean = false, + allowDuplicateMappings: Boolean = false, + )(implicit traceContext: TraceContext): EitherT[FutureUnlessShutdown, E, Unit] = { + // Ensure sequential execution of `process`: When processing signed topology transactions, we test whether they can be + // added incrementally to the existing state. Therefore, we need to sequence + // (testing + adding) and ensure that we don't concurrently insert these + // transactions. + sequentialQueue.executeEUS( + process(transaction, force, replaceExisting, allowDuplicateMappings), + "add transaction", + ) + } + + protected val sequentialQueue = new SimpleExecutionQueue( + "topology-manager-queue", + futureSupervisor, + timeouts, + loggerFactory, + ) + + /** sequential(!) 
processing of topology transactions + * + * @param force force a dangerous change (such as revoking the last key) + * @param allowDuplicateMappings whether to reject a transaction if a similar transaction leading to the same result already exists + */ + protected def process[Op <: TopologyChangeOp]( + transaction: SignedTopologyTransaction[Op], + force: Boolean, + replaceExisting: Boolean, + allowDuplicateMappings: Boolean, + )(implicit traceContext: TraceContext): EitherT[FutureUnlessShutdown, E, Unit] = { + def checkValidationResult( + validated: Seq[ValidatedTopologyTransaction] + ): EitherT[Future, E, Unit] = { + EitherT.fromEither((validated.find(_.rejectionReason.nonEmpty) match { + case Some( + ValidatedTopologyTransaction( + `transaction`, + Some(rejection), + ) + ) => + Left(rejection.toTopologyManagerError) + case Some(tx: ValidatedTopologyTransaction) => + Left(TopologyManagerError.InternalError.ReplaceExistingFailed(tx)) + case None => Right(()) + }).leftMap(wrapError)) + } + + def addOneByOne( + transactions: Seq[SignedTopologyTransaction[TopologyChangeOp]] + ): EitherT[Future, E, Unit] = { + MonadUtil.sequentialTraverse_(transactions) { tx => + val now = clock.uniqueTime() + preNotifyObservers(Seq(tx)) + logger.info( + show"Applied topology transaction ${tx.transaction.op} ${tx.transaction.element.mapping} at $now" + ) + for { + _ <- EitherT.right( + store.append( + SequencedTime(now), + EffectiveTime(now), + Seq(ValidatedTopologyTransaction.valid(tx)), + ) + ): EitherT[Future, E, Unit] + _ <- EitherT.right(notifyObservers(now, Seq(tx))) + } yield () + } + } + + val isUniquenessRequired = transaction.operation match { + case TopologyChangeOp.Replace => false + case _ => true + } + + val now = clock.uniqueTime() + val ret = for { + // uniqueness check on store: ensure that transaction hasn't been added before + _ <- + if (isUniquenessRequired) + checkTransactionNotAddedBefore(transaction) + .leftMap(wrapError) + .mapK(FutureUnlessShutdown.outcomeK) + 
else EitherT.pure[FutureUnlessShutdown, E](()) + _ <- checkRemovalRefersToExisingTx(transaction) + .leftMap(wrapError) + .mapK(FutureUnlessShutdown.outcomeK) + _ <- checkMappingOfTxDoesNotExistYet(transaction, allowDuplicateMappings) + .leftMap(wrapError) + .mapK(FutureUnlessShutdown.outcomeK) + _ <- transactionIsNotDangerous(transaction, force).leftMap(wrapError) + _ <- checkNewTransaction(transaction, force).mapK( + FutureUnlessShutdown.outcomeK + ) // domain / participant specific checks + deactivateExisting <- removeExistingTransactions(transaction, replaceExisting).mapK( + FutureUnlessShutdown.outcomeK + ) + updateTx = transaction +: deactivateExisting + res <- EitherT + .right(validator.validateAndUpdateHeadAuthState(now, updateTx)) + .mapK(FutureUnlessShutdown.outcomeK) + _ <- checkValidationResult(res._2).mapK(FutureUnlessShutdown.outcomeK) + // TODO(i1251) batch adding once we overhaul the domain identity dispatcher (right now, adding multiple tx with same ts doesn't work) + _ <- addOneByOne(updateTx).mapK(FutureUnlessShutdown.outcomeK) + } yield () + + ret.leftMap { err => + // if there was an intermittent failure, just reset the auth validator (will reload the state) + validator.reset() + err + } + } + + protected def removeExistingTransactions( + transaction: SignedTopologyTransaction[TopologyChangeOp], + replaceExisting: Boolean, + )(implicit + traceContext: TraceContext + ): EitherT[Future, E, Seq[SignedTopologyTransaction[TopologyChangeOp]]] = + if (!replaceExisting || transaction.operation == TopologyChangeOp.Remove) { + EitherT.rightT(Seq()) + } else { + val (nsFilter, uidFilter) = transaction.uniquePath.maybeUid match { + case Some(uid) => (None, Some(Seq(uid))) + case None => (Some(Seq(transaction.uniquePath.namespace)), None) + } + + for { + rawTxs <- EitherT.right( + store.findPositiveTransactions( + asOf = CantonTimestamp.MaxValue, + asOfInclusive = false, + includeSecondary = false, + types = Seq(transaction.uniquePath.dbType), + filterUid = 
uidFilter, + filterNamespace = nsFilter, + ) + ) + reverse <- MonadUtil.sequentialTraverse( + rawTxs.adds.toDomainTopologyTransactions + .filter( + _.transaction.element.mapping.isReplacedBy( + transaction.transaction.element.mapping + ) + ) + )(x => + build( + x.transaction.reverse, + None, + protocolVersion, + ) + ) + } yield reverse + } + + protected def notifyObservers( + timestamp: CantonTimestamp, + transactions: Seq[SignedTopologyTransaction[TopologyChangeOp]], + )(implicit traceContext: TraceContext): Future[Unit] + + protected def preNotifyObservers(transactions: Seq[SignedTopologyTransaction[TopologyChangeOp]])( + implicit traceContext: TraceContext + ): Unit = {} + + protected def wrapError(error: TopologyManagerError)(implicit traceContext: TraceContext): E + + def genTransaction( + op: TopologyChangeOp, + mapping: TopologyMapping, + protocolVersion: ProtocolVersion, + )(implicit + traceContext: TraceContext + ): EitherT[Future, TopologyManagerError, TopologyTransaction[TopologyChangeOp]] = { + import TopologyChangeOp.* + (op, mapping) match { + case (Add, mapping: TopologyStateUpdateMapping) => + EitherT.rightT(TopologyStateUpdate.createAdd(mapping, protocolVersion)) + + case (Remove, mapping: TopologyStateUpdateMapping) => + for { + tx <- EitherT( + store + .findPositiveTransactionsForMapping(mapping) + .map( + _.headOption.toRight[TopologyManagerError]( + TopologyManagerError.NoCorrespondingActiveTxToRevoke.Mapping(mapping) + ) + ) + ) + } yield tx.transaction.reverse + + case (Replace, mapping: DomainGovernanceMapping) => + EitherT.pure(DomainGovernanceTransaction(mapping, protocolVersion)) + + case (op, mapping) => + EitherT.fromEither( + Left(TopologyManagerError.InternalError.IncompatibleOpMapping(op, mapping)) + ) + } + } + + override protected def closeAsync(): Seq[AsyncOrSyncCloseable] = { + Seq( + SyncCloseable("topology-manager-store", store.close()), + SyncCloseable("topology-manager-sequential-queue", sequentialQueue.close()), + ) + } + +} 
diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManagerError.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManagerError.scala new file mode 100644 index 0000000000..863c11353f --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManagerError.scala @@ -0,0 +1,493 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology + +import com.daml.error.* +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.config.RequireTypes.{PositiveInt, PositiveLong} +import com.digitalasset.canton.crypto.* +import com.digitalasset.canton.crypto.store.{CryptoPrivateStoreError, CryptoPublicStoreError} +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.error.CantonErrorGroups.TopologyManagementErrorGroup.TopologyManagerErrorGroup +import com.digitalasset.canton.error.{Alarm, AlarmErrorCode, CantonError} +import com.digitalasset.canton.logging.ErrorLoggingContext +import com.digitalasset.canton.time.NonNegativeFiniteDuration +import com.digitalasset.canton.topology.processing.EffectiveTime +import com.digitalasset.canton.topology.store.ValidatedTopologyTransaction +import com.digitalasset.canton.topology.transaction.TopologyTransactionX.TxHash +import com.digitalasset.canton.topology.transaction.* + +sealed trait TopologyManagerError extends CantonError + +object TopologyManagerError extends TopologyManagerErrorGroup { + + @Explanation( + """This error indicates that there was an internal error within the topology manager.""" + ) + @Resolution("Inspect error message for details.") + object InternalError + extends ErrorCode( + id = "TOPOLOGY_MANAGER_INTERNAL_ERROR", + ErrorCategory.SystemInternalAssumptionViolated, + ) { + + final case class ImplementMe(msg: String = "")(implicit + 
val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = "TODO(#14048) implement me" + (if (msg.nonEmpty) s": $msg" else "") + ) + with TopologyManagerError + + final case class Other(s: String)(implicit + val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = s"TODO(#14048) other failure: ${s}" + ) + with TopologyManagerError + + final case class CryptoPublicError(error: CryptoPublicStoreError)(implicit + val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = "Operation on the public crypto store failed" + ) + with TopologyManagerError + + final case class CryptoPrivateError(error: CryptoPrivateStoreError)(implicit + val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = "Operation on the secret crypto store failed" + ) + with TopologyManagerError + + final case class IncompatibleOpMapping(op: TopologyChangeOp, mapping: TopologyMapping)(implicit + val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = "The operation is incompatible with the mapping" + ) + with TopologyManagerError + + final case class TopologySigningError(error: SigningError)(implicit + val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = "Creating a signed transaction failed due to a crypto error" + ) + with TopologyManagerError + + final case class ReplaceExistingFailed(invalid: ValidatedTopologyTransaction)(implicit + val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = "Replacing existing transaction failed upon removal" + ) + with TopologyManagerError + + } + + @Explanation("""The topology manager has received a malformed message from another node.""") + @Resolution("Inspect the error message for details.") + object TopologyManagerAlarm extends AlarmErrorCode(id = "TOPOLOGY_MANAGER_ALARM") { + final case class Warn(override val cause: String)(implicit + override val loggingContext: ErrorLoggingContext + ) extends 
Alarm(cause) + with TopologyManagerError { + override lazy val logOnCreation: Boolean = false + } + } + + @Explanation("This error indicates that a topology transaction could not be found.") + @Resolution( + "The topology transaction either has been rejected, is not valid anymore, is not yet valid, or does not yet exist." + ) + object TopologyTransactionNotFound + extends ErrorCode( + id = "TOPOLOGY_TRANSACTION_NOT_FOUND", + ErrorCategory.InvalidGivenCurrentSystemStateResourceMissing, + ) { + final case class Failure(txHash: TxHash, effective: EffectiveTime)(implicit + val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = + s"Topology transaction with hash ${txHash} does not exist or is not active or is not an active proposal at $effective" + ) + with TopologyManagerError + } + + @Explanation( + """This error indicates that the secret key with the respective fingerprint can not be found.""" + ) + @Resolution( + "Ensure you only use fingerprints of secret keys stored in your secret key store." + ) + object SecretKeyNotInStore + extends ErrorCode( + id = "SECRET_KEY_NOT_IN_STORE", + ErrorCategory.InvalidGivenCurrentSystemStateOther, + ) { + final case class Failure(keyId: Fingerprint)(implicit val loggingContext: ErrorLoggingContext) + extends CantonError.Impl( + cause = "Secret key with given fingerprint could not be found" + ) + with TopologyManagerError + } + + @Explanation( + """This error indicates that a command contained a fingerprint referring to a public key not being present in the public key store.""" + ) + @Resolution( + "Upload the public key to the public key store using $node.keys.public.load(.) before retrying." 
+ ) + object PublicKeyNotInStore + extends ErrorCode( + id = "PUBLIC_KEY_NOT_IN_STORE", + ErrorCategory.InvalidGivenCurrentSystemStateOther, + ) { + final case class Failure(keyId: Fingerprint)(implicit val loggingContext: ErrorLoggingContext) + extends CantonError.Impl( + cause = "Public key with given fingerprint is missing in the public key store" + ) + with TopologyManagerError + } + + @Explanation( + """This error indicates that the uploaded signed transaction contained an invalid signature.""" + ) + @Resolution( + "Ensure that the transaction is valid and uses a crypto version understood by this participant." + ) + object InvalidSignatureError extends AlarmErrorCode(id = "INVALID_TOPOLOGY_TX_SIGNATURE_ERROR") { + + final case class Failure(error: SignatureCheckError)(implicit + override val loggingContext: ErrorLoggingContext + ) extends Alarm(cause = "Transaction signature verification failed") + with TopologyManagerError + } + + object SerialMismatch + extends ErrorCode(id = "SERIAL_MISMATCH", ErrorCategory.InvalidGivenCurrentSystemStateOther) { + final case class Failure(expected: PositiveInt, actual: PositiveInt)(implicit + val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = s"The given serial $actual did not match the expected serial $expected." + ) + with TopologyManagerError + } + + object WrongDomain + extends ErrorCode(id = "INVALID_DOMAIN", ErrorCategory.InvalidIndependentOfSystemState) { + final case class Failure(wrong: DomainId)(implicit + val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = s"Wrong domain $wrong" + ) + with TopologyManagerError + } + + @Explanation( + """This error indicates that a transaction has already been added previously.""" + ) + @Resolution( + """Nothing to do as the transaction is already registered. Note however that a revocation is " + + final. 
If you want to re-enable a statement, you need to re-issue an new transaction.""" + ) + object DuplicateTransaction + extends ErrorCode( + id = "DUPLICATE_TOPOLOGY_TRANSACTION", + ErrorCategory.InvalidGivenCurrentSystemStateResourceExists, + ) { + final case class Failure( + transaction: TopologyTransaction[TopologyChangeOp], + authKey: Fingerprint, + )(implicit + val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = "The given topology transaction already exists." + ) + with TopologyManagerError + final case class ExistsAt(ts: CantonTimestamp)(implicit + val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = s"The given topology transaction already exists at ${ts}." + ) + with TopologyManagerError + } + + @Explanation( + """This error indicates that a topology transaction would create a state that already exists and has been authorized with the same key.""" + ) + @Resolution("""Your intended change is already in effect.""") + object MappingAlreadyExists + extends ErrorCode( + id = "TOPOLOGY_MAPPING_ALREADY_EXISTS", + ErrorCategory.InvalidGivenCurrentSystemStateResourceExists, + ) { + final case class Failure(existing: TopologyStateElement[TopologyMapping], authKey: Fingerprint)( + implicit val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = + "A matching topology mapping authorized with the same key already exists in this state" + ) + with TopologyManagerError + + final case class FailureX(existing: TopologyMappingX, keys: NonEmpty[Set[Fingerprint]])(implicit + val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = + "A matching topology mapping x authorized with the same keys already exists in this state" + ) + with TopologyManagerError + } + + @Explanation( + """This error results if the topology manager did not find a secret key in its store to authorize a certain topology transaction.""" + ) + @Resolution("""Inspect your topology transaction and your secret 
key store and check that you have the + appropriate certificates and keys to issue the desired topology transaction. If the list of candidates is empty, + then you are missing the certificates.""") + object NoAppropriateSigningKeyInStore + extends ErrorCode( + id = "NO_APPROPRIATE_SIGNING_KEY_IN_STORE", + ErrorCategory.InvalidGivenCurrentSystemStateResourceMissing, + ) { + final case class Failure(candidates: Seq[Fingerprint])(implicit + val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = "Could not find an appropriate signing key to issue the topology transaction" + ) + with TopologyManagerError + } + + @Explanation( + """This error indicates that the attempt to add a transaction was rejected, as the signing key is not authorized within the current state.""" + ) + @Resolution( + """Inspect the topology state and ensure that valid namespace or identifier delegations of the signing key exist or upload them before adding this transaction.""" + ) + object UnauthorizedTransaction extends AlarmErrorCode(id = "UNAUTHORIZED_TOPOLOGY_TRANSACTION") { + + final case class Failure()(implicit override val loggingContext: ErrorLoggingContext) + extends Alarm(cause = "Topology transaction is not properly authorized") + with TopologyManagerError + } + + @Explanation( + """This error indicates that the attempt to add a removal transaction was rejected, as the mapping / element affecting the removal did not exist.""" + ) + @Resolution( + """Inspect the topology state and ensure the mapping and the element id of the active transaction you are trying to revoke matches your revocation arguments.""" + ) + object NoCorrespondingActiveTxToRevoke + extends ErrorCode( + id = "NO_CORRESPONDING_ACTIVE_TX_TO_REVOKE", + ErrorCategory.InvalidGivenCurrentSystemStateOther, + ) { + final case class Mapping(mapping: TopologyMapping)(implicit + val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = + "There is no active topology transaction 
matching the mapping of the revocation request" + ) + with TopologyManagerError + final case class Element(element: TopologyStateElement[TopologyMapping])(implicit + val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = + "There is no active topology transaction matching the element of the revocation request" + ) + with TopologyManagerError + } + + @Explanation( + """This error indicates that the attempted key removal would remove the last valid key of the given entity, making the node unusable.""" + ) + @Resolution( + """Add the `force = true` flag to your command if you are really sure what you are doing.""" + ) + object RemovingLastKeyMustBeForced + extends ErrorCode( + id = "REMOVING_LAST_KEY_MUST_BE_FORCED", + ErrorCategory.InvalidGivenCurrentSystemStateOther, + ) { + final case class Failure(key: Fingerprint, purpose: KeyPurpose)(implicit + val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = "Topology transaction would remove the last key of the given entity" + ) + with TopologyManagerError + } + + @Explanation( + """This error indicates that the attempted key removal would create dangling topology transactions, making the node unusable.""" + ) + @Resolution( + """Add the `force = true` flag to your command if you are really sure what you are doing.""" + ) + object RemovingKeyWithDanglingTransactionsMustBeForced + extends ErrorCode( + id = "REMOVING_KEY_DANGLING_TRANSACTIONS_MUST_BE_FORCED", + ErrorCategory.InvalidGivenCurrentSystemStateOther, + ) { + final case class Failure(key: Fingerprint, purpose: KeyPurpose)(implicit + val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = + "Topology transaction would remove a key that creates conflicts and dangling transactions" + ) + with TopologyManagerError + } + + @Explanation( + """This error indicates that it has been attempted to increase the ``ledgerTimeRecordTimeTolerance`` domain parameter in an insecure manner. 
+ |Increasing this parameter may disable security checks and can therefore be a security risk. + |""" + ) + @Resolution( + """Make sure that the new value of ``ledgerTimeRecordTimeTolerance`` is at most half of the ``mediatorDeduplicationTimeout`` domain parameter. + | + |Use ``myDomain.service.set_ledger_time_record_time_tolerance`` for securely increasing ledgerTimeRecordTimeTolerance. + | + |Alternatively, add the ``force = true`` flag to your command, if security is not a concern for you. + |The security checks will be effective again after twice the new value of ``ledgerTimeRecordTimeTolerance``. + |Using ``force = true`` is safe upon domain bootstrapping. + |""" + ) + object IncreaseOfLedgerTimeRecordTimeTolerance + extends ErrorCode( + id = "INCREASE_OF_LEDGER_TIME_RECORD_TIME_TOLERANCE", + ErrorCategory.InvalidGivenCurrentSystemStateOther, + ) { + final case class TemporarilyInsecure( + oldValue: NonNegativeFiniteDuration, + newValue: NonNegativeFiniteDuration, + )(implicit + override val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = + s"The parameter ledgerTimeRecordTimeTolerance can currently not be increased to $newValue." + ) + with TopologyManagerError + + final case class PermanentlyInsecure( + newLedgerTimeRecordTimeTolerance: NonNegativeFiniteDuration, + mediatorDeduplicationTimeout: NonNegativeFiniteDuration, + )(implicit + override val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = + s"Unable to increase ledgerTimeRecordTimeTolerance to $newLedgerTimeRecordTimeTolerance, because it must not be more than half of mediatorDeduplicationTimeout ($mediatorDeduplicationTimeout)." + ) + with TopologyManagerError + } + + @Explanation( + "This error indicates that the attempted update of the extra traffic limits for a particular member failed because the new limit is lower than the current limit." + ) + @Resolution( + """Extra traffic limits can only be increased. 
Submit the topology transaction with a higher limit. + |The metadata details of this error contain the expected minimum value in the field ``expectedMinimum``.""" + ) + object InvalidTrafficLimit + extends ErrorCode( + id = "INVALID_TRAFFIC_LIMIT", + ErrorCategory.InvalidIndependentOfSystemState, + ) { + final case class TrafficLimitTooLow( + member: Member, + actual: PositiveLong, + expectedMinimum: PositiveLong, + )(implicit + override val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = + s"The extra traffic limit for $member should be at least $expectedMinimum, but was $actual." + ) + with TopologyManagerError + } + + @Explanation( + "This error indicates that a threshold in the submitted transaction was higher than the number of members that would have to satisfy that threshold." + ) + @Resolution( + """Submit the topology transaction with a lower threshold. + |The metadata details of this error contain the expected maximum in the field ``expectedMaximum``.""" + ) + object InvalidThreshold + extends ErrorCode(id = "INVALID_THRESHOLD", ErrorCategory.InvalidIndependentOfSystemState) { + final case class ThresholdTooHigh(actual: Int, expectedMaximum: Int)(implicit + override val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = s"Threshold must not be higher than $expectedMaximum, but was $actual." + ) + with TopologyManagerError + } + + @Explanation( + "This error indicates that members referenced in a topology transaction have not declared at least one signing key or at least 1 encryption key or both." + ) + @Resolution( + """Ensure that all members referenced in the topology transaction have declared at least one signing key and at least one encryption key, then resubmit the failed transaction. 
+ |The metadata details of this error contain the members with the missing keys in the field ``members``.""" + ) + object InsufficientKeys + extends ErrorCode( + id = "INSUFFICIENT_KEYS", + ErrorCategory.InvalidGivenCurrentSystemStateOther, + ) { + final case class Failure(members: Seq[Member])(implicit + override val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = + s"Members ${members.sorted.mkString(", ")} are missing a signing key or an encryption key or both." + ) + with TopologyManagerError + } + + @Explanation( + "This error indicates that the topology transaction references members that are currently unknown." + ) + @Resolution( + """Wait for the onboarding of the members to be become active or remove the unknown members from the topology transaction. + |The metadata details of this error contain the unknown member in the field ``members``.""" + ) + object UnknownMembers + extends ErrorCode( + id = "UNKNOWN_MEMBERS", + ErrorCategory.InvalidGivenCurrentSystemStateResourceMissing, + ) { + final case class Failure(members: Seq[Member])(implicit + override val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = s"Members ${members.sorted.mkString(", ")} are unknown." 
+ ) + with TopologyManagerError + } + + @Explanation( + """This error indicates that a participant is trying to rescind their domain trust certificate + |while still being hosting parties.""" + ) + @Resolution( + """The participant should work with the owners of the parties mentioned in the ``parties`` field in the + |error details metadata to get itself removed from the list of hosting participants of those parties.""" + ) + object IllegalRemovalOfDomainTrustCertificate + extends ErrorCode( + id = "ILLEGAL_REMOVAL_OF_DOMAIN_TRUST_CERTIFICATE", + ErrorCategory.InvalidGivenCurrentSystemStateOther, + ) { + final case class ParticipantStillHostsParties( + participantId: ParticipantId, + parties: Seq[PartyId], + )(implicit + override val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = + s"Cannot remove domain trust certificate for $participantId because it still hosts parties ${parties.sorted + .mkString(",")}" + ) + with TopologyManagerError + } + + abstract class DomainErrorGroup extends ErrorGroup() + abstract class ParticipantErrorGroup extends ErrorGroup() + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManagerStatus.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManagerStatus.scala new file mode 100644 index 0000000000..415dc81b40 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManagerStatus.scala @@ -0,0 +1,17 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology + +import com.digitalasset.canton.topology.store.TopologyStoreId + +trait TopologyManagerStatus { + def queueSize: Int +} + +object TopologyManagerStatus { + def combined(managers: TopologyManagerX[TopologyStoreId]*): TopologyManagerStatus = + new TopologyManagerStatus { + override def queueSize: Int = managers.map(_.queueSize).sum + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManagerX.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManagerX.scala new file mode 100644 index 0000000000..706b25ddce --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManagerX.scala @@ -0,0 +1,426 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology + +import cats.data.EitherT +import cats.syntax.parallel.* +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.concurrent.FutureSupervisor +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.crypto.* +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.lifecycle.{FlagCloseable, FutureUnlessShutdown, Lifecycle} +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.time.Clock +import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime} +import com.digitalasset.canton.topology.store.TopologyStoreId.{AuthorizedStore, DomainStore} +import com.digitalasset.canton.topology.store.{TopologyStoreId, TopologyStoreX} +import com.digitalasset.canton.topology.transaction.SignedTopologyTransactionX.GenericSignedTopologyTransactionX +import 
com.digitalasset.canton.topology.transaction.TopologyTransactionX.TxHash +import com.digitalasset.canton.topology.transaction.* +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.FutureInstances.* +import com.digitalasset.canton.util.ShowUtil.* +import com.digitalasset.canton.util.SimpleExecutionQueue +import com.digitalasset.canton.version.ProtocolVersion +import com.google.common.annotations.VisibleForTesting + +import java.util.concurrent.atomic.AtomicReference +import scala.concurrent.{ExecutionContext, Future} + +trait TopologyManagerObserver { + def addedNewTransactions( + timestamp: CantonTimestamp, + transactions: Seq[SignedTopologyTransactionX[TopologyChangeOpX, TopologyMappingX]], + )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] +} + +class DomainTopologyManagerX( + clock: Clock, + crypto: Crypto, + override val store: TopologyStoreX[DomainStore], + val outboxQueue: DomainOutboxQueue, + enableTopologyTransactionValidation: Boolean, + timeouts: ProcessingTimeout, + futureSupervisor: FutureSupervisor, + loggerFactory: NamedLoggerFactory, +)(implicit ec: ExecutionContext) + extends TopologyManagerX[DomainStore]( + clock, + crypto, + store, + timeouts, + futureSupervisor, + loggerFactory, + ) { + override protected val processor: TopologyStateProcessorX = + new TopologyStateProcessorX( + store, + Some(outboxQueue), + enableTopologyTransactionValidation, + new ValidatingTopologyMappingXChecks(store, loggerFactory), + crypto, + loggerFactory, + ) + + // When evaluating transactions against the domain store, we want to validate against + // the head state. We need to take all previously sequenced transactions into account, because + // we don't know when the submitted transaction actually gets sequenced. 
+ override def timestampForValidation(): CantonTimestamp = CantonTimestamp.MaxValue +} + +class AuthorizedTopologyManagerX( + clock: Clock, + crypto: Crypto, + store: TopologyStoreX[AuthorizedStore], + enableTopologyTransactionValidation: Boolean, + timeouts: ProcessingTimeout, + futureSupervisor: FutureSupervisor, + loggerFactory: NamedLoggerFactory, +)(implicit ec: ExecutionContext) + extends TopologyManagerX[AuthorizedStore]( + clock, + crypto, + store, + timeouts, + futureSupervisor, + loggerFactory, + ) { + override protected val processor: TopologyStateProcessorX = + new TopologyStateProcessorX( + store, + None, + enableTopologyTransactionValidation, + NoopTopologyMappingXChecks, + crypto, + loggerFactory, + ) + + // for the authorized store, we take the next unique timestamp, because transactions + // are directly persisted into the store. + override def timestampForValidation(): CantonTimestamp = clock.uniqueTime() +} + +abstract class TopologyManagerX[+StoreID <: TopologyStoreId]( + val clock: Clock, + val crypto: Crypto, + val store: TopologyStoreX[StoreID], + val timeouts: ProcessingTimeout, + futureSupervisor: FutureSupervisor, + protected val loggerFactory: NamedLoggerFactory, +)(implicit ec: ExecutionContext) + extends TopologyManagerStatus + with NamedLogging + with FlagCloseable { + + def timestampForValidation(): CantonTimestamp + + // sequential queue to run all the processing that does operate on the state + protected val sequentialQueue = new SimpleExecutionQueue( + "topology-manager-x-queue", + futureSupervisor, + timeouts, + loggerFactory, + ) + + protected val processor: TopologyStateProcessorX + + override def queueSize: Int = sequentialQueue.queueSize + + private val observers = new AtomicReference[Seq[TopologyManagerObserver]](Seq.empty) + def addObserver(observer: TopologyManagerObserver): Unit = + observers.updateAndGet(_ :+ observer).discard + + @VisibleForTesting + def clearObservers(): Unit = observers.set(Seq.empty) + + /** 
Authorizes a new topology transaction by signing it and adding it to the topology state + * + * @param op the operation that should be performed + * @param mapping the mapping that should be added + * @param signingKeys the key which should be used to sign + * @param protocolVersion the protocol version corresponding to the transaction + * @param expectFullAuthorization whether the transaction must be fully signed and authorized by keys on this node + * @param force force dangerous operations, such as removing the last signing key of a participant + * @return the domain state (initialized or not initialized) or an error code of why the addition failed + */ + def proposeAndAuthorize( + op: TopologyChangeOpX, + mapping: TopologyMappingX, + serial: Option[PositiveInt], + signingKeys: Seq[Fingerprint], + protocolVersion: ProtocolVersion, + expectFullAuthorization: Boolean, + force: Boolean = false, + )(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, TopologyManagerError, GenericSignedTopologyTransactionX] = { + logger.debug(show"Attempting to build, sign, and ${op} ${mapping} with serial $serial") + for { + tx <- build(op, mapping, serial, protocolVersion, signingKeys).mapK( + FutureUnlessShutdown.outcomeK + ) + signedTx <- signTransaction(tx, signingKeys, isProposal = !expectFullAuthorization) + .mapK(FutureUnlessShutdown.outcomeK) + _ <- add(Seq(signedTx), force = force, expectFullAuthorization) + } yield signedTx + } + + /** Authorizes an existing topology transaction by signing it and adding it to the topology state. + * If {@code expectFullAuthorization} is {@code true} and the topology transaction cannot be fully + * authorized with keys from this node, returns with an error and the existing topology transaction + * remains unchanged. 
+ * + * @param transactionHash the uniquely identifying hash of a previously proposed topology transaction + * @param signingKeys the key which should be used to sign + * @param force force dangerous operations, such as removing the last signing key of a participant + * @param expectFullAuthorization whether the resulting transaction must be fully authorized or not + * @return the signed transaction or an error code of why the addition failed + */ + def accept( + transactionHash: TxHash, + signingKeys: Seq[Fingerprint], + force: Boolean, + expectFullAuthorization: Boolean, + )(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, TopologyManagerError, GenericSignedTopologyTransactionX] = { + val effective = EffectiveTime(clock.now) + for { + transactionsForHash <- EitherT + .right[TopologyManagerError]( + store.findTransactionsByTxHash(effective, NonEmpty(Set, transactionHash)) + ) + .mapK(FutureUnlessShutdown.outcomeK) + existingTransaction <- + EitherT.fromEither[FutureUnlessShutdown][ + TopologyManagerError, + GenericSignedTopologyTransactionX, + ](transactionsForHash match { + case Seq(tx) => Right(tx) + case Seq() => + Left( + TopologyManagerError.TopologyTransactionNotFound.Failure(transactionHash, effective) + ) + case tooManyActiveTransactionsWithSameHash => + // TODO(#12390) proper error + Left( + TopologyManagerError.InternalError.ImplementMe( + s"found too many transactions for txHash=$transactionsForHash: $tooManyActiveTransactionsWithSameHash" + ) + ) + }) + extendedTransaction <- extendSignature(existingTransaction, signingKeys).mapK( + FutureUnlessShutdown.outcomeK + ) + _ <- add( + Seq(extendedTransaction), + force = force, + expectFullAuthorization = expectFullAuthorization, + ) + } yield { + extendedTransaction + } + } + + def build[Op <: TopologyChangeOpX, M <: TopologyMappingX]( + op: Op, + mapping: M, + serial: Option[PositiveInt], + protocolVersion: ProtocolVersion, + newSigningKeys: Seq[Fingerprint], + )(implicit + 
traceContext: TraceContext + ): EitherT[Future, TopologyManagerError, TopologyTransactionX[Op, M]] = { + for { + existingTransactions <- EitherT.right( + store.findTransactionsForMapping(EffectiveTime.MaxValue, NonEmpty(Set, mapping.uniqueKey)) + ) + _ = if (existingTransactions.sizeCompare(1) > 0) + logger.warn( + s"found more than one valid mapping for unique key ${mapping.uniqueKey} of type ${mapping.code}" + ) + existingTransaction = existingTransactions + .sortBy(_.transaction.serial) + .lastOption + .map(t => (t.transaction.op, t.transaction.mapping, t.transaction.serial, t.signatures)) + + // If the same operation and mapping is proposed repeatedly, insist that + // new keys are being added. Otherwise reject consistently with daml 2.x-based topology management. + _ <- existingTransaction match { + case Some((`op`, `mapping`, _, existingSignatures)) if op == TopologyChangeOpX.Replace => + EitherT.cond[Future][TopologyManagerError, Unit]( + (newSigningKeys.toSet -- existingSignatures.map(_.signedBy).toSet).nonEmpty, + (), + TopologyManagerError.MappingAlreadyExists + .FailureX(mapping, existingSignatures.map(_.signedBy)), + ) + case _ => EitherT.rightT[Future, TopologyManagerError](()) + } + + theSerial <- ((existingTransaction, serial) match { + case (None, None) => + // auto-select 1 + EitherT.rightT(PositiveInt.one) + case (None, Some(proposed)) => + // didn't find an existing transaction, therefore the proposed serial must be 1 + EitherT.cond[Future][TopologyManagerError, PositiveInt]( + proposed == PositiveInt.one, + PositiveInt.one, + TopologyManagerError.SerialMismatch.Failure(PositiveInt.one, proposed), + ) + + // TODO(#12390) existing mapping and the proposed mapping are the same. does this only add a (superfluous) signature? + // maybe we should reject this proposal, but for now we need this to pass through successfully, because we don't + // support proper topology transaction validation yet, especially not for multi-sig transactions. 
+ case (Some((`op`, `mapping`, existingSerial, _)), None) => + // auto-select existing + EitherT.rightT(existingSerial) + case (Some((`op`, `mapping`, existingSerial, _)), Some(proposed)) => + EitherT.cond[Future]( + existingSerial == proposed, + existingSerial, + TopologyManagerError.SerialMismatch.Failure(existingSerial, proposed), + ) + + case (Some((_, _, existingSerial, _)), None) => + // auto-select existing+1 + EitherT.rightT(existingSerial.increment) + case (Some((_, _, existingSerial, _)), Some(proposed)) => + // check that the proposed serial matches existing+1 + val next = existingSerial.increment + EitherT.cond[Future]( + next == proposed, + next, + TopologyManagerError.SerialMismatch.Failure(next, proposed), + ) + }): EitherT[Future, TopologyManagerError, PositiveInt] + } yield TopologyTransactionX(op, theSerial, mapping, protocolVersion) + } + + def signTransaction[Op <: TopologyChangeOpX, M <: TopologyMappingX]( + transaction: TopologyTransactionX[Op, M], + signingKeys: Seq[Fingerprint], + isProposal: Boolean, + )(implicit + traceContext: TraceContext + ): EitherT[Future, TopologyManagerError, SignedTopologyTransactionX[Op, M]] = { + for { + // find signing keys. + keys <- (signingKeys match { + case first +: rest => + // TODO(#12945) should we check whether this node could sign with keys that are required in addition to the ones provided in signingKeys, and fetch those keys? + EitherT.pure(NonEmpty.mk(Set, first, rest: _*)) + case _empty => + // TODO(#12945) get signing keys for transaction. + EitherT.leftT( + TopologyManagerError.InternalError.ImplementMe( + "Automatic signing key lookup not yet implemented. Please specify a signing explicitly." 
+ ) + ) + }): EitherT[Future, TopologyManagerError, NonEmpty[Set[Fingerprint]]] + // create signed transaction + signed <- SignedTopologyTransactionX + .create( + transaction, + keys, + isProposal, + crypto.privateCrypto, + // TODO(#14048) The `SignedTopologyTransactionX` may use a different versioning scheme than the contained transaction. Use the right protocol version here + transaction.representativeProtocolVersion.representative, + ) + .leftMap { + case SigningError.UnknownSigningKey(keyId) => + TopologyManagerError.SecretKeyNotInStore.Failure(keyId) + case err => TopologyManagerError.InternalError.TopologySigningError(err) + }: EitherT[Future, TopologyManagerError, SignedTopologyTransactionX[Op, M]] + } yield signed + } + + def extendSignature[Op <: TopologyChangeOpX, M <: TopologyMappingX]( + transaction: SignedTopologyTransactionX[Op, M], + signingKey: Seq[Fingerprint], + )(implicit + traceContext: TraceContext + ): EitherT[Future, TopologyManagerError, SignedTopologyTransactionX[Op, M]] = { + for { + // find signing keys + keys <- (signingKey match { + case keys @ (_first +: _rest) => + // TODO(#12945) filter signing keys relevant for the required authorization for this transaction + EitherT.rightT(keys.toSet) + case _ => + // TODO(#12945) fetch signing keys that are relevant for the required authorization for this transaction + EitherT.leftT( + TopologyManagerError.InternalError.ImplementMe( + "Automatic signing key lookup not yet implemented. Please specify a signing explicitly." + ) + ) + }): EitherT[Future, TopologyManagerError, Set[Fingerprint]] + signatures <- keys.toSeq.parTraverse( + crypto.privateCrypto + .sign(transaction.transaction.hash.hash, _) + .leftMap(err => + TopologyManagerError.InternalError.TopologySigningError(err): TopologyManagerError + ) + ) + } yield transaction.addSignatures(signatures) + } + + /** sequential(!) 
adding of topology transactions + * + * @param force force a dangerous change (such as revoking the last key) + */ + def add( + transactions: Seq[GenericSignedTopologyTransactionX], + force: Boolean, + expectFullAuthorization: Boolean, + )(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, TopologyManagerError, Unit] = + sequentialQueue.executeE( + { + val ts = timestampForValidation() + for { + // validate incrementally and apply to in-memory state + _ <- processor + .validateAndApplyAuthorization( + SequencedTime(ts), + EffectiveTime(ts), + transactions, + abortIfCascading = !force, + expectFullAuthorization, + ) + .leftMap { res => + // a "duplicate rejection" is not a reason to report an error as it's just a no-op + res.flatMap(_.nonDuplicateRejectionReason).headOption match { + case Some(rejection) => rejection.toTopologyManagerError + case None => + TopologyManagerError.InternalError + .Other("Topology transaction validation failed but there are no rejections") + } + } + _ <- EitherT.right(notifyObservers(ts, transactions)) + } yield () + }, + "add-topology-transaction", + ) + + /** notify observers about new transactions about to be stored */ + protected def notifyObservers( + timestamp: CantonTimestamp, + transactions: Seq[GenericSignedTopologyTransactionX], + )(implicit traceContext: TraceContext): Future[Unit] = Future + .sequence( + observers + .get() + .map(_.addedNewTransactions(timestamp, transactions).onShutdown(())) + ) + .map(_ => ()) + + override protected def onClosed(): Unit = Lifecycle.close(store, sequentialQueue)(logger) + + override def toString: String = s"TopologyManagerX[${store.storeId}]" +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyStateProcessorX.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyStateProcessorX.scala new file mode 100644 index 0000000000..18276fc24a --- /dev/null +++ 
b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyStateProcessorX.scala @@ -0,0 +1,420 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology + +import cats.data.EitherT +import cats.instances.seq.* +import cats.syntax.foldable.* +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.crypto.Crypto +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.topology.processing.{ + EffectiveTime, + IncomingTopologyTransactionAuthorizationValidatorX, + SequencedTime, +} +import com.digitalasset.canton.topology.store.ValidatedTopologyTransactionX.GenericValidatedTopologyTransactionX +import com.digitalasset.canton.topology.store.{ + TopologyStoreId, + TopologyStoreX, + TopologyTransactionRejection, + ValidatedTopologyTransactionX, +} +import com.digitalasset.canton.topology.transaction.SignedTopologyTransactionX.GenericSignedTopologyTransactionX +import com.digitalasset.canton.topology.transaction.TopologyMappingX.MappingHash +import com.digitalasset.canton.topology.transaction.TopologyMappingXChecks +import com.digitalasset.canton.topology.transaction.TopologyTransactionX.TxHash +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.ErrorUtil + +import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference} +import scala.collection.concurrent.TrieMap +import scala.concurrent.{ExecutionContext, Future} + +/** @param outboxQueue If a [[DomainOutboxQueue]] is provided, the processed transactions are not directly stored, + * but rather sent to the domain via an ephemeral queue (i.e. no persistence). 
+ * @param enableTopologyTransactionValidation If disabled, all of the authorization validation logic in + * IncomingTopologyTransactionAuthorizationValidatorX is skipped. + */ +class TopologyStateProcessorX( + val store: TopologyStoreX[TopologyStoreId], + outboxQueue: Option[DomainOutboxQueue], + enableTopologyTransactionValidation: Boolean, + topologyMappingXChecks: TopologyMappingXChecks, + crypto: Crypto, + loggerFactoryParent: NamedLoggerFactory, +)(implicit ec: ExecutionContext) + extends NamedLogging { + + override protected val loggerFactory: NamedLoggerFactory = + loggerFactoryParent.append("store", store.storeId.toString) + + // small container to store potentially pending data + private case class MaybePending(originalTx: GenericSignedTopologyTransactionX) { + val adjusted = new AtomicReference[Option[GenericSignedTopologyTransactionX]](None) + val rejection = new AtomicReference[Option[TopologyTransactionRejection]](None) + val expireImmediately = new AtomicBoolean(false) + + def currentTx: GenericSignedTopologyTransactionX = adjusted.get().getOrElse(originalTx) + + def validatedTx: GenericValidatedTopologyTransactionX = + ValidatedTopologyTransactionX(currentTx, rejection.get(), expireImmediately.get()) + } + + // TODO(#14063) use cache instead and remember empty + private val txForMapping = TrieMap[MappingHash, MaybePending]() + private val proposalsByMapping = TrieMap[MappingHash, Seq[TxHash]]() + private val proposalsForTx = TrieMap[TxHash, MaybePending]() + + private val authValidator = + new IncomingTopologyTransactionAuthorizationValidatorX( + crypto.pureCrypto, + store, + None, + loggerFactory.append("role", "incoming"), + ) + + // compared to the old topology stores, the x stores don't distinguish between + // state & transaction store. cascading deletes are irrevocable and delete everything + // that depended on a certificate. 
+ + /** validate the authorization and the signatures of the given transactions + * + * The function is NOT THREAD SAFE AND MUST RUN SEQUENTIALLY + */ + def validateAndApplyAuthorization( + sequenced: SequencedTime, + effective: EffectiveTime, + transactions: Seq[GenericSignedTopologyTransactionX], + // TODO(#12390) propagate and abort unless we use force + abortIfCascading: Boolean, + expectFullAuthorization: Boolean, + )(implicit + traceContext: TraceContext + ): EitherT[Future, Seq[GenericValidatedTopologyTransactionX], Seq[ + GenericValidatedTopologyTransactionX + ]] = { + // if transactions aren't persisted in the store but rather enqueued in the domain outbox queue, + // the processing should abort on errors, because we don't want to enqueue rejected transactions. + val abortOnError = outboxQueue.nonEmpty + + type Lft = Seq[GenericValidatedTopologyTransactionX] + + // first, pre-load the currently existing mappings and proposals for the given transactions + val preloadTxsForMappingF = preloadTxsForMapping(EffectiveTime.MaxValue, transactions) + val preloadProposalsForTxF = preloadProposalsForTx(EffectiveTime.MaxValue, transactions) + // TODO(#14064) preload authorization data + val ret = for { + _ <- EitherT.right[Lft](preloadProposalsForTxF) + _ <- EitherT.right[Lft](preloadTxsForMappingF) + // compute / collapse updates + (removesF, pendingWrites) = { + val pendingWrites = transactions.map(MaybePending) + val removes = pendingWrites + .foldLeftM((Set.empty[MappingHash], Set.empty[TxHash])) { + case ((removeMappings, removeTxs), tx) => + validateAndMerge( + effective, + tx.originalTx, + expectFullAuthorization || !tx.originalTx.isProposal, + ).map { finalTx => + tx.adjusted.set(Some(finalTx.transaction)) + tx.rejection.set(finalTx.rejectionReason) + determineRemovesAndUpdatePending(tx, removeMappings, removeTxs) + } + } + (removes, pendingWrites) + } + removes <- EitherT.right[Lft](removesF) + (mappingRemoves, txRemoves) = removes + validatedTx = 
pendingWrites.map(pw => pw.validatedTx) + _ <- EitherT.cond[Future]( + // TODO(#12390) differentiate error reason and only abort actual errors, not in-batch merges + !abortOnError || validatedTx.forall(_.nonDuplicateRejectionReason.isEmpty), + (), { + // reset caches as they are broken now if we abort + clearCaches() + validatedTx + }: Lft, + ): EitherT[Future, Lft, Unit] + + _ <- outboxQueue match { + case Some(queue) => + // if we use the domain outbox queue, we must also reset the caches, because the local validation + // doesn't automatically imply successful validation once the transactions have been sequenced. + clearCaches() + EitherT.rightT[Future, Lft](queue.enqueue(validatedTx.map(_.transaction))) + + case None => + EitherT.right[Lft]( + store.update( + sequenced, + effective, + mappingRemoves, + txRemoves, + validatedTx, + ) + ) + } + } yield validatedTx + ret.bimap( + failed => { + logger.info("Topology transactions failed:\n " + failed.mkString("\n ")) + failed + }, + success => { + if (outboxQueue.isEmpty) { + logger.info( + s"Persisted topology transactions ($sequenced, $effective):\n" + success + .mkString( + ",\n" + ) + ) + } else logger.info("Enqueued topology transactions:\n" + success.mkString((",\n"))) + success + }, + ) + } + + private def clearCaches(): Unit = { + txForMapping.clear() + proposalsForTx.clear() + proposalsByMapping.clear() + authValidator.reset() + } + + private def preloadTxsForMapping( + effective: EffectiveTime, + transactions: Seq[GenericSignedTopologyTransactionX], + )(implicit traceContext: TraceContext): Future[Unit] = { + val hashes = NonEmpty.from( + transactions + .map(x => x.transaction.mapping.uniqueKey) + .filterNot(txForMapping.contains) + .toSet + ) + hashes.fold(Future.unit) { + store + .findTransactionsForMapping(effective, _) + .map(_.foreach { item => + txForMapping.put(item.transaction.mapping.uniqueKey, MaybePending(item)).discard + }) + } + } + + private def trackProposal(txHash: TxHash, mappingHash: 
MappingHash): Unit = { + proposalsByMapping + .updateWith(mappingHash) { + case None => Some(Seq(txHash)) + case Some(seq) => Some(seq :+ txHash) + } + .discard + } + + private def preloadProposalsForTx( + effective: EffectiveTime, + transactions: Seq[GenericSignedTopologyTransactionX], + )(implicit traceContext: TraceContext): Future[Unit] = { + val hashes = + NonEmpty.from( + transactions + .map(x => x.transaction.hash) + .filterNot(proposalsForTx.contains) + .toSet + ) + + hashes.fold(Future.unit) { + store + .findProposalsByTxHash(effective, _) + .map(_.foreach { item => + val txHash = item.transaction.hash + // store the proposal + proposalsForTx.put(txHash, MaybePending(item)).discard + // maintain a map from mapping to txs + trackProposal(txHash, item.transaction.mapping.uniqueKey) + }) + } + } + + private def serialIsMonotonicallyIncreasing( + inStore: Option[GenericSignedTopologyTransactionX], + toValidate: GenericSignedTopologyTransactionX, + ): Either[TopologyTransactionRejection, Unit] = inStore match { + case Some(value) => + val expected = value.transaction.serial.increment + Either.cond( + expected == toValidate.transaction.serial, + (), + TopologyTransactionRejection.SerialMismatch(expected, toValidate.transaction.serial), + ) + case None => Right(()) + } + + private def transactionIsAuthorized( + effective: EffectiveTime, + inStore: Option[GenericSignedTopologyTransactionX], + toValidate: GenericSignedTopologyTransactionX, + expectFullAuthorization: Boolean, + )(implicit + traceContext: TraceContext + ): EitherT[Future, TopologyTransactionRejection, GenericSignedTopologyTransactionX] = { + if (enableTopologyTransactionValidation) { + EitherT + .right( + authValidator + .validateAndUpdateHeadAuthState( + effective.value, + Seq(toValidate), + inStore.map(tx => tx.transaction.mapping.uniqueKey -> tx).toList.toMap, + expectFullAuthorization, + ) + ) + .subflatMap { case (_, txs) => + // TODO(#12390) proper error + txs.headOption + 
.toRight[TopologyTransactionRejection]( + TopologyTransactionRejection.Other("expected validation result doesn't exist") + ) + .flatMap(tx => tx.rejectionReason.toLeft(tx.transaction)) + } + } else { + EitherT.rightT(toValidate.copy(isProposal = false)) + } + } + + private def deduplicateAndMergeSignatures( + inStore: Option[GenericSignedTopologyTransactionX], + toValidate: GenericSignedTopologyTransactionX, + ): Either[TopologyTransactionRejection, (Boolean, GenericSignedTopologyTransactionX)] = + inStore match { + case Some(value) if value.transaction.hash == toValidate.transaction.hash => + if (toValidate.signatures.diff(value.signatures).isEmpty) { + // the new transaction doesn't provide any new signatures => Duplicate + // TODO(#12390) use proper timestamp or remove the timestamp from the error? + Left(TopologyTransactionRejection.Duplicate(CantonTimestamp.MinValue)) + } else Right((true, value.addSignatures(toValidate.signatures.toSeq))) + + case _ => Right((false, toValidate)) + } + + private def mergeWithPendingProposal( + toValidate: GenericSignedTopologyTransactionX + ): GenericSignedTopologyTransactionX = { + proposalsForTx.get(toValidate.transaction.hash) match { + case None => toValidate + case Some(existingProposal) => + toValidate.addSignatures(existingProposal.validatedTx.transaction.signatures.toSeq) + } + } + + private def validateAndMerge( + effective: EffectiveTime, + txA: GenericSignedTopologyTransactionX, + expectFullAuthorization: Boolean, + )(implicit traceContext: TraceContext): Future[GenericValidatedTopologyTransactionX] = { + // get current valid transaction for the given mapping + val tx_inStore = txForMapping.get(txA.transaction.mapping.uniqueKey).map(_.currentTx) + // first, merge a pending proposal with this transaction. 
we do this as it might + // subsequently activate the given transaction + val tx_mergedProposalSignatures = mergeWithPendingProposal(txA) + val ret = for { + mergeResult <- EitherT.fromEither[Future]( + deduplicateAndMergeSignatures(tx_inStore, tx_mergedProposalSignatures) + ) + (isMerge, tx_deduplicatedAndMerged) = mergeResult + // we check if the transaction is properly authorized given the current topology state + // if it is a proposal, then we demand that all signatures are appropriate (but + // not necessarily sufficient) + tx_authorized <- transactionIsAuthorized( + effective, + tx_inStore, + tx_deduplicatedAndMerged, + expectFullAuthorization, + ) + + // Run mapping specific semantic checks + _ <- topologyMappingXChecks.checkTransaction(effective, tx_authorized, tx_inStore) + + // we potentially merge the transaction with the currently active if this is just a signature update + // now, check if the serial is monotonically increasing + fullyValidated <- + if (isMerge) + EitherT.rightT[Future, TopologyTransactionRejection](tx_authorized) + else { + EitherT.fromEither[Future]( + serialIsMonotonicallyIncreasing(tx_inStore, tx_authorized).map(_ => tx_authorized) + ) + } + } yield fullyValidated + ret.fold( + // TODO(#12390) emit appropriate log message and use correct rejection reason + rejection => ValidatedTopologyTransactionX(txA, Some(rejection)), + tx => ValidatedTopologyTransactionX(tx, None), + ) + } + + private def determineRemovesAndUpdatePending( + tx: MaybePending, + removeMappings: Set[MappingHash], + removeTxs: Set[TxHash], + )(implicit traceContext: TraceContext): (Set[MappingHash], Set[TxHash]) = { + val finalTx = tx.currentTx + // UPDATE tx SET valid_until = effective WHERE storeId = XYZ + // AND valid_until is NULL and valid_from < effective + + if (tx.rejection.get().nonEmpty) { + // if the transaction has been rejected, we don't actually expire any proposals or currently valid transactions + (removeMappings, removeTxs) + } else if 
(finalTx.isProposal) { + // if this is a proposal, we only delete the "previously existing proposal" + // AND ((tx_hash = ..)) + val txHash = finalTx.transaction.hash + proposalsForTx.put(txHash, tx).foreach { existingProposal => + // update currently pending (this is relevant in case we have proposals for the + // same txs within a batch) + existingProposal.expireImmediately.set(true) + ErrorUtil.requireState( + existingProposal.rejection.get().isEmpty, + s"Error state should be empty for ${existingProposal}", + ) + } + trackProposal(txHash, finalTx.transaction.mapping.uniqueKey) + (removeMappings, removeTxs + txHash) + } else { + // if this is a sufficiently signed and valid transaction, we delete all existing proposals and the previous tx + // we can just use a mapping key: there can not be a future proposal, as it would violate the + // monotonically increasing check + // AND ((mapping_key = ...) ) + val mappingHash = finalTx.transaction.mapping.uniqueKey + txForMapping.put(mappingHash, tx).foreach { existingMapping => + // replace previous tx in case we have concurrent updates within the same timestamp + existingMapping.expireImmediately.set(true) + ErrorUtil.requireState( + existingMapping.rejection.get().isEmpty, + s"Error state should be empty for ${existingMapping}", + ) + } + // remove all pending proposals for this mapping + proposalsByMapping + .remove(mappingHash) + .foreach( + _.foreach(proposal => + proposalsForTx.remove(proposal).foreach { existing => + val cur = existing.rejection.getAndSet( + Some(TopologyTransactionRejection.Other("Outdated proposal within batch")) + ) + ErrorUtil.requireState(cur.isEmpty, s"Error state should be empty for ${existing}") + } + ) + ) + // TODO(#12390) if this is a removal of a certificate, compute cascading deletes + // if boolean flag is set, then abort, otherwise notify + // rules: if a namespace delegation is a root delegation, it won't be affected by the + // cascading deletion of its authorizer. 
this will allow us to roll namespace certs + // also, root cert authorization is only valid if serial == 1 + (removeMappings + mappingHash, removeTxs) + } + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/client/CachingDomainTopologyClient.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/client/CachingDomainTopologyClient.scala new file mode 100644 index 0000000000..47d0711a5e --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/client/CachingDomainTopologyClient.scala @@ -0,0 +1,615 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology.client + +import cats.data.EitherT +import cats.syntax.parallel.* +import com.daml.lf.data.Ref.PackageId +import com.digitalasset.canton.concurrent.FutureSupervisor +import com.digitalasset.canton.config.{BatchingConfig, CachingConfigs, ProcessingTimeout} +import com.digitalasset.canton.crypto.SigningPublicKey +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.protocol.DynamicDomainParametersWithValidity +import com.digitalasset.canton.time.Clock +import com.digitalasset.canton.topology.* +import com.digitalasset.canton.topology.client.PartyTopologySnapshotClient.PartyInfo +import com.digitalasset.canton.topology.processing.* +import com.digitalasset.canton.topology.store.{TopologyStore, TopologyStoreId, TopologyStoreX} +import com.digitalasset.canton.topology.transaction.SignedTopologyTransactionX.GenericSignedTopologyTransactionX +import com.digitalasset.canton.topology.transaction.* +import com.digitalasset.canton.tracing.{NoTracing, TraceContext} +import com.digitalasset.canton.util.FutureInstances.* +import 
com.digitalasset.canton.util.{ErrorUtil, MonadUtil} +import com.digitalasset.canton.version.ProtocolVersion +import com.digitalasset.canton.{LfPartyId, SequencerCounter} + +import java.util.concurrent.atomic.AtomicReference +import scala.concurrent.duration.Duration +import scala.concurrent.{ExecutionContext, Future, Promise} + +sealed abstract class BaseCachingDomainTopologyClient( + protected val clock: Clock, + parent: DomainTopologyClientWithInit, + cachingConfigs: CachingConfigs, + batchingConfig: BatchingConfig, + val timeouts: ProcessingTimeout, + override protected val futureSupervisor: FutureSupervisor, + val loggerFactory: NamedLoggerFactory, +)(implicit val executionContext: ExecutionContext) + extends DomainTopologyClientWithInit + with NamedLogging { + + override def updateHead( + effectiveTimestamp: EffectiveTime, + approximateTimestamp: ApproximateTime, + potentialTopologyChange: Boolean, + )(implicit traceContext: TraceContext): Unit = { + if (snapshots.get().isEmpty) { + appendSnapshot(approximateTimestamp.value) + } + if (potentialTopologyChange) + appendSnapshot(effectiveTimestamp.value) + parent.updateHead(effectiveTimestamp, approximateTimestamp, potentialTopologyChange) + } + + // snapshot caching entry + // this one is quite a special cache. generally, we want to avoid loading too much data from the database. + // now, we know that if there was no identity update between tx and ty, then snapshot(ty) == snapshot(tx) + // therefore, we remember the list of timestamps when updates happened and used that list in order to figure + // out which snapshot we can use instead of loading the data again and again. + // so we use the snapshots list to figure out the update timestamp and then we use the pointwise cache + // to load that update timestamp. 
+ protected class SnapshotEntry(val timestamp: CantonTimestamp) { + def get(): CachingTopologySnapshot = pointwise.get(timestamp.immediateSuccessor) + } + protected val snapshots = new AtomicReference[List[SnapshotEntry]](List.empty) + + private val pointwise = cachingConfigs.topologySnapshot + .buildScaffeine() + .build[CantonTimestamp, CachingTopologySnapshot] { (ts: CantonTimestamp) => + new CachingTopologySnapshot( + parent.trySnapshot(ts)(TraceContext.empty), + cachingConfigs, + batchingConfig, + loggerFactory, + ) + } + + protected def appendSnapshot(timestamp: CantonTimestamp): Unit = { + val item = new SnapshotEntry(timestamp) + val _ = snapshots.updateAndGet { cur => + if (cur.headOption.exists(_.timestamp > timestamp)) + cur + else + item :: (cur.filter( + _.timestamp.plusMillis( + cachingConfigs.topologySnapshot.expireAfterAccess.duration.toMillis + ) > timestamp + )) + } + } + + override def trySnapshot( + timestamp: CantonTimestamp + )(implicit traceContext: TraceContext): TopologySnapshotLoader = { + ErrorUtil.requireArgument( + timestamp <= topologyKnownUntilTimestamp, + s"requested snapshot=$timestamp, available snapshot=$topologyKnownUntilTimestamp", + ) + // find a matching existing snapshot + val cur = + snapshots.get().find(_.timestamp < timestamp) // note that timestamps are asOf exclusive + cur match { + // we'll use the cached snapshot client which defines the time-period this timestamp is in + case Some(snapshotEntry) => + new ForwardingTopologySnapshotClient(timestamp, snapshotEntry.get(), loggerFactory) + // this timestamp is outside of the window where we have tracked the timestamps of changes. 
+ // so let's do this pointwise + case None => + pointwise.get(timestamp) + } + + } + + override def domainId: DomainId = parent.domainId + + override def snapshotAvailable(timestamp: CantonTimestamp): Boolean = + parent.snapshotAvailable(timestamp) + override def awaitTimestamp( + timestamp: CantonTimestamp, + waitForEffectiveTime: Boolean, + )(implicit traceContext: TraceContext): Option[Future[Unit]] = + parent.awaitTimestamp(timestamp, waitForEffectiveTime) + + override def awaitTimestampUS( + timestamp: CantonTimestamp, + waitForEffectiveTime: Boolean, + )(implicit traceContext: TraceContext): Option[FutureUnlessShutdown[Unit]] = + parent.awaitTimestampUS(timestamp, waitForEffectiveTime) + + override def approximateTimestamp: CantonTimestamp = parent.approximateTimestamp + + override def currentSnapshotApproximation(implicit + traceContext: TraceContext + ): TopologySnapshotLoader = trySnapshot(approximateTimestamp) + + override def topologyKnownUntilTimestamp: CantonTimestamp = parent.topologyKnownUntilTimestamp + + override def await(condition: TopologySnapshot => Future[Boolean], timeout: Duration)(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[ + Boolean + ] = // we use our implementation such that we can benefit from cached data + parent.scheduleAwait(condition(currentSnapshotApproximation), timeout) + + override private[topology] def scheduleAwait(condition: => Future[Boolean], timeout: Duration) = + parent.scheduleAwait(condition, timeout) + + override def close(): Unit = { + parent.close() + } + + override def numPendingChanges: Int = parent.numPendingChanges +} + +final class CachingDomainTopologyClientOld( + clock: Clock, + parent: DomainTopologyClientWithInitOld, + cachingConfigs: CachingConfigs, + batchingConfig: BatchingConfig, + timeouts: ProcessingTimeout, + futureSupervisor: FutureSupervisor, + loggerFactory: NamedLoggerFactory, +)(implicit executionContext: ExecutionContext) + extends BaseCachingDomainTopologyClient( + clock, 
+ parent, + cachingConfigs, + batchingConfig, + timeouts, + futureSupervisor, + loggerFactory, + ) + with DomainTopologyClientWithInitOld { + override def observed( + sequencedTimestamp: SequencedTime, + effectiveTimestamp: EffectiveTime, + sequencerCounter: SequencerCounter, + transactions: Seq[SignedTopologyTransaction[TopologyChangeOp]], + )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = { + if (transactions.nonEmpty) { + // if there is a transaction, we insert the effective timestamp as a snapshot + appendSnapshot(effectiveTimestamp.value) + } else if (snapshots.get().isEmpty) { + // if we haven't seen any snapshot yet, we use the sequencer time to seed the first snapshot + appendSnapshot(sequencedTimestamp.value) + } + parent.observed(sequencedTimestamp, effectiveTimestamp, sequencerCounter, transactions) + } + +} + +final class CachingDomainTopologyClientX( + clock: Clock, + parent: DomainTopologyClientWithInitX, + cachingConfigs: CachingConfigs, + batchingConfig: BatchingConfig, + timeouts: ProcessingTimeout, + futureSupervisor: FutureSupervisor, + loggerFactory: NamedLoggerFactory, +)(implicit executionContext: ExecutionContext) + extends BaseCachingDomainTopologyClient( + clock, + parent, + cachingConfigs, + batchingConfig, + timeouts, + futureSupervisor, + loggerFactory, + ) + with DomainTopologyClientWithInitX { + override def observed( + sequencedTimestamp: SequencedTime, + effectiveTimestamp: EffectiveTime, + sequencerCounter: SequencerCounter, + transactions: Seq[GenericSignedTopologyTransactionX], + )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = { + if (transactions.nonEmpty) { + // if there is a transaction, we insert the effective timestamp as a snapshot + appendSnapshot(effectiveTimestamp.value) + } else if (snapshots.get().isEmpty) { + // if we haven't seen any snapshot yet, we use the sequencer time to seed the first snapshot + appendSnapshot(sequencedTimestamp.value) + } + 
parent.observed(sequencedTimestamp, effectiveTimestamp, sequencerCounter, transactions) + } + +} + +object CachingDomainTopologyClient { + + def create( + clock: Clock, + domainId: DomainId, + protocolVersion: ProtocolVersion, + store: TopologyStore[TopologyStoreId.DomainStore], + initKeys: Map[Member, Seq[SigningPublicKey]], + packageDependencies: PackageId => EitherT[Future, PackageId, Set[PackageId]], + cachingConfigs: CachingConfigs, + batchingConfig: BatchingConfig, + timeouts: ProcessingTimeout, + futureSupervisor: FutureSupervisor, + loggerFactory: NamedLoggerFactory, + )(implicit + executionContext: ExecutionContext, + traceContext: TraceContext, + ): Future[CachingDomainTopologyClientOld] = { + + val dbClient = + new StoreBasedDomainTopologyClient( + clock, + domainId, + protocolVersion, + store, + initKeys, + packageDependencies, + timeouts, + futureSupervisor, + loggerFactory, + ) + val caching = + new CachingDomainTopologyClientOld( + clock, + dbClient, + cachingConfigs, + batchingConfig, + timeouts, + futureSupervisor, + loggerFactory, + ) + store.maxTimestamp().map { x => + x.foreach { case (_, effective) => + caching + .updateHead(effective, effective.toApproximate, potentialTopologyChange = true) + } + caching + } + } + def createX( + clock: Clock, + domainId: DomainId, + protocolVersion: ProtocolVersion, + store: TopologyStoreX[TopologyStoreId.DomainStore], + packageDependencies: PackageId => EitherT[Future, PackageId, Set[PackageId]], + cachingConfigs: CachingConfigs, + batchingConfig: BatchingConfig, + timeouts: ProcessingTimeout, + futureSupervisor: FutureSupervisor, + loggerFactory: NamedLoggerFactory, + )(implicit + executionContext: ExecutionContext, + traceContext: TraceContext, + ): Future[CachingDomainTopologyClientX] = { + val dbClient = + new StoreBasedDomainTopologyClientX( + clock, + domainId, + protocolVersion, + store, + packageDependencies, + timeouts, + futureSupervisor, + loggerFactory, + ) + val caching = + new 
CachingDomainTopologyClientX( + clock, + dbClient, + cachingConfigs, + batchingConfig, + timeouts, + futureSupervisor, + loggerFactory, + ) + store.maxTimestamp().map { x => + x.foreach { case (_, effective) => + caching + .updateHead(effective, effective.toApproximate, potentialTopologyChange = true) + } + caching + } + } +} + +/** simple wrapper class in order to "override" the timestamp we are returning here */ +private class ForwardingTopologySnapshotClient( + override val timestamp: CantonTimestamp, + parent: TopologySnapshotLoader, + val loggerFactory: NamedLoggerFactory, +)(implicit val executionContext: ExecutionContext) + extends TopologySnapshotLoader { + + override def referenceTime: CantonTimestamp = parent.timestamp + override def participants(): Future[Seq[(ParticipantId, ParticipantPermission)]] = + parent.participants() + override def allKeys(owner: Member): Future[KeyCollection] = parent.allKeys(owner) + override def findParticipantState( + participantId: ParticipantId + ): Future[Option[ParticipantAttributes]] = parent.findParticipantState(participantId) + override def loadParticipantStates( + participants: Seq[ParticipantId] + ): Future[Map[ParticipantId, ParticipantAttributes]] = parent.loadParticipantStates(participants) + override private[client] def loadActiveParticipantsOf( + party: PartyId, + participantStates: Seq[ParticipantId] => Future[Map[ParticipantId, ParticipantAttributes]], + ): Future[PartyInfo] = + parent.loadActiveParticipantsOf(party, participantStates) + override def findParticipantCertificate(participantId: ParticipantId)(implicit + traceContext: TraceContext + ): Future[Option[LegalIdentityClaimEvidence.X509Cert]] = + parent.findParticipantCertificate(participantId) + + override def inspectKeys( + filterOwner: String, + filterOwnerType: Option[MemberCode], + limit: Int, + ): Future[Map[Member, KeyCollection]] = + parent.inspectKeys(filterOwner, filterOwnerType, limit) + override def inspectKnownParties( + filterParty: 
String, + filterParticipant: String, + limit: Int, + ): Future[Set[PartyId]] = + parent.inspectKnownParties(filterParty, filterParticipant, limit) + + override def findUnvettedPackagesOrDependencies( + participantId: ParticipantId, + packages: Set[PackageId], + ): EitherT[Future, PackageId, Set[PackageId]] = + parent.findUnvettedPackagesOrDependencies(participantId, packages) + + override private[client] def loadUnvettedPackagesOrDependencies( + participant: ParticipantId, + packageId: PackageId, + ): EitherT[Future, PackageId, Set[PackageId]] = + parent.loadUnvettedPackagesOrDependencies(participant, packageId) + + /** returns the list of currently known mediators */ + override def mediatorGroups(): Future[Seq[MediatorGroup]] = parent.mediatorGroups() + + /** returns the sequencer group if known */ + override def sequencerGroup(): Future[Option[SequencerGroup]] = parent.sequencerGroup() + + override def allMembers(): Future[Set[Member]] = parent.allMembers() + + override def isMemberKnown(member: Member): Future[Boolean] = + parent.isMemberKnown(member) + + override def findDynamicDomainParameters()(implicit + traceContext: TraceContext + ): Future[Either[String, DynamicDomainParametersWithValidity]] = + parent.findDynamicDomainParameters() + + /** List all the dynamic domain parameters (past and current) */ + override def listDynamicDomainParametersChanges()(implicit + traceContext: TraceContext + ): Future[Seq[DynamicDomainParametersWithValidity]] = + parent.listDynamicDomainParametersChanges() + + override private[client] def loadBatchActiveParticipantsOf( + parties: Seq[PartyId], + loadParticipantStates: Seq[ParticipantId] => Future[Map[ParticipantId, ParticipantAttributes]], + ) = parent.loadBatchActiveParticipantsOf(parties, loadParticipantStates) + + override def trafficControlStatus( + members: Seq[Member] + ): Future[Map[Member, Option[MemberTrafficControlState]]] = + parent.trafficControlStatus(members) + + /** Returns the Authority-Of delegations for 
consortium parties. Non-consortium parties delegate to themselves + * with threshold one + */ + override def authorityOf( + parties: Set[LfPartyId] + ): Future[PartyTopologySnapshotClient.AuthorityOfResponse] = parent.authorityOf(parties) +} + +class CachingTopologySnapshot( + parent: TopologySnapshotLoader, + cachingConfigs: CachingConfigs, + batchingConfig: BatchingConfig, + val loggerFactory: NamedLoggerFactory, +)(implicit + val executionContext: ExecutionContext +) extends TopologySnapshotLoader + with NamedLogging + with NoTracing { + + override def timestamp: CantonTimestamp = parent.timestamp + + private val partyCache = cachingConfigs.partyCache + .buildScaffeine() + .buildAsyncFuture[PartyId, PartyInfo]( + loader = party => parent.loadActiveParticipantsOf(party, loadParticipantStates), + allLoader = + Some(parties => parent.loadBatchActiveParticipantsOf(parties.toSeq, loadParticipantStates)), + ) + + private val participantCache = + cachingConfigs.participantCache + .buildScaffeine() + .buildAsyncFuture[ParticipantId, Option[ParticipantAttributes]](parent.findParticipantState) + private val keyCache = cachingConfigs.keyCache + .buildScaffeine() + .buildAsyncFuture[Member, KeyCollection](parent.allKeys) + + private val packageVettingCache = cachingConfigs.packageVettingCache + .buildScaffeine() + .buildAsyncFuture[(ParticipantId, PackageId), Either[PackageId, Set[PackageId]]](x => + loadUnvettedPackagesOrDependencies(x._1, x._2).value + ) + + private val mediatorsCache = new AtomicReference[Option[Future[Seq[MediatorGroup]]]](None) + + private val sequencerGroupCache = + new AtomicReference[Option[Future[Option[SequencerGroup]]]](None) + + private val allMembersCache = new AtomicReference[Option[Future[Set[Member]]]](None) + private val memberCache = cachingConfigs.memberCache + .buildScaffeine() + .buildAsyncFuture[Member, Boolean](parent.isMemberKnown) + + private val domainParametersCache = + new AtomicReference[Option[Future[Either[String, 
DynamicDomainParametersWithValidity]]]](None) + + private val domainParametersChangesCache = + new AtomicReference[ + Option[Future[Seq[DynamicDomainParametersWithValidity]]] + ](None) + + private val domainTrafficControlStateCache = cachingConfigs.trafficStatusCache + .buildScaffeine() + .buildAsyncFuture[Member, Option[MemberTrafficControlState]]( + loader = member => + parent + .trafficControlStatus(Seq(member)) + .map(_.get(member).flatten), + allLoader = Some(members => parent.trafficControlStatus(members.toSeq)), + ) + + private val authorityOfCache = cachingConfigs.partyCache + .buildScaffeine() + .buildAsyncFuture[Set[LfPartyId], PartyTopologySnapshotClient.AuthorityOfResponse]( + loader = party => parent.authorityOf(party) + ) + + override def participants(): Future[Seq[(ParticipantId, ParticipantPermission)]] = + parent.participants() + + override def allKeys(owner: Member): Future[KeyCollection] = keyCache.get(owner) + + override def findParticipantState( + participantId: ParticipantId + ): Future[Option[ParticipantAttributes]] = + participantCache.get(participantId) + + override def loadActiveParticipantsOf( + party: PartyId, + participantStates: Seq[ParticipantId] => Future[Map[ParticipantId, ParticipantAttributes]], + ): Future[PartyInfo] = + partyCache.get(party) + + override private[client] def loadBatchActiveParticipantsOf( + parties: Seq[PartyId], + loadParticipantStates: Seq[ParticipantId] => Future[Map[ParticipantId, ParticipantAttributes]], + ) = { + // split up the request into separate chunks so that we don't block the cache for too long + // when loading very large batches + MonadUtil + .batchedSequentialTraverse(batchingConfig.parallelism, batchingConfig.maxItemsInSqlClause)( + parties + )( + partyCache.getAll(_).map(_.toSeq) + ) + .map(_.toMap) + } + + override def loadParticipantStates( + participants: Seq[ParticipantId] + ): Future[Map[ParticipantId, ParticipantAttributes]] = + participants + .parTraverse(participant => 
participantState(participant).map((participant, _))) + .map(_.toMap) + + override def findParticipantCertificate( + participantId: ParticipantId + )(implicit traceContext: TraceContext): Future[Option[LegalIdentityClaimEvidence.X509Cert]] = { + // This one is not cached as we don't need during processing + parent.findParticipantCertificate(participantId) + } + + override def findUnvettedPackagesOrDependencies( + participantId: ParticipantId, + packages: Set[PackageId], + ): EitherT[Future, PackageId, Set[PackageId]] = + findUnvettedPackagesOrDependenciesUsingLoader( + participantId, + packages, + (x, y) => EitherT(packageVettingCache.get((x, y))), + ) + + private[client] def loadUnvettedPackagesOrDependencies( + participant: ParticipantId, + packageId: PackageId, + ): EitherT[Future, PackageId, Set[PackageId]] = + parent.loadUnvettedPackagesOrDependencies(participant, packageId) + + override def inspectKeys( + filterOwner: String, + filterOwnerType: Option[MemberCode], + limit: Int, + ): Future[Map[Member, KeyCollection]] = + parent.inspectKeys(filterOwner, filterOwnerType, limit) + + override def inspectKnownParties( + filterParty: String, + filterParticipant: String, + limit: Int, + ): Future[Set[PartyId]] = + parent.inspectKnownParties(filterParty, filterParticipant, limit) + + /** returns the list of currently known mediators */ + override def mediatorGroups(): Future[Seq[MediatorGroup]] = + getAndCache(mediatorsCache, parent.mediatorGroups()) + + /** returns the sequencer group if known */ + override def sequencerGroup(): Future[Option[SequencerGroup]] = + getAndCache(sequencerGroupCache, parent.sequencerGroup()) + + /** returns the set of all known members */ + override def allMembers(): Future[Set[Member]] = getAndCache(allMembersCache, parent.allMembers()) + + override def isMemberKnown(member: Member): Future[Boolean] = + memberCache.get(member) + + /** Returns the value if it is present in the cache. 
Otherwise, use the + * `getter` to fetch it and cache the result. + */ + private def getAndCache[T]( + cache: AtomicReference[Option[Future[T]]], + getter: => Future[T], + ): Future[T] = { + val promise = Promise[T]() + val previousO = cache.getAndSet(Some(promise.future)) + promise.completeWith(previousO.getOrElse(getter)) + promise.future + } + + override def findDynamicDomainParameters()(implicit + traceContext: TraceContext + ): Future[Either[String, DynamicDomainParametersWithValidity]] = + getAndCache(domainParametersCache, parent.findDynamicDomainParameters()) + + /** List all the dynamic domain parameters (past and current) */ + override def listDynamicDomainParametersChanges()(implicit + traceContext: TraceContext + ): Future[Seq[DynamicDomainParametersWithValidity]] = + getAndCache(domainParametersChangesCache, parent.listDynamicDomainParametersChanges()) + + /** Returns the Authority-Of delegations for consortium parties. Non-consortium parties delegate to themselves + * with threshold one + */ + override def authorityOf( + parties: Set[LfPartyId] + ): Future[PartyTopologySnapshotClient.AuthorityOfResponse] = + authorityOfCache.get(parties) + + override def trafficControlStatus( + members: Seq[Member] + ): Future[Map[Member, Option[MemberTrafficControlState]]] = { + domainTrafficControlStateCache.getAll(members) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/client/DomainTrafficStateClient.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/client/DomainTrafficStateClient.scala new file mode 100644 index 0000000000..04a9a672b3 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/client/DomainTrafficStateClient.scala @@ -0,0 +1,24 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology.client + +import com.digitalasset.canton.config.RequireTypes.PositiveLong +import com.digitalasset.canton.topology.Member + +import scala.concurrent.Future + +final case class MemberTrafficControlState(totalExtraTrafficLimit: PositiveLong) + +/** The subset of the topology client providing traffic state information */ +trait DomainTrafficControlStateClient { + this: BaseTopologySnapshotClient => + + /** Return the traffic control states for the members specified + * @param members for which to return the traffic state + * @return all input members with their optional traffic state + */ + def trafficControlStatus( + members: Seq[Member] + ): Future[Map[Member, Option[MemberTrafficControlState]]] +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/client/IdentityProvidingServiceClient.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/client/IdentityProvidingServiceClient.scala new file mode 100644 index 0000000000..b887065f42 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/client/IdentityProvidingServiceClient.scala @@ -0,0 +1,835 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology.client + +import cats.Monad +import cats.data.EitherT +import cats.syntax.functor.* +import cats.syntax.functorFilter.* +import cats.syntax.parallel.* +import com.daml.lf.data.Ref.PackageId +import com.digitalasset.canton.concurrent.HasFutureSupervision +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.crypto.{EncryptionPublicKey, SigningPublicKey} +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.logging.NamedLogging +import com.digitalasset.canton.protocol.{ + DynamicDomainParameters, + DynamicDomainParametersWithValidity, +} +import com.digitalasset.canton.sequencing.TrafficControlParameters +import com.digitalasset.canton.topology.MediatorGroup.MediatorGroupIndex +import com.digitalasset.canton.topology.* +import com.digitalasset.canton.topology.client.PartyTopologySnapshotClient.{ + AuthorityOfResponse, + PartyInfo, +} +import com.digitalasset.canton.topology.processing.{ + TopologyTransactionProcessingSubscriber, + TopologyTransactionProcessingSubscriberCommon, + TopologyTransactionProcessingSubscriberX, +} +import com.digitalasset.canton.topology.transaction.LegalIdentityClaimEvidence.X509Cert +import com.digitalasset.canton.topology.transaction.* +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.FutureInstances.* +import com.digitalasset.canton.version.ProtocolVersion +import com.digitalasset.canton.{LfPartyId, checked} + +import scala.Ordered.orderingToOrdered +import scala.collection.concurrent.TrieMap +import scala.concurrent.duration.* +import scala.concurrent.{ExecutionContext, Future} + +// architecture-handbook-entry-begin: IdentityProvidingServiceClient + +/** Client side API for the Identity Providing Service. 
This API is used to get information about the layout of + * the domains, such as party-participant relationships, used encryption and signing keys, + * package information, participant states, domain parameters, and so on. + */ +class IdentityProvidingServiceClient { + + private val domains = TrieMap.empty[DomainId, DomainTopologyClient] + + def add(domainClient: DomainTopologyClient): this.type = { + domains += (domainClient.domainId -> domainClient) + this + } + + def allDomains: Iterable[DomainTopologyClient] = domains.values + + def tryForDomain(domain: DomainId): DomainTopologyClient = + domains.getOrElse(domain, sys.error("unknown domain " + domain.toString)) + + def forDomain(domain: DomainId): Option[DomainTopologyClient] = domains.get(domain) + +} + +trait TopologyClientApi[+T] { this: HasFutureSupervision => + + /** The domain this client applies to */ + def domainId: DomainId + + /** Our current snapshot approximation + * + * As topology transactions are future dated (to prevent sequential bottlenecks), we do + * have to "guess" the current state, as time is defined by the sequencer after + * we've sent the transaction. Therefore, this function will return the + * best snapshot approximation known. + * + * The snapshot returned by this method should be used when preparing a transaction or transfer request (Phase 1). + * It must not be used when validating a request (Phase 2 - 7); instead, use one of the `snapshot` methods with the request timestamp. + */ + def currentSnapshotApproximation(implicit traceContext: TraceContext): T + + /** Possibly future dated head snapshot + * + * As we future date topology transactions, the head snapshot is our latest knowledge of the topology state, + * but as it can be still future dated, we need to be careful when actually using it: the state might not + * yet be active, as the topology transactions are future dated. 
Therefore, do not act towards the sequencer + * using this snapshot, but use the currentSnapshotApproximation instead. + */ + def headSnapshot(implicit traceContext: TraceContext): T = checked( + trySnapshot(topologyKnownUntilTimestamp) + ) + + /** The approximate timestamp + * + * This is either the last observed sequencer timestamp OR the effective timestamp after we observed + * the time difference of (effective - sequencer = epsilon) to elapse + */ + def approximateTimestamp: CantonTimestamp + + /** The most recently observed effective timestamp + * + * The effective timestamp is sequencer_time + epsilon(sequencer_time), where + * epsilon is given by the topology change delay time, defined using the domain parameters. + * + * This is the highest timestamp for which we can serve snapshots + */ + def topologyKnownUntilTimestamp: CantonTimestamp + + /** Returns true if the topology information at the passed timestamp is already known */ + def snapshotAvailable(timestamp: CantonTimestamp): Boolean + + /** Returns the topology information at a certain point in time + * + * Use this method if you are sure to be synchronized with the topology state updates. + * The method will block & wait for an update, but emit a warning if it is not available + * + * The snapshot returned by this method should be used for validating transaction and transfer requests (Phase 2 - 7). + * Use the request timestamp as parameter for this method. + * Do not use a response or result timestamp, because all validation steps must use the same topology snapshot. + */ + def snapshot(timestamp: CantonTimestamp)(implicit traceContext: TraceContext): Future[T] + def snapshotUS(timestamp: CantonTimestamp)(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[T] + + /** Waits until a snapshot is available + * + * The snapshot returned by this method should be used for validating transaction and transfer requests (Phase 2 - 7). + * Use the request timestamp as parameter for this method. 
+ * Do not use a response or result timestamp, because all validation steps must use the same topology snapshot. + */ + def awaitSnapshot(timestamp: CantonTimestamp)(implicit traceContext: TraceContext): Future[T] + + /** Supervised version of [[awaitSnapshot]] */ + def awaitSnapshotSupervised(description: => String, warnAfter: Duration = 30.seconds)( + timestamp: CantonTimestamp + )(implicit + traceContext: TraceContext + ): Future[T] = supervised(description, warnAfter)(awaitSnapshot(timestamp)) + + /** Shutdown safe version of await snapshot */ + def awaitSnapshotUS(timestamp: CantonTimestamp)(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[T] + + /** Supervised version of [[awaitSnapshotUS]] */ + def awaitSnapshotUSSupervised(description: => String, warnAfter: Duration = 30.seconds)( + timestamp: CantonTimestamp + )(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[T] = supervisedUS(description, warnAfter)(awaitSnapshotUS(timestamp)) + + /** Returns the topology information at a certain point in time + * + * Fails with an exception if the state is not yet known. + * + * The snapshot returned by this method should be used for validating transaction and transfer requests (Phase 2 - 7). + * Use the request timestamp as parameter for this method. + * Do not use a response or result timestamp, because all validation steps must use the same topology snapshot. + */ + def trySnapshot(timestamp: CantonTimestamp)(implicit traceContext: TraceContext): T + + /** Returns an optional future which will complete when the timestamp has been observed + * + * If the timestamp is already observed, we return None. + * + * Note that this function allows to wait for effective time (true) and sequenced time (false). + * If we wait for effective time, we wait until the topology snapshot for that given + * point in time is known. 
As we future date topology transactions (to avoid bottlenecks), + * this might be before we actually observed a sequencing timestamp. + */ + def awaitTimestamp( + timestamp: CantonTimestamp, + waitForEffectiveTime: Boolean, + )(implicit traceContext: TraceContext): Option[Future[Unit]] + + def awaitTimestampUS( + timestamp: CantonTimestamp, + waitForEffectiveTime: Boolean, + )(implicit traceContext: TraceContext): Option[FutureUnlessShutdown[Unit]] + +} + +/** The client that provides the topology information on a per domain basis + */ +trait DomainTopologyClient extends TopologyClientApi[TopologySnapshot] with AutoCloseable { + this: HasFutureSupervision => + + /** Wait for a condition to become true according to the current snapshot approximation + * + * @return true if the condition became true, false if it timed out + */ + def await(condition: TopologySnapshot => Future[Boolean], timeout: Duration)(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Boolean] + +} + +trait BaseTopologySnapshotClient { + + protected implicit def executionContext: ExecutionContext + + /** The official timestamp corresponding to this snapshot */ + def timestamp: CantonTimestamp + + /** Internally used reference time (representing when the last change happened that affected this snapshot) */ + def referenceTime: CantonTimestamp = timestamp + +} + +/** The subset of the topology client providing party to participant mapping information */ +trait PartyTopologySnapshotClient { + + this: BaseTopologySnapshotClient => + + /** Load the set of active participants for the given parties */ + def activeParticipantsOfParties( + parties: Seq[LfPartyId] + ): Future[Map[LfPartyId, Set[ParticipantId]]] + + def activeParticipantsOfPartiesWithAttributes( + parties: Seq[LfPartyId] + ): Future[Map[LfPartyId, Map[ParticipantId, ParticipantAttributes]]] + + /** Returns the set of active participants the given party is represented by as of the snapshot timestamp + * + * Should never return a 
PartyParticipantRelationship where ParticipantPermission is DISABLED. + */ + def activeParticipantsOf( + party: LfPartyId + ): Future[Map[ParticipantId, ParticipantAttributes]] + + /** Returns Right if all parties have at least an active participant passing the check. Otherwise, all parties not passing are passed as Left */ + def allHaveActiveParticipants( + parties: Set[LfPartyId], + check: (ParticipantPermission => Boolean) = _.isActive, + ): EitherT[Future, Set[LfPartyId], Unit] + + /** Returns the consortium thresholds (how many votes from different participants that host the consortium party + * are required for the confirmation to become valid). For normal parties returns 1. + */ + def consortiumThresholds(parties: Set[LfPartyId]): Future[Map[LfPartyId, PositiveInt]] + + /** Returns the Authority-Of delegations for consortium parties. Non-consortium parties delegate to themselves + * with threshold one + */ + def authorityOf(parties: Set[LfPartyId]): Future[AuthorityOfResponse] + + /** Returns true if there is at least one participant that satisfies the predicate */ + def isHostedByAtLeastOneParticipantF( + party: LfPartyId, + check: ParticipantAttributes => Boolean, + ): Future[Boolean] + + /** Returns the participant permission for that particular participant (if there is one) */ + def hostedOn( + partyId: LfPartyId, + participantId: ParticipantId, + ): Future[Option[ParticipantAttributes]] + + /** Returns true of all given party ids are hosted on a certain participant */ + def allHostedOn( + partyIds: Set[LfPartyId], + participantId: ParticipantId, + permissionCheck: ParticipantAttributes => Boolean = _.permission.isActive, + ): Future[Boolean] + + /** Returns whether a participant can confirm on behalf of a party. */ + def canConfirm( + participant: ParticipantId, + party: LfPartyId, + requiredTrustLevel: TrustLevel = TrustLevel.Ordinary, + ): Future[Boolean] + + /** Returns all active participants of all the given parties. 
Returns a Left if some of the parties don't have active + * participants, in which case the parties with missing active participants are returned. Note that it will return + * an empty set as a Right when given an empty list of parties. + */ + def activeParticipantsOfAll( + parties: List[LfPartyId] + ): EitherT[Future, Set[LfPartyId], Set[ParticipantId]] + + def partiesWithGroupAddressing( + parties: Seq[LfPartyId] + ): Future[Set[LfPartyId]] + + /** Returns a list of all known parties on this domain */ + def inspectKnownParties( + filterParty: String, + filterParticipant: String, + limit: Int, + ): Future[ + Set[PartyId] + ] // TODO(#14048): Decide on whether to standarize APIs on LfPartyId or PartyId and unify interfaces + +} + +object PartyTopologySnapshotClient { + final case class AuthorityOfDelegation(expected: Set[LfPartyId], threshold: PositiveInt) + + def nonConsortiumPartyDelegation(partyId: LfPartyId): AuthorityOfDelegation = + AuthorityOfDelegation(Set(partyId), PositiveInt.one) + + final case class AuthorityOfResponse(response: Map[LfPartyId, AuthorityOfDelegation]) + + final case class PartyInfo( + groupAddressing: Boolean, + threshold: PositiveInt, // > 1 for consortium parties + participants: Map[ParticipantId, ParticipantAttributes], + ) + + object PartyInfo { + def nonConsortiumPartyInfo(participants: Map[ParticipantId, ParticipantAttributes]): PartyInfo = + PartyInfo(groupAddressing = false, threshold = PositiveInt.one, participants = participants) + + lazy val EmptyPartyInfo: PartyInfo = nonConsortiumPartyInfo(Map.empty) + } +} + +/** The subset of the topology client, providing signing and encryption key information */ +trait KeyTopologySnapshotClient { + + this: BaseTopologySnapshotClient => + + /** returns newest signing public key */ + def signingKey(owner: Member): Future[Option[SigningPublicKey]] + + /** returns all signing keys */ + def signingKeys(owner: Member): Future[Seq[SigningPublicKey]] + + /** returns newest encryption public key 
*/ + def encryptionKey(owner: Member): Future[Option[EncryptionPublicKey]] + + /** returns all encryption keys */ + def encryptionKeys(owner: Member): Future[Seq[EncryptionPublicKey]] + + /** Returns a list of all known parties on this domain */ + def inspectKeys( + filterOwner: String, + filterOwnerType: Option[MemberCode], + limit: Int, + ): Future[Map[Member, KeyCollection]] + +} + +/** The subset of the topology client, providing participant state information */ +trait ParticipantTopologySnapshotClient { + + this: BaseTopologySnapshotClient => + + // used by domain to fetch all participants + @Deprecated(since = "3.0") + def participants(): Future[Seq[(ParticipantId, ParticipantPermission)]] + + /** Checks whether the provided participant exists and is active */ + def isParticipantActive(participantId: ParticipantId): Future[Boolean] + +} + +/** The subset of the topology client providing mediator state information */ +trait MediatorDomainStateClient { + this: BaseTopologySnapshotClient => + + /** returns the list of currently known mediators */ + @deprecated(since = "2.7", message = "Use mediatorGroups instead.") + final def mediators(): Future[Seq[MediatorId]] = + mediatorGroups().map(_.flatMap(mg => mg.active ++ mg.passive)) + + def mediatorGroups(): Future[Seq[MediatorGroup]] + + def isMediatorActive(mediatorId: MediatorId): Future[Boolean] = + mediatorGroups().map(_.exists { group => + // Note: mediator in group.passive should still be able to authenticate and process MediatorResponses, + // only sending the verdicts is disabled and verdicts from a passive mediator should not pass the checks + group.isActive && (group.active.contains(mediatorId) || group.passive.contains(mediatorId)) + }) + + def isMediatorActive(mediator: MediatorRef): Future[Boolean] = { + mediator match { + case MediatorRef.Single(mediatorId) => + isMediatorActive(mediatorId) + case MediatorRef.Group(mediatorsOfDomain) => + mediatorGroup(mediatorsOfDomain.group).map { + case Some(group) 
=> group.isActive + case None => false + } + } + } + + def mediatorGroupsOfAll( + groups: Seq[MediatorGroupIndex] + ): EitherT[Future, Seq[MediatorGroupIndex], Seq[MediatorGroup]] = + if (groups.isEmpty) EitherT.rightT(Seq.empty) + else + EitherT( + mediatorGroups() + .map { mediatorGroups => + val existingGroupIndices = mediatorGroups.map(_.index) + val nonExisting = groups.filterNot(existingGroupIndices.contains) + Either.cond( + nonExisting.isEmpty, + mediatorGroups.filter(g => groups.contains(g.index)), + nonExisting, + ) + } + ) + + def mediatorGroup(index: MediatorGroupIndex): Future[Option[MediatorGroup]] = { + mediatorGroups().map(_.find(_.index == index)) + } +} + +/** The subset of the topology client providing sequencer state information */ +trait SequencerDomainStateClient { + this: BaseTopologySnapshotClient => + + /** returns the sequencer group */ + def sequencerGroup(): Future[Option[SequencerGroup]] +} + +// this can be removed with 3.0 +@Deprecated(since = "3.0") +trait CertificateSnapshotClient { + + this: BaseTopologySnapshotClient => + + @Deprecated(since = "3.0.0") + def hasParticipantCertificate(participantId: ParticipantId)(implicit + traceContext: TraceContext + ): Future[Boolean] = + findParticipantCertificate(participantId).map(_.isDefined) + + @Deprecated(since = "3.0.0") + def findParticipantCertificate(participantId: ParticipantId)(implicit + traceContext: TraceContext + ): Future[Option[X509Cert]] + +} + +trait VettedPackagesSnapshotClient { + + this: BaseTopologySnapshotClient => + + /** Returns the set of packages that are not vetted by the given participant + * + * @param participantId the participant for which we want to check the package vettings + * @param packages the set of packages that should be vetted + * @return Right the set of unvetted packages (which is empty if all packages are vetted) + * Left if a package is missing locally such that we can not verify the vetting state of the package dependencies + */ + def 
findUnvettedPackagesOrDependencies( + participantId: ParticipantId, + packages: Set[PackageId], + ): EitherT[Future, PackageId, Set[PackageId]] + +} + +trait DomainGovernanceSnapshotClient { + this: BaseTopologySnapshotClient with NamedLogging => + + def trafficControlParameters[A]( + protocolVersion: ProtocolVersion + )(implicit tc: TraceContext): Future[Option[TrafficControlParameters]] = { + findDynamicDomainParametersOrDefault(protocolVersion) + .map(_.trafficControlParameters) + } + + def findDynamicDomainParametersOrDefault( + protocolVersion: ProtocolVersion, + warnOnUsingDefault: Boolean = true, + )(implicit traceContext: TraceContext): Future[DynamicDomainParameters] = + findDynamicDomainParameters().map { + case Right(value) => value.parameters + case Left(_) => + if (warnOnUsingDefault) { + logger.warn(s"Unexpectedly using default domain parameters at ${timestamp}") + } + + DynamicDomainParameters.initialValues( + // we must use zero as default change delay parameter, as otherwise static time tests will not work + // however, once the domain has published the initial set of domain parameters, the zero time will be + // adjusted. 
+ topologyChangeDelay = DynamicDomainParameters.topologyChangeDelayIfAbsent, + protocolVersion = protocolVersion, + ) + } + + def findDynamicDomainParameters()(implicit + traceContext: TraceContext + ): Future[Either[String, DynamicDomainParametersWithValidity]] + + /** List all the dynamic domain parameters (past and current) */ + def listDynamicDomainParametersChanges()(implicit + traceContext: TraceContext + ): Future[Seq[DynamicDomainParametersWithValidity]] +} + +trait MembersTopologySnapshotClient { + this: BaseTopologySnapshotClient => + + def allMembers(): Future[Set[Member]] + + def isMemberKnown(member: Member): Future[Boolean] +} + +trait TopologySnapshot + extends PartyTopologySnapshotClient + with BaseTopologySnapshotClient + with ParticipantTopologySnapshotClient + with KeyTopologySnapshotClient + with CertificateSnapshotClient + with VettedPackagesSnapshotClient + with MediatorDomainStateClient + with SequencerDomainStateClient + with DomainTrafficControlStateClient + with DomainGovernanceSnapshotClient + with MembersTopologySnapshotClient { this: BaseTopologySnapshotClient with NamedLogging => } + +// architecture-handbook-entry-end: IdentityProvidingServiceClient + +trait DomainTopologyClientWithInitOld + extends DomainTopologyClientWithInit + with TopologyTransactionProcessingSubscriber + +trait DomainTopologyClientWithInitX + extends DomainTopologyClientWithInit + with TopologyTransactionProcessingSubscriberX + +/** The internal domain topology client interface used for initialisation and efficient processing */ +trait DomainTopologyClientWithInit + extends DomainTopologyClient + with TopologyTransactionProcessingSubscriberCommon + with HasFutureSupervision + with NamedLogging { + + implicit override protected def executionContext: ExecutionContext + + /** current number of changes waiting to become effective */ + def numPendingChanges: Int + + /** Overloaded recent snapshot returning derived type */ + override def 
currentSnapshotApproximation(implicit + traceContext: TraceContext + ): TopologySnapshotLoader = trySnapshot(approximateTimestamp) + + override def trySnapshot(timestamp: CantonTimestamp)(implicit + traceContext: TraceContext + ): TopologySnapshotLoader + + /** Overloaded snapshot returning derived type */ + override def snapshot( + timestamp: CantonTimestamp + )(implicit traceContext: TraceContext): Future[TopologySnapshotLoader] = { + snapshotInternal(timestamp)((timestamp, waitForEffectiveTime) => + this.awaitTimestamp(timestamp, waitForEffectiveTime) + ) + } + + /** Overloaded snapshot returning derived type */ + override def snapshotUS( + timestamp: CantonTimestamp + )(implicit traceContext: TraceContext): FutureUnlessShutdown[TopologySnapshotLoader] = { + snapshotInternal[FutureUnlessShutdown](timestamp)( + (timestamp, waitForEffectiveTime) => this.awaitTimestampUS(timestamp, waitForEffectiveTime), + // Do not log a warning if we get a shutdown future + logWarning = f => f != FutureUnlessShutdown.abortedDueToShutdown, + ) + } + + private def snapshotInternal[F[_]]( + timestamp: CantonTimestamp + )( + awaitTimestampFn: (CantonTimestamp, Boolean) => Option[F[Unit]], + logWarning: F[Unit] => Boolean = Function.const(true), + )(implicit traceContext: TraceContext, monad: Monad[F]): F[TopologySnapshotLoader] = { + val syncF = awaitTimestampFn(timestamp, true) match { + case None => monad.unit + // No need to log a warning if the future we get is due to a shutdown in progress + case Some(fut) => + if (logWarning(fut)) { + logger.warn( + s"Unsynchronized access to topology snapshot at $timestamp, topology known until=$topologyKnownUntilTimestamp" + ) + } + fut + } + syncF.map(_ => trySnapshot(timestamp)) + } + + override def awaitSnapshot(timestamp: CantonTimestamp)(implicit + traceContext: TraceContext + ): Future[TopologySnapshot] = + awaitTimestamp(timestamp, waitForEffectiveTime = true) + .getOrElse(Future.unit) + .map(_ => trySnapshot(timestamp)) + + override 
def awaitSnapshotUS(timestamp: CantonTimestamp)(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[TopologySnapshot] = + awaitTimestampUS(timestamp, waitForEffectiveTime = true) + .getOrElse(FutureUnlessShutdown.unit) + .map(_ => trySnapshot(timestamp)) + + /** internal await implementation used to schedule state evaluations after topology updates */ + private[topology] def scheduleAwait( + condition: => Future[Boolean], + timeout: Duration, + ): FutureUnlessShutdown[Boolean] + +} + +/** An internal interface with a simpler lookup function which can be implemented efficiently with caching and reading from a store */ +private[client] trait KeyTopologySnapshotClientLoader extends KeyTopologySnapshotClient { + this: BaseTopologySnapshotClient => + + /** abstract loading function used to obtain the full key collection for a key owner */ + def allKeys(owner: Member): Future[KeyCollection] + + override def signingKey(owner: Member): Future[Option[SigningPublicKey]] = + allKeys(owner).map(_.signingKeys.lastOption) + + override def signingKeys(owner: Member): Future[Seq[SigningPublicKey]] = + allKeys(owner).map(_.signingKeys) + + override def encryptionKey(owner: Member): Future[Option[EncryptionPublicKey]] = + allKeys(owner).map(_.encryptionKeys.lastOption) + + override def encryptionKeys(owner: Member): Future[Seq[EncryptionPublicKey]] = + allKeys(owner).map(_.encryptionKeys) + +} + +/** An internal interface with a simpler lookup function which can be implemented efficiently with caching and reading from a store */ +private[client] trait ParticipantTopologySnapshotLoader extends ParticipantTopologySnapshotClient { + + this: BaseTopologySnapshotClient => + + override def isParticipantActive(participantId: ParticipantId): Future[Boolean] = + participantState(participantId).map(_.permission.isActive) + + def findParticipantState(participantId: ParticipantId): Future[Option[ParticipantAttributes]] + + def participantState(participantId: ParticipantId): 
Future[ParticipantAttributes] = + findParticipantState(participantId).map( + _.getOrElse(ParticipantAttributes(ParticipantPermission.Disabled, TrustLevel.Ordinary)) + ) + + /** abstract loading function used to load the participant state for the given set of participant-ids */ + def loadParticipantStates( + participants: Seq[ParticipantId] + ): Future[Map[ParticipantId, ParticipantAttributes]] + +} + +private[client] trait PartyTopologySnapshotBaseClient { + + this: PartyTopologySnapshotClient with BaseTopologySnapshotClient => + + override def allHaveActiveParticipants( + parties: Set[LfPartyId], + check: (ParticipantPermission => Boolean) = _.isActive, + ): EitherT[Future, Set[LfPartyId], Unit] = { + val fetchedF = activeParticipantsOfPartiesWithAttributes(parties.toSeq) + EitherT( + fetchedF + .map { fetched => + fetched.foldLeft(Set.empty[LfPartyId]) { case (acc, (party, relationships)) => + if (relationships.exists(x => check(x._2.permission))) + acc + else acc + party + } + } + .map { res => + if (res.isEmpty) Right(()) + else Left(res) + } + ) + } + + override def isHostedByAtLeastOneParticipantF( + party: LfPartyId, + check: ParticipantAttributes => Boolean, + ): Future[Boolean] = + activeParticipantsOf(party).map(_.values.exists(check)) + + override def hostedOn( + partyId: LfPartyId, + participantId: ParticipantId, + ): Future[Option[ParticipantAttributes]] = + // TODO(i4930) implement directly, must not return DISABLED + activeParticipantsOf(partyId).map(_.get(participantId)) + + override def allHostedOn( + partyIds: Set[LfPartyId], + participantId: ParticipantId, + permissionCheck: ParticipantAttributes => Boolean = _.permission.isActive, + ): Future[Boolean] = + partyIds.toList + .parTraverse(hostedOn(_, participantId).map(_.exists(permissionCheck))) + .map(_.forall(x => x)) + + override def canConfirm( + participant: ParticipantId, + party: LfPartyId, + requiredTrustLevel: TrustLevel = TrustLevel.Ordinary, + ): Future[Boolean] = + hostedOn(party, 
participant) + .map( + _.exists(relationship => + relationship.permission.canConfirm && relationship.trustLevel >= requiredTrustLevel + ) + )(executionContext) + + override def activeParticipantsOfAll( + parties: List[LfPartyId] + ): EitherT[Future, Set[LfPartyId], Set[ParticipantId]] = + EitherT(for { + withActiveParticipants <- parties.parTraverse(p => + activeParticipantsOf(p).map(pMap => p -> pMap) + ) + (noActive, allActive) = withActiveParticipants.foldLeft( + Set.empty[LfPartyId] -> Set.empty[ParticipantId] + ) { case ((noActive, allActive), (p, active)) => + (if (active.isEmpty) noActive + p else noActive, allActive.union(active.keySet)) + } + } yield Either.cond(noActive.isEmpty, allActive, noActive)) +} + +private[client] trait PartyTopologySnapshotLoader + extends PartyTopologySnapshotClient + with PartyTopologySnapshotBaseClient { + + this: BaseTopologySnapshotClient with ParticipantTopologySnapshotLoader => + + final override def activeParticipantsOf( + party: LfPartyId + ): Future[Map[ParticipantId, ParticipantAttributes]] = + PartyId + .fromLfParty(party) + .map(loadActiveParticipantsOf(_, loadParticipantStates).map(_.participants)) + .getOrElse(Future.successful(Map())) + + private[client] def loadActiveParticipantsOf( + party: PartyId, + participantStates: Seq[ParticipantId] => Future[Map[ParticipantId, ParticipantAttributes]], + ): Future[PartyInfo] + + final override def activeParticipantsOfParties( + parties: Seq[LfPartyId] + ): Future[Map[LfPartyId, Set[ParticipantId]]] = + loadAndMapPartyInfos(parties, _.participants.keySet) + + final override def activeParticipantsOfPartiesWithAttributes( + parties: Seq[LfPartyId] + ): Future[Map[LfPartyId, Map[ParticipantId, ParticipantAttributes]]] = + loadAndMapPartyInfos(parties, _.participants) + + final override def partiesWithGroupAddressing(parties: Seq[LfPartyId]): Future[Set[LfPartyId]] = + loadAndMapPartyInfos(parties, identity, _.groupAddressing).map(_.keySet) + + final def 
consortiumThresholds(parties: Set[LfPartyId]): Future[Map[LfPartyId, PositiveInt]] = + loadAndMapPartyInfos(parties.toSeq, _.threshold) + + private def loadAndMapPartyInfos[T]( + lfParties: Seq[LfPartyId], + f: PartyInfo => T, + filter: PartyInfo => Boolean = _ => true, + ): Future[Map[LfPartyId, T]] = + loadBatchActiveParticipantsOf( + lfParties.mapFilter(PartyId.fromLfParty(_).toOption), + loadParticipantStates, + ).map(_.collect { + case (partyId, partyInfo) if filter(partyInfo) => partyId.toLf -> f(partyInfo) + }) + + private[client] def loadBatchActiveParticipantsOf( + parties: Seq[PartyId], + loadParticipantStates: Seq[ParticipantId] => Future[Map[ParticipantId, ParticipantAttributes]], + ): Future[Map[PartyId, PartyInfo]] +} + +trait VettedPackagesSnapshotLoader extends VettedPackagesSnapshotClient { + this: BaseTopologySnapshotClient with PartyTopologySnapshotLoader => + + private[client] def loadUnvettedPackagesOrDependencies( + participant: ParticipantId, + packageId: PackageId, + ): EitherT[Future, PackageId, Set[PackageId]] + + protected def findUnvettedPackagesOrDependenciesUsingLoader( + participantId: ParticipantId, + packages: Set[PackageId], + loader: (ParticipantId, PackageId) => EitherT[Future, PackageId, Set[PackageId]], + ): EitherT[Future, PackageId, Set[PackageId]] = + packages.toList + .parFlatTraverse(packageId => loader(participantId, packageId).map(_.toList)) + .map(_.toSet) + + override def findUnvettedPackagesOrDependencies( + participantId: ParticipantId, + packages: Set[PackageId], + ): EitherT[Future, PackageId, Set[PackageId]] = + findUnvettedPackagesOrDependenciesUsingLoader( + participantId, + packages, + (pid, packId) => loadUnvettedPackagesOrDependencies(pid, packId), + ) + +} + +trait DomainGovernanceSnapshotLoader extends DomainGovernanceSnapshotClient { + this: BaseTopologySnapshotClient with NamedLogging => +} + +/** Loading interface with a more optimal method to read data from a store + * + * The topology information is 
stored in a particular way. In order to optimise loading and caching + * of the data, we use such loader interfaces, such that we can optimise caching and loading of the + * data while still providing a good and convenient access to the topology information. + */ +trait TopologySnapshotLoader + extends TopologySnapshot + with PartyTopologySnapshotLoader + with BaseTopologySnapshotClient + with ParticipantTopologySnapshotLoader + with KeyTopologySnapshotClientLoader + with VettedPackagesSnapshotLoader + with DomainGovernanceSnapshotLoader + with DomainTrafficControlStateClient + with NamedLogging diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/client/StoreBasedDomainTopologyClient.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/client/StoreBasedDomainTopologyClient.scala new file mode 100644 index 0000000000..cef7ff0977 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/client/StoreBasedDomainTopologyClient.scala @@ -0,0 +1,919 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology.client + +import cats.data.EitherT +import cats.syntax.either.* +import cats.syntax.functor.* +import cats.syntax.functorFilter.* +import com.daml.lf.data.Ref.PackageId +import com.daml.nameof.NameOf.functionFullName +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.concurrent.FutureSupervisor +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} +import com.digitalasset.canton.crypto.SigningPublicKey +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.lifecycle.{FlagCloseable, FutureUnlessShutdown, UnlessShutdown} +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.protocol.DynamicDomainParametersWithValidity +import com.digitalasset.canton.time.{Clock, TimeAwaiter} +import com.digitalasset.canton.topology.* +import com.digitalasset.canton.topology.client.PartyTopologySnapshotClient.{ + PartyInfo, + nonConsortiumPartyDelegation, +} +import com.digitalasset.canton.topology.processing.{ApproximateTime, EffectiveTime, SequencedTime} +import com.digitalasset.canton.topology.store.TopologyStoreId.AuthorizedStore +import com.digitalasset.canton.topology.store.{ + StoredTopologyTransactions, + TimeQuery, + TopologyStore, + TopologyStoreId, +} +import com.digitalasset.canton.topology.transaction.SignedTopologyTransactionX.GenericSignedTopologyTransactionX +import com.digitalasset.canton.topology.transaction.* +import com.digitalasset.canton.tracing.{NoTracing, TraceContext} +import com.digitalasset.canton.util.ErrorUtil +import com.digitalasset.canton.version.ProtocolVersion +import com.digitalasset.canton.{DiscardOps, LfPartyId, SequencerCounter} + +import java.time.Duration as JDuration +import java.util.concurrent.atomic.{AtomicInteger, AtomicReference} +import scala.collection.immutable.ArraySeq 
+import scala.concurrent.duration.Duration +import scala.concurrent.{ExecutionContext, Future, Promise} +import scala.util.Success +import scala.util.control.NonFatal + +trait TopologyAwaiter extends FlagCloseable { + + this: DomainTopologyClientWithInit => + + protected def clock: Clock + private val conditions = new AtomicReference[Seq[StateAwait]](Seq.empty) + + override protected def onClosed(): Unit = { + super.onClosed() + shutdownConditions() + } + + private def shutdownConditions(): Unit = { + conditions.updateAndGet { x => + x.foreach(_.promise.trySuccess(UnlessShutdown.AbortedDueToShutdown).discard[Boolean]) + Seq() + }.discard + } + + protected def checkAwaitingConditions()(implicit traceContext: TraceContext): Unit = { + conditions + .get() + .foreach(stateAwait => + try { stateAwait.check() } + catch { + case NonFatal(e) => + logger.error("An exception occurred while checking awaiting conditions.", e) + stateAwait.promise.tryFailure(e).discard[Boolean] + } + ) + } + + private class StateAwait(func: => Future[Boolean]) { + val promise: Promise[UnlessShutdown[Boolean]] = Promise[UnlessShutdown[Boolean]]() + promise.future.onComplete(_ => { + val _ = conditions.updateAndGet(_.filterNot(_.promise.isCompleted)) + }) + + def check(): Unit = { + if (!promise.isCompleted) { + // Ok to use onComplete as any exception will be propagated to the promise. 
+ func.onComplete { + case Success(false) => // nothing to do, will retry later + case res => + val _ = promise.tryComplete(res.map(UnlessShutdown.Outcome(_))) + } + } + } + } + + private[topology] def scheduleAwait( + condition: => Future[Boolean], + timeout: Duration, + ): FutureUnlessShutdown[Boolean] = { + val waiter = new StateAwait(condition) + conditions.updateAndGet(_ :+ waiter) + if (!isClosing) { + if (timeout.isFinite) { + clock + .scheduleAfter( + _ => waiter.promise.trySuccess(UnlessShutdown.Outcome(false)).discard, + JDuration.ofMillis(timeout.toMillis), + ) + .discard + } + waiter.check() + } else { + // calling shutdownConditions() will ensure all added conditions are marked as aborted due to shutdown + // ensure we don't have a race condition between isClosing and updating conditions + shutdownConditions() + } + FutureUnlessShutdown(waiter.promise.future) + } +} + +abstract class BaseDomainTopologyClientOld + extends BaseDomainTopologyClient + with DomainTopologyClientWithInitOld { + override def observed( + sequencedTimestamp: SequencedTime, + effectiveTimestamp: EffectiveTime, + sequencerCounter: SequencerCounter, + transactions: Seq[SignedTopologyTransaction[TopologyChangeOp]], + )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = + observedInternal(sequencedTimestamp, effectiveTimestamp) +} + +abstract class BaseDomainTopologyClientX + extends BaseDomainTopologyClient + with DomainTopologyClientWithInitX { + override def observed( + sequencedTimestamp: SequencedTime, + effectiveTimestamp: EffectiveTime, + sequencerCounter: SequencerCounter, + transactions: Seq[GenericSignedTopologyTransactionX], + )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = + observedInternal(sequencedTimestamp, effectiveTimestamp) +} + +abstract class BaseDomainTopologyClient + extends DomainTopologyClientWithInit + with TopologyAwaiter + with TimeAwaiter { + + def protocolVersion: ProtocolVersion + + private val pendingChanges = new 
AtomicInteger(0) + + private case class HeadTimestamps( + effectiveTimestamp: EffectiveTime, + approximateTimestamp: ApproximateTime, + ) { + def update( + newEffectiveTimestamp: EffectiveTime, + newApproximateTimestamp: ApproximateTime, + ): HeadTimestamps = { + HeadTimestamps( + effectiveTimestamp = + EffectiveTime(effectiveTimestamp.value.max(newEffectiveTimestamp.value)), + approximateTimestamp = + ApproximateTime(approximateTimestamp.value.max(newApproximateTimestamp.value)), + ) + } + } + private val head = new AtomicReference[HeadTimestamps]( + HeadTimestamps( + EffectiveTime(CantonTimestamp.MinValue), + ApproximateTime(CantonTimestamp.MinValue), + ) + ) + + override def updateHead( + effectiveTimestamp: EffectiveTime, + approximateTimestamp: ApproximateTime, + potentialTopologyChange: Boolean, + )(implicit + traceContext: TraceContext + ): Unit = { + val curHead = + head.updateAndGet(_.update(effectiveTimestamp, approximateTimestamp)) + // now notify the futures that wait for this update here. 
    // as the update is active at t+epsilon, (see most recent timestamp),
    // we'll need to notify accordingly
    notifyAwaitedFutures(curHead.effectiveTimestamp.value.immediateSuccessor)
    if (potentialTopologyChange)
      checkAwaitingConditions()
  }

  /** The latest timestamp up to which the topology state is known (same as
    * [[topologyKnownUntilTimestamp]]).
    */
  protected def currentKnownTime: CantonTimestamp = topologyKnownUntilTimestamp

  /** Number of effective-time advancements that have been scheduled on the clock
    * (in [[observedInternal]]) but have not fired yet.
    */
  override def numPendingChanges: Int = pendingChanges.get()

  /** Records an observed (sequenced, effective) timestamp pair.
    *
    * Immediately moves the approximate time to the sequenced timestamp, and — when the
    * effective timestamp lies in the future relative to the sequenced one — schedules a
    * clock task that advances the approximate time to the effective timestamp once the
    * topology-change delay has elapsed.
    */
  protected def observedInternal(
      sequencedTimestamp: SequencedTime,
      effectiveTimestamp: EffectiveTime,
  )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = {

    // we update the head timestamp approximation with the current sequenced timestamp, right now
    updateHead(
      effectiveTimestamp,
      ApproximateTime(sequencedTimestamp.value),
      potentialTopologyChange = false,
    )
    // notify anyone who is waiting on some condition
    checkAwaitingConditions()
    // and we schedule an update to the effective time in due time so that we start using the
    // right keys at the right time.
    if (effectiveTimestamp.value > sequencedTimestamp.value) {
      val deltaDuration = effectiveTimestamp.value - sequencedTimestamp.value
      pendingChanges.incrementAndGet()
      // schedule using after as we don't know the clock synchronisation level, but we know the relative time.
      clock
        .scheduleAfter(
          _ => {
            updateHead(
              effectiveTimestamp,
              ApproximateTime(effectiveTimestamp.value),
              potentialTopologyChange = true,
            )
            if (pendingChanges.decrementAndGet() == 0) {
              logger.debug(
                s"Effective at $effectiveTimestamp, there are no more pending topology changes (last were from $sequencedTimestamp)"
              )
            }
          },
          deltaDuration,
        )
        .discard
    }
    FutureUnlessShutdown.unit
  }

  /** Returns whether a snapshot for the given timestamp is available. */
  override def snapshotAvailable(timestamp: CantonTimestamp): Boolean =
    topologyKnownUntilTimestamp >= timestamp

  // exclusive upper bound: the successor of the effective time of the current head state
  override def topologyKnownUntilTimestamp: CantonTimestamp =
    head.get().effectiveTimestamp.value.immediateSuccessor

  /** returns the current approximate timestamp
    *
    * whenever we get an update, we do set the approximate timestamp first to the sequencer time
    * and schedule an update on the clock to advance the approximate time to the effective time
    * after the time difference elapsed.
    */
  override def approximateTimestamp: CantonTimestamp =
    head.get().approximateTimestamp.value.immediateSuccessor

  /** Shutdown-aware wait for a timestamp to become known.
    *
    * If `waitForEffectiveTime` is false, waits instead until `timestamp` plus the
    * topology-change delay (epsilon, read from the dynamic domain parameters at
    * `timestamp`) is known.
    */
  override def awaitTimestampUS(timestamp: CantonTimestamp, waitForEffectiveTime: Boolean)(implicit
      traceContext: TraceContext
  ): Option[FutureUnlessShutdown[Unit]] =
    if (waitForEffectiveTime)
      this.awaitKnownTimestampUS(timestamp)
    else
      Some(
        for {
          snapshotAtTs <- awaitSnapshotUS(timestamp)
          parametersAtTs <- performUnlessClosingF(functionFullName)(
            snapshotAtTs.findDynamicDomainParametersOrDefault(protocolVersion)
          )
          epsilonAtTs = parametersAtTs.topologyChangeDelay
          // then, wait for t+e
          _ <- awaitKnownTimestampUS(timestamp.plus(epsilonAtTs.unwrap))
            .getOrElse(FutureUnlessShutdown.unit)
        } yield ()
      )

  /** Plain-future variant of [[awaitTimestampUS]]; returns None when no waiting is needed. */
  override def awaitTimestamp(
      timestamp: CantonTimestamp,
      waitForEffectiveTime: Boolean,
  )(implicit traceContext: TraceContext): Option[Future[Unit]] = if (waitForEffectiveTime)
    this.awaitKnownTimestamp(timestamp)
  else if (approximateTimestamp >= timestamp) None
  else {
    Some(
      // first, let's wait until we can determine the epsilon for the given timestamp
      for {
        snapshotAtTs <- awaitSnapshot(timestamp)
        parametersAtTs <- snapshotAtTs.findDynamicDomainParametersOrDefault(protocolVersion)
        epsilonAtTs = parametersAtTs.topologyChangeDelay
        // then, wait for t+e
        _ <- awaitKnownTimestamp(timestamp.plus(epsilonAtTs.unwrap)).getOrElse(Future.unit)
      } yield ()
    )
  }

  override protected def onClosed(): Unit = {
    // release pending time-awaiter futures before running the parent close logic
    expireTimeAwaiter()
    super.onClosed()
  }

  /** Evaluates `condition` against the current approximate snapshot via `scheduleAwait`
    * (re-check/timeout semantics are defined by `scheduleAwait` — declared elsewhere).
    */
  override def await(condition: TopologySnapshot => Future[Boolean], timeout: Duration)(implicit
      traceContext: TraceContext
  ): FutureUnlessShutdown[Boolean] =
    scheduleAwait(condition(currentSnapshotApproximation), timeout)

}

/** The domain topology client that reads data from a topology store
  *
  * @param domainId The domain-id corresponding to this store
  * @param store The store
  * @param initKeys The set of initial keys that should be mixed in in case we fetched an empty set of keys
  * @param useStateTxs Whether we use the state store or the transaction store. Generally, we use the state store
  *                    except in the authorized store
  */
class StoreBasedDomainTopologyClient(
    val clock: Clock,
    val domainId: DomainId,
    val protocolVersion: ProtocolVersion,
    store: TopologyStore[TopologyStoreId],
    initKeys: Map[Member, Seq[SigningPublicKey]],
    packageDependencies: PackageId => EitherT[Future, PackageId, Set[PackageId]],
    override val timeouts: ProcessingTimeout,
    override protected val futureSupervisor: FutureSupervisor,
    val loggerFactory: NamedLoggerFactory,
    useStateTxs: Boolean = true,
)(implicit val executionContext: ExecutionContext)
    extends BaseDomainTopologyClientOld
    with NamedLogging {

  /** Returns a snapshot at `timestamp`; fails (via `ErrorUtil.requireArgument`) when the
    * timestamp is beyond [[topologyKnownUntilTimestamp]].
    */
  override def trySnapshot(
      timestamp: CantonTimestamp
  )(implicit traceContext: TraceContext): StoreBasedTopologySnapshot = {
    ErrorUtil.requireArgument(
      timestamp <= topologyKnownUntilTimestamp,
      s"requested snapshot=$timestamp, topology known until=$topologyKnownUntilTimestamp",
    )
    new StoreBasedTopologySnapshot(
      timestamp,
      store,
      initKeys,
      useStateTxs = useStateTxs,
      packageDependencies,
      loggerFactory,
    )
  }

}

object StoreBasedDomainTopologyClient {

  // package-dependency lookup that always reports an empty dependency set
  def NoPackageDependencies: PackageId => EitherT[Future, PackageId, Set[PackageId]] = { _ =>
    EitherT(Future.successful(Either.right(Set.empty[PackageId])))
  }
}

/** Topology snapshot
loader
  *
  * @param timestamp the asOf timestamp to use
  * @param store the db store to use
  * @param initKeys any additional keys to use (for bootstrapping domains)
  * @param useStateTxs whether the queries should use the state or the tx store. state store means all tx are properly authorized
  * @param packageDependencies lookup function to determine the direct and indirect package dependencies
  */
class StoreBasedTopologySnapshot(
    val timestamp: CantonTimestamp,
    store: TopologyStore[TopologyStoreId],
    initKeys: Map[Member, Seq[SigningPublicKey]],
    useStateTxs: Boolean,
    packageDependencies: PackageId => EitherT[Future, PackageId, Set[PackageId]],
    val loggerFactory: NamedLoggerFactory,
)(implicit val executionContext: ExecutionContext)
    extends TopologySnapshotLoader
    with NamedLogging
    with NoTracing {

  /** Queries positive topology transactions valid at [[timestamp]], from either the state
    * store or the transaction store depending on `useStateTxs`.
    */
  private def findTransactions(
      asOfInclusive: Boolean,
      includeSecondary: Boolean,
      types: Seq[DomainTopologyTransactionType],
      filterUid: Option[Seq[UniqueIdentifier]],
      filterNamespace: Option[Seq[Namespace]],
  )(implicit
      traceContext: TraceContext
  ): Future[StoredTopologyTransactions[TopologyChangeOp.Positive]] =
    if (useStateTxs)
      store
        .findStateTransactions(
          timestamp,
          asOfInclusive,
          includeSecondary,
          types,
          filterUid,
          filterNamespace,
        )
        .map(_.combine)
    else
      store
        .findPositiveTransactions(
          timestamp,
          asOfInclusive,
          includeSecondary,
          types,
          filterUid,
          filterNamespace,
        )
        .map(_.combine)

  // helper class used to fold a sequence of mappings, aggregating permissions and participant parties
  private case class PartyAggregation(
      work: Map[ParticipantId, (Option[ParticipantPermission], Option[ParticipantPermission])]
  ) {

    def addPartyToParticipantMapping(mapping: PartyToParticipant): PartyAggregation =
      update(mapping.participant, mapping.side, mapping.permission)

    // folds a permission into the (from, to) pair for the given request side,
    // always keeping the lower of the permissions seen so far
    private def update(
        participant: ParticipantId,
        side: RequestSide,
        permission: ParticipantPermission,
    ): PartyAggregation = {
      val (from, to) = work.getOrElse(participant, (None, None))

      def mix(cur: Option[ParticipantPermission]) =
        Some(ParticipantPermission.lowerOf(permission, cur.getOrElse(permission)))

      val updated = side match {
        case RequestSide.Both => (mix(from), mix(to))
        case RequestSide.To => (from, mix(to))
        case RequestSide.From => (mix(from), to)
      }
      copy(work = work.updated(participant, updated))
    }

    // participant states only contribute while the permission is active
    def addParticipantState(ps: ParticipantState): PartyAggregation = {
      if (ps.permission.isActive)
        update(ps.participant, ps.side, ps.permission)
      else
        this
    }

  }

  override private[client] def loadActiveParticipantsOf(
      party: PartyId,
      fetchParticipantStates: Seq[ParticipantId] => Future[
        Map[ParticipantId, ParticipantAttributes]
      ],
  ): Future[PartyInfo] =
    loadBatchActiveParticipantsOf(Seq(party), fetchParticipantStates).map(
      _.getOrElse(party, PartyInfo.EmptyPartyInfo)
    )

  /** Loads, for each requested party, the hosting participants with their capped
    * permissions (party permission capped at the participant's domain permission).
    * Always returns an entry per requested party.
    */
  override private[client] def loadBatchActiveParticipantsOf(
      parties: Seq[PartyId],
      fetchParticipantStates: Seq[ParticipantId] => Future[
        Map[ParticipantId, ParticipantAttributes]
      ],
  ): Future[Map[PartyId, PartyInfo]] = {
    def update(
        party: PartyId,
        mp: Map[PartyId, PartyAggregation],
        appender: PartyAggregation => PartyAggregation,
    ): Map[PartyId, PartyAggregation] = {
      mp + (party -> appender(mp.getOrElse(party, PartyAggregation(Map()))))
    }
    for {

      // get all party to participant mappings and also participant states for this uid (latter to mix in admin parties)
      transactions <- findTransactions(
        asOfInclusive = false,
        includeSecondary = false,
        types = Seq(
          DomainTopologyTransactionType.ParticipantState,
          DomainTopologyTransactionType.PartyToParticipant,
        ),
        filterUid = Some(parties.map(_.uid)),
        filterNamespace = None,
      ).map(_.toTopologyState)

      // aggregate the mappings, looking for matching request sides
      allAggregated = transactions.foldLeft(Map.empty[PartyId, PartyAggregation]) {
        case (acc, TopologyStateUpdateElement(_, pp: PartyToParticipant)) =>
          update(pp.party, acc, _.addPartyToParticipantMapping(pp))
        // aggregate participant states (for admin parties)
        case (acc, TopologyStateUpdateElement(_, ps: ParticipantState)) =>
          update(ps.participant.adminParty, acc, _.addParticipantState(ps))
        case (acc, _) => acc
      }
      // fetch the participant permissions on this domain
      participantStateMap <- fetchParticipantStates(
        allAggregated.values.flatMap(_.work.keys).toSeq.distinct
      )
    } yield {
      // cap the party to participant permission to the participant permission
      def capped(aggregated: PartyAggregation): Map[ParticipantId, ParticipantAttributes] = {
        aggregated.work
          .map { case (participantId, (from, to)) =>
            val participantState =
              participantStateMap.getOrElse(
                participantId,
                ParticipantAttributes(ParticipantPermission.Disabled, TrustLevel.Ordinary),
              )
            // using the lowest permission available
            val reducedPerm = ParticipantPermission.lowerOf(
              from.getOrElse(ParticipantPermission.Disabled),
              ParticipantPermission
                .lowerOf(
                  to.getOrElse(ParticipantPermission.Disabled),
                  participantState.permission,
                ),
            )
            (participantId, ParticipantAttributes(reducedPerm, participantState.trustLevel))
          }
          // filter out inactive
          .filter(_._2.permission.isActive)
      }
      val partyToParticipantAttributes = allAggregated.fmap(v => capped(v))
      // For each party we must return a result to satisfy the expectations of the
      // calling CachingTopologySnapshot's caffeine partyCache per findings in #11598.
      parties.map { party =>
        party -> PartyInfo.nonConsortiumPartyInfo(
          partyToParticipantAttributes.getOrElse(party, Map.empty)
        )
      }.toMap
    }
  }

  /** Returns all keys registered for `owner`, falling back to `initKeys` when the store
    * yields no signing keys (domain bootstrapping).
    */
  override def allKeys(owner: Member): Future[KeyCollection] =
    findTransactions(
      asOfInclusive = false,
      includeSecondary = false,
      types = Seq(DomainTopologyTransactionType.OwnerToKeyMapping),
      filterUid = Some(Seq(owner.uid)),
      filterNamespace = None,
    ).map(_.toTopologyState)
      .map(_.collect {
        case TopologyStateUpdateElement(_, OwnerToKeyMapping(foundOwner, key))
            if foundOwner.code == owner.code =>
          key
      }.foldLeft(KeyCollection(Seq(), Seq()))((acc, key) => acc.addTo(key)))
      .map { collection =>
        // add initialisation keys if necessary
        if (collection.signingKeys.isEmpty) {
          initKeys
            .get(owner)
            .fold(collection)(_.foldLeft(collection)((acc, elem) => acc.addTo(elem)))
        } else {
          collection
        }
      }

  /** Lists all participants with their effective permission on this domain. */
  override def participants(): Future[Seq[(ParticipantId, ParticipantPermission)]] =
    findTransactions(
      asOfInclusive = false,
      includeSecondary = false,
      types = Seq(DomainTopologyTransactionType.ParticipantState),
      filterUid = None,
      filterNamespace = None,
    ).map(_.toTopologyState)
      // TODO(i4930) this is quite inefficient
      .map(_.collect { case TopologyStateUpdateElement(_, ps: ParticipantState) =>
        ps.participant
      })
      .flatMap { all =>
        loadParticipantStates(all.distinct)
      }
      .map {
        _.map { case (k, v) =>
          (k, v.permission)
        }.toSeq
      }

  /** Resolves participant attributes by merging the `From` (domain side) and `To`
    * (participant side) participant-state transactions; only participants with both
    * sides present are returned.
    */
  override def loadParticipantStates(
      participants: Seq[ParticipantId]
  ): Future[Map[ParticipantId, ParticipantAttributes]] = {
    def merge(
        current: (Option[ParticipantAttributes], Option[ParticipantAttributes]),
        ps: ParticipantState,
    ): (Option[ParticipantAttributes], Option[ParticipantAttributes]) = {
      val (from, to) = current
      val rel = ParticipantAttributes(ps.permission, ps.trustLevel)

      def mix(cur: Option[ParticipantAttributes]) = Some(cur.getOrElse(rel).merge(rel))

      ps.side match {
        case RequestSide.From
          => (mix(from), to)
        case RequestSide.To => (from, mix(to))
        case RequestSide.Both => (mix(from), mix(to))
      }
    }

    implicit val traceContext: TraceContext = TraceContext.todo
    if (participants.isEmpty) Future.successful(Map())
    else {
      findTransactions(
        asOfInclusive = false,
        includeSecondary = false,
        types = Seq(DomainTopologyTransactionType.ParticipantState),
        filterUid = Some(participants.map(_.uid)),
        filterNamespace = None,
      ).map(_.toTopologyState)
        .map { loaded =>
          loaded
            .foldLeft(
              Map
                .empty[
                  ParticipantId,
                  (Option[ParticipantAttributes], Option[ParticipantAttributes]),
                ]
            ) {
              case (acc, TopologyStateUpdateElement(_, ps: ParticipantState)) =>
                acc.updated(ps.participant, merge(acc.getOrElse(ps.participant, (None, None)), ps))
              case (acc, _) => acc
            }
            .mapFilter {
              case (Some(lft), Some(rght)) =>
                // merge permissions granted by participant vs domain
                // but take trust level only from "from" (domain side)
                Some(
                  ParticipantAttributes(
                    ParticipantPermission.lowerOf(lft.permission, rght.permission),
                    lft.trustLevel,
                  )
                )
              // participants lacking either side are not active on this domain
              case (None, None) => None
              case (None, _) => None
              case (_, None) => None
            }
        }
    }
  }

  /** Looks up the attributes of a single participant, if it is active on this domain. */
  override def findParticipantState(
      participantId: ParticipantId
  ): Future[Option[ParticipantAttributes]] =
    loadParticipantStates(Seq(participantId)).map(_.get(participantId))

  /** Finds the most recent X509 legal-identity certificate registered for the participant;
    * parse failures are logged and treated as absent.
    */
  override def findParticipantCertificate(
      participantId: ParticipantId
  )(implicit traceContext: TraceContext): Future[Option[LegalIdentityClaimEvidence.X509Cert]] = {
    import cats.implicits.*
    findTransactions(
      asOfInclusive = false,
      includeSecondary = false,
      types = Seq(DomainTopologyTransactionType.SignedLegalIdentityClaim),
      filterUid = Some(Seq(participantId.uid)),
      filterNamespace = None,
    ).map(_.toTopologyState.reverse.collectFirstSome {
      case TopologyStateUpdateElement(_id, SignedLegalIdentityClaim(_, claimBytes, _signature)) =>
        val result = for {
          claim <- LegalIdentityClaim
            .fromByteString(claimBytes)
            .leftMap(err => s"Failed to parse legal identity claim proto: $err")

          certOpt = claim.evidence match {
            case cert: LegalIdentityClaimEvidence.X509Cert if claim.uid == participantId.uid =>
              Some(cert)
            case _ => None
          }
        } yield certOpt

        result.valueOr { err =>
          logger.error(s"Failed to inspect domain topology state for participant certificate: $err")
          None
        }

      case _ => None
    })
  }

  /** Returns the keys of all known owners matching the filters (at most `limit` owners) */
  override def inspectKeys(
      filterOwner: String,
      filterOwnerType: Option[MemberCode],
      limit: Int,
  ): Future[Map[Member, KeyCollection]] = {
    store
      .inspect(
        stateStore = useStateTxs,
        timeQuery = TimeQuery.Snapshot(timestamp),
        recentTimestampO = None,
        ops = Some(TopologyChangeOp.Add),
        typ = Some(DomainTopologyTransactionType.OwnerToKeyMapping),
        idFilter = filterOwner,
        namespaceOnly = false,
      )
      .map { col =>
        col.toTopologyState
          .map(_.mapping)
          .collect {
            case OwnerToKeyMapping(owner, key)
                if owner.filterString.startsWith(filterOwner)
                  && filterOwnerType.forall(_ == owner.code) =>
              (owner, key)
          }
          .groupBy(_._1)
          .map { case (owner, keys) =>
            (
              owner,
              keys.foldLeft(KeyCollection.empty) { case (col, (_, publicKey)) =>
                col.addTo(publicKey)
              },
            )
          }
          .take(limit)
      }
  }

  /** Returns a list of all known parties on this domain */
  override def inspectKnownParties(
      filterParty: String,
      filterParticipant: String,
      limit: Int,
  ): Future[Set[PartyId]] =
    store.inspectKnownParties(timestamp, filterParty, filterParticipant, limit)

  /** Returns the given package (if unvetted) or its unvetted dependencies for the
    * participant; Left carries a package id whose dependency lookup failed.
    */
  override private[client] def loadUnvettedPackagesOrDependencies(
      participant: ParticipantId,
      packageId: PackageId,
  ): EitherT[Future, PackageId, Set[PackageId]] = {

    val vettedET = EitherT.right[PackageId](
      findTransactions(
        asOfInclusive = false,
        includeSecondary = false,
        types = Seq(DomainTopologyTransactionType.PackageUse),
        filterUid = Some(Seq(participant.uid)),
filterNamespace = None, + ).map { res => + res.toTopologyState.flatMap { + case TopologyStateUpdateElement(_, VettedPackages(_, packageIds)) => packageIds + case _ => Seq() + }.toSet + } + ) + + val dependenciesET = packageDependencies(packageId) + + for { + vetted <- vettedET + // check that the main package is vetted + res <- + if (!vetted.contains(packageId)) + EitherT.rightT[Future, PackageId](Set(packageId)) // main package is not vetted + else { + // check which of the dependencies aren't vetted + for { + dependencies <- dependenciesET + } yield dependencies -- vetted + } + } yield res + + } + + /** returns the list of currently known mediators + * for singular mediators each one must be wrapped into its own group with threshold = 1 + * group index in 2.0 topology management is not used and the order of output does not need to be stable + */ + override def mediatorGroups(): Future[Seq[MediatorGroup]] = findTransactions( + asOfInclusive = false, + includeSecondary = false, + types = Seq(DomainTopologyTransactionType.MediatorDomainState), + filterUid = None, + filterNamespace = None, + ).map { res => + ArraySeq + .from( + res.toTopologyState + .foldLeft(Map.empty[MediatorId, (Boolean, Boolean)]) { + case (acc, TopologyStateUpdateElement(_, MediatorDomainState(side, _, mediator))) => + acc + (mediator -> RequestSide + .accumulateSide(acc.getOrElse(mediator, (false, false)), side)) + case (acc, _) => acc + } + .filter { case (_, (lft, rght)) => + lft && rght + } + .keys + ) + .zipWithIndex + .map { case (id, index) => + MediatorGroup( + index = NonNegativeInt.tryCreate(index), + Seq(id), + Seq.empty, + threshold = PositiveInt.one, + ) + } + } + + /** returns the current sequencer group if known + * TODO(#14048): Decide whether it is advantageous e.g. 
for testing to expose a sequencer-group on daml 2.* + * perhaps we cook up a SequencerId based on the domainId assuming that the sequencer (or sequencers all with the + * same sequencerId) is/are active + */ + override def sequencerGroup(): Future[Option[SequencerGroup]] = Future.failed( + new UnsupportedOperationException( + "SequencerGroup lookup not supported by StoreBasedDomainTopologyClient. This is a coding bug." + ) + ) + + override def allMembers(): Future[Set[Member]] = Future.failed( + new UnsupportedOperationException( + "Lookup of all members is not supported by StoredBasedDomainTopologyClient. This is a coding bug." + ) + ) + + override def isMemberKnown(member: Member): Future[Boolean] = Future.failed( + new UnsupportedOperationException( + "Lookup of members via isMemberKnown is not supported by StoredBasedDomainTopologyClient. This is a coding bug." + ) + ) + + override def findDynamicDomainParameters()(implicit + traceContext: TraceContext + ): Future[Either[String, DynamicDomainParametersWithValidity]] = + findTransactions( + asOfInclusive = false, + includeSecondary = false, + types = Seq(DomainTopologyTransactionType.DomainParameters), + filterUid = None, + filterNamespace = None, + ).map { storedTxs => + val domainParameters = storedTxs.result + .mapFilter { storedTx => + storedTx.transaction.transaction.element match { + case DomainGovernanceElement(DomainParametersChange(domainId, domainParameters)) => + Some( + DynamicDomainParametersWithValidity( + domainParameters, + storedTx.validFrom.value, + storedTx.validUntil.map(_.value), + domainId, + ) + ) + case _ => None + } + } + + // We sort the results to be able to pick the most recent one in case + // several transactions are found. 
      val sortedDomainParameters = domainParameters.sortBy(_.validFrom)

      NonEmpty.from(sortedDomainParameters).map { domainParametersNel =>
        if (domainParametersNel.sizeCompare(1) > 0)
          logger.warn(
            s"Expecting only one dynamic domain parameters, ${domainParametersNel.size} found. Considering the most recent one."
          )
        domainParametersNel.last1
      }
    }.map(_.toRight(s"Unable to fetch domain parameters at $timestamp"))

  /** Lists all dynamic domain parameter changes (past and current) up to [[timestamp]]. */
  override def listDynamicDomainParametersChanges()(implicit
      traceContext: TraceContext
  ): Future[Seq[DynamicDomainParametersWithValidity]] = store
    .inspect(
      stateStore = false,
      timeQuery = TimeQuery.Range(None, Some(timestamp)),
      recentTimestampO = None,
      ops = Some(TopologyChangeOp.Replace),
      typ = Some(DomainTopologyTransactionType.DomainParameters),
      idFilter = "",
      namespaceOnly = false,
    )
    .map {
      _.result
        .map(storedTx =>
          (
            storedTx.validFrom.value,
            storedTx.validUntil.map(_.value),
            storedTx.transaction.transaction.element,
          )
        )
        .collect {
          case (
                validFrom,
                validUntil,
                DomainGovernanceElement(DomainParametersChange(domainId, domainParameters)),
              ) =>
            DynamicDomainParametersWithValidity(domainParameters, validFrom, validUntil, domainId)
        }
    }

  override def trafficControlStatus(
      members: Seq[Member]
  ): Future[Map[Member, Option[MemberTrafficControlState]]] = {
    // Non-X topology management does not support traffic control transactions
    Future.successful(members.map(_ -> None).toMap)
  }

  /*
  This client does not support consortium parties, i.e. every requested
  party is treated as delegating to itself with threshold 1
   */
  override def authorityOf(
      parties: Set[LfPartyId]
  ): Future[PartyTopologySnapshotClient.AuthorityOfResponse] =
    Future.successful(
      PartyTopologySnapshotClient.AuthorityOfResponse(
        parties.map(partyId => partyId -> nonConsortiumPartyDelegation(partyId)).toMap
      )
    )
}

object StoreBasedTopologySnapshot {

  /** Builds a snapshot over the authorized store at the "head" state. */
  def headstateOfAuthorizedStore(
      topologyStore: TopologyStore[AuthorizedStore],
      loggerFactory: NamedLoggerFactory,
  )(implicit executionContext: ExecutionContext): StoreBasedTopologySnapshot = {
    new StoreBasedTopologySnapshot(
      CantonTimestamp.MaxValue, // we use a max value here, as this will give us the "head snapshot" transactions (valid_from < t && until.isNone)
      topologyStore,
      Map(),
      useStateTxs = false,
      packageDependencies = StoreBasedDomainTopologyClient.NoPackageDependencies,
      loggerFactory,
    )
  }
}
diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/client/StoreBasedDomainTopologyClientX.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/client/StoreBasedDomainTopologyClientX.scala
new file mode 100644
index 0000000000..eed7b5926e
--- /dev/null
+++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/client/StoreBasedDomainTopologyClientX.scala
@@ -0,0 +1,720 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.topology.client

import cats.data.EitherT
import cats.syntax.functorFilter.*
import cats.syntax.option.*
import com.daml.lf.data.Ref.PackageId
import com.digitalasset.canton.LfPartyId
import com.digitalasset.canton.concurrent.FutureSupervisor
import com.digitalasset.canton.config.ProcessingTimeout
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.protocol.DynamicDomainParametersWithValidity
import com.digitalasset.canton.time.Clock
import com.digitalasset.canton.topology.*
import com.digitalasset.canton.topology.client.PartyTopologySnapshotClient.{
  AuthorityOfDelegation,
  AuthorityOfResponse,
  PartyInfo,
}
import com.digitalasset.canton.topology.store.*
import com.digitalasset.canton.topology.transaction.*
import com.digitalasset.canton.tracing.{NoTracing, TraceContext}
import com.digitalasset.canton.util.ErrorUtil
import com.digitalasset.canton.version.ProtocolVersion

import scala.concurrent.{ExecutionContext, Future}
import scala.reflect.ClassTag

/** The domain topology client that reads data from a topology store
  *
  * @param domainId The domain-id corresponding to this store
  * @param store The store
  */
class StoreBasedDomainTopologyClientX(
    val clock: Clock,
    val domainId: DomainId,
    val protocolVersion: ProtocolVersion,
    store: TopologyStoreX[TopologyStoreId],
    packageDependencies: PackageId => EitherT[Future, PackageId, Set[PackageId]],
    override val timeouts: ProcessingTimeout,
    override protected val futureSupervisor: FutureSupervisor,
    val loggerFactory: NamedLoggerFactory,
)(implicit val executionContext: ExecutionContext)
    extends BaseDomainTopologyClientX
    with NamedLogging {

  /** Returns a snapshot at `timestamp`; fails (via `ErrorUtil.requireArgument`) when the
    * timestamp is beyond `topologyKnownUntilTimestamp`.
    */
  override def trySnapshot(
      timestamp: CantonTimestamp
  )(implicit traceContext: TraceContext): StoreBasedTopologySnapshotX = {
    ErrorUtil.requireArgument(
      timestamp <= topologyKnownUntilTimestamp,
      s"requested snapshot=$timestamp, topology known until=$topologyKnownUntilTimestamp",
    )
    new StoreBasedTopologySnapshotX(
      timestamp,
      store,
      packageDependencies,
      loggerFactory,
    )
  }

}

/** Topology snapshot loader
  *
  * @param timestamp the asOf timestamp to use
  * @param store the db store to use
  * @param packageDependencies lookup function to determine the direct and indirect package dependencies
  */
class StoreBasedTopologySnapshotX(
    val timestamp: CantonTimestamp,
    store: TopologyStoreX[TopologyStoreId],
    packageDependencies: PackageId => EitherT[Future, PackageId, Set[PackageId]],
    val loggerFactory: NamedLoggerFactory,
)(implicit val executionContext: ExecutionContext)
    extends TopologySnapshotLoader
    with NamedLogging
    with NoTracing {

  /** Queries fully-authorized (non-proposal) positive transactions valid at [[timestamp]]. */
  private def findTransactions(
      asOfInclusive: Boolean,
      types: Seq[TopologyMappingX.Code],
      filterUid: Option[Seq[UniqueIdentifier]],
      filterNamespace: Option[Seq[Namespace]],
  )(implicit
      traceContext: TraceContext
  ): Future[StoredTopologyTransactionsX[TopologyChangeOpX.Replace, TopologyMappingX]] =
    store
      .findPositiveTransactions(
        timestamp,
        asOfInclusive,
        isProposal = false,
        types,
        filterUid,
        filterNamespace,
      )

  /** Returns the given package (if unvetted) or its unvetted dependencies for the
    * participant; Left carries a package id whose dependency lookup failed.
    */
  override private[client] def loadUnvettedPackagesOrDependencies(
      participant: ParticipantId,
      packageId: PackageId,
  ): EitherT[Future, PackageId, Set[PackageId]] = {

    val vettedET = EitherT.right[PackageId](
      findTransactions(
        asOfInclusive = false,
        types = Seq(TopologyMappingX.Code.VettedPackagesX),
        filterUid = Some(Seq(participant.uid)),
        filterNamespace = None,
      ).map { transactions =>
        collectLatestMapping(
          TopologyMappingX.Code.VettedPackagesX,
          transactions.collectOfMapping[VettedPackagesX].result,
        ).toList.flatMap(_.packageIds).toSet
      }
    )

    val requiredPackagesET = EitherT.right[PackageId](
      findTransactions(
        asOfInclusive = false,
        types =
Seq(TopologyMappingX.Code.DomainParametersStateX), + filterUid = None, + filterNamespace = None, + ).map { transactions => + collectLatestMapping( + TopologyMappingX.Code.DomainParametersStateX, + transactions.collectOfMapping[DomainParametersStateX].result, + ).getOrElse(throw new IllegalStateException("Unable to locate domain parameters state")) + .discard + + // TODO(#14054) Once the non-proto DynamicDomainParametersX is available, use it + // _.parameters.requiredPackages + Seq.empty[PackageId] + } + ) + + lazy val dependenciesET = packageDependencies(packageId) + + for { + vetted <- vettedET + requiredPackages <- requiredPackagesET + // check that the main package is vetted + res <- + if (!vetted.contains(packageId)) + EitherT.rightT[Future, PackageId](Set(packageId)) // main package is not vetted + else { + // check which of the dependencies aren't vetted + dependenciesET.map(deps => (deps ++ requiredPackages) -- vetted) + } + } yield res + + } + + override def findDynamicDomainParameters()(implicit + traceContext: TraceContext + ): Future[Either[String, DynamicDomainParametersWithValidity]] = + findTransactions( + asOfInclusive = false, + types = Seq(TopologyMappingX.Code.DomainParametersStateX), + filterUid = None, + filterNamespace = None, + ).map { transactions => + for { + storedTx <- collectLatestTransaction( + TopologyMappingX.Code.DomainParametersStateX, + transactions + .collectOfMapping[DomainParametersStateX] + .result, + ).toRight(s"Unable to fetch domain parameters at $timestamp") + + domainParameters = { + val mapping = storedTx.transaction.transaction.mapping + DynamicDomainParametersWithValidity( + mapping.parameters, + storedTx.validFrom.value, + storedTx.validUntil.map(_.value), + mapping.domain, + ) + } + } yield domainParameters + } + + /** List all the dynamic domain parameters (past and current) */ + override def listDynamicDomainParametersChanges()(implicit + traceContext: TraceContext + ): 
Future[Seq[DynamicDomainParametersWithValidity]] = store + .inspect( + proposals = false, + timeQuery = TimeQueryX.Range(None, Some(timestamp)), + recentTimestampO = None, + op = Some(TopologyChangeOpX.Replace), + typ = Some(TopologyMappingX.Code.DomainParametersStateX), + idFilter = "", + namespaceOnly = false, + ) + .map { + _.collectOfMapping[DomainParametersStateX].result + .map { storedTx => + val dps = storedTx.transaction.transaction.mapping + DynamicDomainParametersWithValidity( + dps.parameters, + storedTx.validFrom.value, + storedTx.validUntil.map(_.value), + dps.domain, + ) + } + } + + override private[client] def loadActiveParticipantsOf( + party: PartyId, + participantStates: Seq[ParticipantId] => Future[Map[ParticipantId, ParticipantAttributes]], + ): Future[PartyInfo] = + loadBatchActiveParticipantsOf(Seq(party), participantStates).map( + _.getOrElse(party, PartyInfo.EmptyPartyInfo) + ) + + override private[client] def loadBatchActiveParticipantsOf( + parties: Seq[PartyId], + loadParticipantStates: Seq[ParticipantId] => Future[Map[ParticipantId, ParticipantAttributes]], + ): Future[Map[PartyId, PartyInfo]] = { + + def collectLatestByType[M <: TopologyMappingX: ClassTag]( + storedTransactions: StoredTopologyTransactionsX[ + TopologyChangeOpX.Replace, + TopologyMappingX, + ], + code: TopologyMappingX.Code, + ): Seq[M] = { + storedTransactions + .collectOfMapping[M] + .result + .groupBy(_.transaction.transaction.mapping.uniqueKey) + .map { case (_, seq) => + collectLatestMapping[M]( + code, + seq.sortBy(_.validFrom), + ).getOrElse( + throw new IllegalStateException( + "Group-by would not have produced empty PartyToParticipantX seq" + ) + ) + } + .toSeq + } + + for { + // get all party to participant mappings and also participant states for this uid (latter to mix in admin parties) + partyData <- findTransactions( + asOfInclusive = false, + types = Seq( + TopologyMappingX.Code.PartyToParticipantX, + TopologyMappingX.Code.DomainTrustCertificateX, + ), + 
filterUid = Some(parties.map(_.uid)), + filterNamespace = None, + ).map { storedTransactions => + // find normal party declarations + val partyToParticipantMappings = collectLatestByType[PartyToParticipantX]( + storedTransactions, + TopologyMappingX.Code.PartyToParticipantX, + ).map { ptp => + ptp.partyId -> (ptp.groupAddressing, ptp.threshold, ptp.participants.map { + case HostingParticipant(participantId, partyPermission) => + participantId -> partyPermission.toNonX + }.toMap) + }.toMap + + // admin parties are implicitly defined by the fact that a participant is available on a domain. + // admin parties have the same UID as their participant + val domainTrustCerts = collectLatestByType[DomainTrustCertificateX]( + storedTransactions, + TopologyMappingX.Code.DomainTrustCertificateX, + ).map(cert => cert.participantId) + + (partyToParticipantMappings, domainTrustCerts) + } + (partyToParticipantMap, adminPartyParticipants) = partyData + + // fetch all admin parties + participantIds = partyToParticipantMap.values + .map(_._3) + .flatMap(_.keys) + .toSeq ++ adminPartyParticipants + + participantToAttributesMap <- loadParticipantStates(participantIds) + + adminPartiesMap = adminPartyParticipants + .mapFilter(participantId => + participantToAttributesMap + .get(participantId) + .map(attrs => + // participant admin parties are never consortium parties + participantId.adminParty -> PartyInfo + .nonConsortiumPartyInfo(Map(participantId -> attrs)) + ) + ) + .toMap + + // In case the party->participant mapping contains participants missing from map returned + // by loadParticipantStates, filter out participants with "empty" permissions and transitively + // parties whose participants have all been filtered out this way. 
+ // this can only affect participants that have left the domain + partiesToPartyInfos = { + val p2pMappings = partyToParticipantMap.toSeq.mapFilter { + case (partyId, (groupAddressing, threshold, participantToPermissionsMap)) => + val participantIdToAttribs = participantToPermissionsMap.toSeq.mapFilter { + case (participantId, partyPermission) => + participantToAttributesMap + .get(participantId) + .map { participantAttributes => + // Use the lower permission between party and the permission granted to the participant by the domain + val reducedPermission = { + ParticipantPermission.lowerOf( + partyPermission, + participantAttributes.permission, + ) + } + participantId -> ParticipantAttributes( + reducedPermission, + participantAttributes.trustLevel, + ) + } + }.toMap + if (participantIdToAttribs.isEmpty) None + else Some(partyId -> PartyInfo(groupAddressing, threshold, participantIdToAttribs)) + }.toMap + p2pMappings ++ + adminPartiesMap.collect { + // TODO(#12390) - Remove this extra caution not to add admin parties if somehow they already exist as p2p + // mappings once corresponding validation is in place. + case x @ (adminPartyId, _) if !p2pMappings.contains(adminPartyId) => x + } + } + // For each party we must return a result to satisfy the expectations of the + // calling CachingTopologySnapshot's caffeine partyCache per findings in #11598. + // This includes parties not found in the topology store or parties filtered out + // above, e.g. parties whose participants have left the domain. 
+ fullySpecifiedPartyMap = parties.map { party => + party -> partiesToPartyInfos.getOrElse(party, PartyInfo.EmptyPartyInfo) + }.toMap + } yield fullySpecifiedPartyMap + } + + /** returns the list of currently known mediator groups */ + override def mediatorGroups(): Future[Seq[MediatorGroup]] = findTransactions( + asOfInclusive = false, + types = Seq(TopologyMappingX.Code.MediatorDomainStateX), + filterUid = None, + filterNamespace = None, + ).map( + _.collectOfMapping[MediatorDomainStateX].result + .groupBy(_.transaction.transaction.mapping.group) + .map { case (groupId, seq) => + val mds = collectLatestMapping( + TopologyMappingX.Code.MediatorDomainStateX, + seq.sortBy(_.validFrom), + ) + .getOrElse(throw new IllegalStateException("Group-by would not have produced empty seq")) + MediatorGroup(groupId, mds.active, mds.observers, mds.threshold) + } + .toSeq + .sortBy(_.index) + ) + + override def sequencerGroup(): Future[Option[SequencerGroup]] = findTransactions( + asOfInclusive = false, + types = Seq(TopologyMappingX.Code.SequencerDomainStateX), + filterUid = None, + filterNamespace = None, + ).map { transactions => + collectLatestMapping( + TopologyMappingX.Code.SequencerDomainStateX, + transactions.collectOfMapping[SequencerDomainStateX].result, + ).map { (sds: SequencerDomainStateX) => + SequencerGroup(sds.active, sds.observers, sds.threshold) + } + } + + def trafficControlStatus( + members: Seq[Member] + ): Future[Map[Member, Option[MemberTrafficControlState]]] = findTransactions( + asOfInclusive = false, + types = Seq(TopologyMappingX.Code.TrafficControlStateX), + filterUid = Some(members.map(_.uid)), + filterNamespace = None, + ).map { txs => + val membersWithState = txs + .collectOfMapping[TrafficControlStateX] + .result + .groupBy(_.transaction.transaction.mapping.member) + .flatMap { case (member, mappings) => + collectLatestMapping( + TopologyMappingX.Code.TrafficControlStateX, + mappings.sortBy(_.validFrom), + ).map(mapping => + 
Some(MemberTrafficControlState(totalExtraTrafficLimit = mapping.totalExtraTrafficLimit)) + ).map(member -> _) + } + + /* members without any traffic-control state are reported explicitly as None */ val membersWithoutState = members.toSet.diff(membersWithState.keySet).map(_ -> None).toMap + + membersWithState ++ membersWithoutState + } + + /** Returns a list of all known parties on this domain */ + override def inspectKnownParties( + filterParty: String, + filterParticipant: String, + limit: Int, + ): Future[Set[PartyId]] = + store.inspectKnownParties(timestamp, filterParty, filterParticipant, limit) + + /** Returns authority-of delegations for consortium parties or self/1 for non consortium parties */ + override def authorityOf(parties: Set[LfPartyId]): Future[AuthorityOfResponse] = findTransactions( + asOfInclusive = false, + types = Seq(TopologyMappingX.Code.AuthorityOfX), + filterUid = None, + filterNamespace = None, + ).map { transactions => + val consortiumDelegations = + transactions + .collectOfMapping[AuthorityOfX] + .result + .groupBy(_.transaction.transaction.mapping.partyId) + .collect { + case (partyId, seq) if parties.contains(partyId.toLf) => + val authorityOf = collectLatestMapping( + TopologyMappingX.Code.AuthorityOfX, + seq.sortBy(_.validFrom), + ) + .getOrElse( + throw new IllegalStateException("Group-by would not have produced empty seq") + ) + partyId.toLf -> AuthorityOfDelegation( + authorityOf.parties.map(_.toLf).toSet, + authorityOf.threshold, + ) + } + // If not all parties are consortium parties, fall back to the behavior of the PartyTopologySnapshotClient super trait + // to produce a mapping to self with threshold one (without checking whether the party exists on the domain). 
+ val nonConsortiumPartyDelegations = (parties -- consortiumDelegations.keys) + .map(partyId => partyId -> PartyTopologySnapshotClient.nonConsortiumPartyDelegation(partyId)) + .toMap + AuthorityOfResponse(consortiumDelegations ++ nonConsortiumPartyDelegations) + } + + /** Returns a list of owner's keys (at most limit) */ /* NOTE(review): "limit" below caps the number of keys kept PER owner (keys.take(limit)), not the number of owners returned — confirm this matches the intent of the scaladoc. */ + override def inspectKeys( + filterOwner: String, + filterOwnerType: Option[MemberCode], + limit: Int, + ): Future[Map[Member, KeyCollection]] = { + store + .inspect( + proposals = false, + timeQuery = TimeQueryX.Snapshot(timestamp), + recentTimestampO = None, + op = Some(TopologyChangeOpX.Replace), + typ = Some(TopologyMappingX.Code.OwnerToKeyMappingX), + idFilter = filterOwner, + namespaceOnly = false, + ) + .map( + _.collectOfMapping[OwnerToKeyMappingX] + .collectOfType[TopologyChangeOpX.Replace] + .result + .groupBy(_.transaction.transaction.mapping.member) + .collect { + case (owner, seq) + if owner.filterString.startsWith(filterOwner) + && filterOwnerType.forall(_ == owner.code) => + val keys = KeyCollection(Seq(), Seq()) + val okm = + collectLatestMapping( + TopologyMappingX.Code.OwnerToKeyMappingX, + seq.sortBy(_.validFrom), + ) + owner -> okm + .fold(keys)(_.keys.take(limit).foldLeft(keys) { case (keys, key) => + keys.addTo(key) + }) + } + ) + } + + /* Single-participant convenience wrapper around loadParticipantStates. */ override def findParticipantState( + participantId: ParticipantId + ): Future[Option[ParticipantAttributes]] = + loadParticipantStates(Seq(participantId)).map(_.get(participantId)) + + private def loadParticipantStatesHelper( + participantsFilter: Option[Seq[ParticipantId]] // None means fetch all participants + ): Future[Map[ParticipantId, ParticipantDomainPermissionX]] = for { + // Looks up domain parameters for default rate limits. 
+ domainParametersState <- findTransactions( + asOfInclusive = false, + types = Seq( + TopologyMappingX.Code.DomainParametersStateX + ), + filterUid = None, + filterNamespace = None, + ).map(transactions => + collectLatestMapping( + TopologyMappingX.Code.DomainParametersStateX, + transactions.collectOfMapping[DomainParametersStateX].result, + ).getOrElse(throw new IllegalStateException("Unable to locate domain parameters state")) + ) + // 1. Participant needs to have requested access to domain by issuing a domain trust certificate + participantsWithCertificates <- findTransactions( + asOfInclusive = false, + types = Seq( + TopologyMappingX.Code.DomainTrustCertificateX + ), + filterUid = None, + filterNamespace = None, + ).map( + _.collectOfMapping[DomainTrustCertificateX].result + .groupBy(_.transaction.transaction.mapping.participantId) + .collect { + case (pid, seq) if participantsFilter.forall(_.contains(pid)) => + // invoke collectLatestMapping only to warn in case a participantId's domain trust certificate is not unique + collectLatestMapping( + TopologyMappingX.Code.DomainTrustCertificateX, + seq.sortBy(_.validFrom), + ).discard + pid + } + .toSeq + ) + // 2. 
Participant needs to have keys registered on the domain + participantsWithCertAndKeys <- findTransactions( + asOfInclusive = false, + types = Seq(TopologyMappingX.Code.OwnerToKeyMappingX), + filterUid = Some(participantsWithCertificates.map(_.uid)), + filterNamespace = None, + ).map( + _.collectOfMapping[OwnerToKeyMappingX].result + .groupBy(_.transaction.transaction.mapping.member) + .collect { + case (pid: ParticipantId, seq) + if collectLatestMapping( + TopologyMappingX.Code.OwnerToKeyMappingX, + seq.sortBy(_.validFrom), + ).nonEmpty => + pid + } + ) + // Warn about participants with cert but no keys + _ = (participantsWithCertificates.toSet -- participantsWithCertAndKeys.toSet).foreach { pid => + logger.warn( + s"Participant ${pid} has a domain trust certificate, but no keys on domain ${domainParametersState.domain}" + ) + } + // 3. Attempt to look up permissions/trust from participant domain permission + participantDomainPermissions <- findTransactions( + asOfInclusive = false, + types = Seq( + TopologyMappingX.Code.ParticipantDomainPermissionX + ), + filterUid = None, + filterNamespace = None, + ).map( + _.collectOfMapping[ParticipantDomainPermissionX].result + .groupBy(_.transaction.transaction.mapping.participantId) + .map { case (pid, seq) => + val mapping = + collectLatestMapping( + TopologyMappingX.Code.ParticipantDomainPermissionX, + seq.sortBy(_.validFrom), + ) + .getOrElse( + throw new IllegalStateException("Group-by would not have produced empty seq") + ) + pid -> mapping + } + ) + // 4. 
Apply default permissions/trust of submission/ordinary if missing participant domain permission and + // grab rate limits from dynamic domain parameters if not specified + participantIdDomainPermissionsMap = participantsWithCertAndKeys.map { pid => + pid -> participantDomainPermissions + .getOrElse( + pid, + ParticipantDomainPermissionX.default(domainParametersState.domain, pid), + ) + .setDefaultLimitIfNotSet(domainParametersState.parameters.v2DefaultParticipantLimits) + }.toMap + } yield participantIdDomainPermissionsMap + + /** abstract loading function used to load the participant state for the given set of participant-ids */ /* Empty input short-circuits to an empty map without touching the store. */ + override def loadParticipantStates( + participants: Seq[ParticipantId] + ): Future[Map[ParticipantId, ParticipantAttributes]] = + if (participants.isEmpty) + Future.successful(Map()) + else + loadParticipantStatesHelper(participants.some).map(_.map { case (pid, pdp) => + pid -> pdp.toParticipantAttributes + }) + + /* Deliberately unsupported on this implementation; the error message flags any call as a coding bug. */ override def participants(): Future[Seq[(ParticipantId, ParticipantPermission)]] = + Future.failed( + new UnsupportedOperationException( + s"Participants lookup not supported by StoreBasedDomainTopologyClientX. This is a coding bug." + ) + ) + override def findParticipantCertificate(participantId: ParticipantId)(implicit + traceContext: TraceContext + ): Future[Option[LegalIdentityClaimEvidence.X509Cert]] = + Future.failed( + new UnsupportedOperationException( + s"Legal claims not supported by StoreBasedDomainTopologyClientX. This is a coding bug." 
+ ) + ) + + /** abstract loading function used to obtain the full key collection for a key owner */ + override def allKeys(owner: Member): Future[KeyCollection] = findTransactions( + asOfInclusive = false, + types = Seq(TopologyMappingX.Code.OwnerToKeyMappingX), + filterUid = Some(Seq(owner.uid)), + filterNamespace = None, + ) + .map { transactions => + val keys = KeyCollection(Seq(), Seq()) + collectLatestMapping[OwnerToKeyMappingX]( + TopologyMappingX.Code.OwnerToKeyMappingX, + transactions.collectOfMapping[OwnerToKeyMappingX].result, + ).fold(keys)(_.keys.foldLeft(keys) { case (keys, key) => keys.addTo(key) }) + } + + /* Union of all members referenced by trust certificates, mediator and sequencer domain states. */ override def allMembers(): Future[Set[Member]] = { + findTransactions( + asOfInclusive = false, + types = Seq( + DomainTrustCertificateX.code, + MediatorDomainStateX.code, + SequencerDomainStateX.code, + ), + filterUid = None, + filterNamespace = None, + ).map( + _.result.view + .map(_.transaction.transaction.mapping) + .flatMap { + case dtc: DomainTrustCertificateX => Seq(dtc.participantId) + case mds: MediatorDomainStateX => mds.active ++ mds.observers + case sds: SequencerDomainStateX => sds.active ++ sds.observers + case _ => Seq.empty + } + .toSet + ) + } + + /* Membership check dispatched by member type: participants by their own uid, mediators/sequencers by scanning the respective domain states. */ override def isMemberKnown(member: Member): Future[Boolean] = { + member match { + case ParticipantId(pid) => + findTransactions( + asOfInclusive = false, + types = Seq(DomainTrustCertificateX.code), + filterUid = Some(Seq(pid)), + filterNamespace = None, + ).map(_.result.nonEmpty) + case mediatorId @ MediatorId(_) => + findTransactions( + asOfInclusive = false, + types = Seq(MediatorDomainStateX.code), + filterUid = None, + filterNamespace = None, + ).map( + _.collectOfMapping[MediatorDomainStateX].result + .exists(_.transaction.transaction.mapping.allMediatorsInGroup.contains(mediatorId)) + ) + case sequencerId @ SequencerId(_) => + findTransactions( + asOfInclusive = false, + types = Seq(SequencerDomainStateX.code), + filterUid = None, + filterNamespace = None, + ).map( + 
_.collectOfMapping[SequencerDomainStateX].result + .exists(_.transaction.transaction.mapping.allSequencers.contains(sequencerId)) + ) + case _ => + Future.failed( + new IllegalArgumentException( + s"Checking whether member is known for an unexpected member type: $member" + ) + ) + } + } + + /* Convenience: latest transaction's mapping, if any. */ private def collectLatestMapping[T <: TopologyMappingX]( + typ: TopologyMappingX.Code, + transactions: Seq[StoredTopologyTransactionX[TopologyChangeOpX.Replace, T]], + ): Option[T] = collectLatestTransaction(typ, transactions).map(_.transaction.transaction.mapping) + + /* Returns the last entry and warns when uniqueness or valid-from monotonicity is violated. NOTE(review): the fold is seeded with CantonTimestamp.Epoch, so the first element also triggers the non-monotonicity warning whenever its validFrom <= Epoch — confirm that is intended. */ private def collectLatestTransaction[T <: TopologyMappingX]( + typ: TopologyMappingX.Code, + transactions: Seq[StoredTopologyTransactionX[TopologyChangeOpX.Replace, T]], + ): Option[StoredTopologyTransactionX[TopologyChangeOpX.Replace, T]] = { + if (transactions.sizeCompare(1) > 0) { + logger.warn( + s"Expected unique \"${typ.code}\" at $referenceTime, but found multiple instances" + ) + transactions + .foldLeft(CantonTimestamp.Epoch) { case (previous, tx) => + val validFrom = tx.validFrom.value + if (previous >= validFrom) { + logger.warn( + s"Instance of \"${typ.code}\" with hash \"${tx.transaction.transaction.hash.hash.toHexString}\" with non-monotonically growing valid-from effective time: previous $previous, new: $validFrom" + ) + } + validFrom + } + .discard + } + transactions.lastOption + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/AuthorizationChainX.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/AuthorizationChainX.scala new file mode 100644 index 0000000000..9781ca83e4 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/AuthorizationChainX.scala @@ -0,0 +1,54 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology.processing + +import cats.Monoid +import com.digitalasset.canton.topology.processing.AuthorizedTopologyTransactionX.{ + AuthorizedIdentifierDelegationX, + AuthorizedNamespaceDelegationX, + AuthorizedUnionspaceDefinitionX, +} + +import scala.collection.mutable + +/** authorization data + * + * this type is returned by the authorization validator. it contains the series of transactions + * that authorize a certain topology transaction. + * + * note that the order of the namespace delegation is in "authorization order". + */ +final case class AuthorizationChainX( + identifierDelegation: Seq[AuthorizedIdentifierDelegationX], + namespaceDelegations: Seq[AuthorizedNamespaceDelegationX], + unionspaceDefinitions: Seq[AuthorizedUnionspaceDefinitionX], +) { + + /* Appends a single identifier delegation, preserving existing order. */ def addIdentifierDelegation(aid: AuthorizedIdentifierDelegationX): AuthorizationChainX = + copy(identifierDelegation = identifierDelegation :+ aid) + + def merge(other: AuthorizationChainX): AuthorizationChainX = { + AuthorizationChainX( + mergeUnique(this.identifierDelegation, other.identifierDelegation), + mergeUnique(this.namespaceDelegations, other.namespaceDelegations), + mergeUnique(this.unionspaceDefinitions, other.unionspaceDefinitions), + ) + } + + /* LinkedHashSet keeps first-seen insertion order, so "left" entries keep their positions and duplicates from "right" are dropped — this is what preserves the documented "authorization order" across merges. */ private def mergeUnique[T](left: Seq[T], right: Seq[T]): Seq[T] = { + mutable.LinkedHashSet.from(left).addAll(right).toSeq + } + +} + +object AuthorizationChainX { + val empty = AuthorizationChainX(Seq(), Seq(), Seq()) + + /* Monoid instance so chains can be combined with cats' combineAll and friends. */ implicit val monoid: Monoid[AuthorizationChainX] = new Monoid[AuthorizationChainX] { + override def empty: AuthorizationChainX = AuthorizationChainX.empty + + override def combine(x: AuthorizationChainX, y: AuthorizationChainX): AuthorizationChainX = + x.merge(y) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/AuthorizationGraph.scala 
b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/AuthorizationGraph.scala new file mode 100644 index 0000000000..d9931be06b --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/AuthorizationGraph.scala @@ -0,0 +1,340 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology.processing + +import com.digitalasset.canton.crypto.Fingerprint +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.topology.Namespace +import com.digitalasset.canton.topology.processing.AuthorizedTopologyTransaction.* +import com.digitalasset.canton.topology.processing.TransactionAuthorizationValidator.AuthorizationChain +import com.digitalasset.canton.topology.transaction.{ + IdentifierDelegation, + NamespaceDelegation, + SignedTopologyTransaction, + TopologyChangeOp, + TopologyMapping, + UniquePath, +} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.ErrorUtil +import com.digitalasset.canton.util.ShowUtil.* + +import scala.annotation.tailrec +import scala.collection.concurrent.TrieMap +import scala.math.Ordering.Implicits.* + +/** An authorized topology transaction */ +final case class AuthorizedTopologyTransaction[T <: TopologyMapping]( + uniquePath: UniquePath, + mapping: T, + transaction: SignedTopologyTransaction[TopologyChangeOp], +) { + /* fingerprint of the key that signed the wrapped transaction */ def signingKey: Fingerprint = transaction.key.fingerprint +} + +object AuthorizedTopologyTransaction { + + type AuthorizedNamespaceDelegation = AuthorizedTopologyTransaction[NamespaceDelegation] + type AuthorizedIdentifierDelegation = AuthorizedTopologyTransaction[IdentifierDelegation] + + /** Returns true if the namespace delegation is a root certificate + * + * A root certificate is defined by the namespace delegation that authorizes the + * key 
f to act on namespace spanned by f, authorized by f. + */ /* i.e. self-signed and self-targeted: namespace fingerprint == target fingerprint == signing key */ + def isRootCertificate(namespaceDelegation: AuthorizedNamespaceDelegation): Boolean = { + val mapping = namespaceDelegation.mapping + (mapping.namespace.fingerprint == mapping.target.fingerprint) && namespaceDelegation.signingKey == mapping.target.fingerprint + } + +} + +/** maintain a dependency graph for the namespace delegations + * + * namespace delegations are a bit tricky as there can be an arbitrary number of delegations before we reach + * the actual key that will be used for authorizations. think of it as a certificate chain where we get a + * series of certificates and we need to figure out a path from one certificate to the root certificate. + * + * NOTE: this class is not thread-safe + * + * properties of the graph: + * - the nodes are the target key fingerprints + * - the node with fingerprint of the namespace is the root node + * - the edges between the nodes are the authorizations where key A authorizes key B to act on the namespace + * in this case, the authorization is outgoing from A and incoming to B. + * - the graph SHOULD be directed acyclic graph, but we MIGHT have cycles (i.e. key A authorizing B, B authorizing A). + * we don't need to make a fuss about cycles in the graph. we just ignore / report them assuming it was an admin + * mistake, but we don't get confused. + * - root certificates are edges pointing to the node itself. they are separate such that they don't show up + * in the list of incoming / outgoing. + * - we track for each node the set of outgoing edges and incoming edges. an outgoing edge is a delegation where + * the source node is authorizing a target node. obviously every outgoing edge is also an incoming edge. + * + * computation task: + * - once we've modified the graph, we compute the nodes that are somehow connected to the root node. 
+ * + * purpose: + * - once we know which target keys are actually authorized to act on this particular namespace, we can then use + * this information to find out which resulting mapping is properly authorized and which one not. + * + * authorization checks: + * - when adding "single transactions", we do check that the transaction is properly authorized. otherwise we + * "ignore" it (returning false). this is used during processing. + * - when adding "batch transactions", we don't check that all of them are properly authorized, as we do allow + * temporarily "nodes" to be unauthorized (so that errors can be fixed by adding a replacement certificate) + * - when removing transactions, we do check that the authorizing key is authorized. but note that the authorizing + * key of an edge REMOVAL doesn't need to match the key used to authorized the ADD. + */ +class AuthorizationGraph( + namespace: Namespace, + extraDebugInfo: Boolean, + val loggerFactory: NamedLoggerFactory, +) extends NamedLogging { + + /* one node per target-key fingerprint, holding its root/outgoing/incoming delegation edges */ private case class GraphNode( + target: Fingerprint, + root: Set[AuthorizedNamespaceDelegation] = Set(), + outgoing: Set[AuthorizedNamespaceDelegation] = Set(), + incoming: Set[AuthorizedNamespaceDelegation] = Set(), + ) { + + def isEmpty: Boolean = root.isEmpty && outgoing.isEmpty && incoming.isEmpty + + } + + /* three-valued authorization level: not authorized < standard < root delegation */ private abstract class AuthLevel(val isAuth: Boolean, val isRoot: Boolean) + private object AuthLevel { + + object NotAuthorized extends AuthLevel(false, false) + object Standard extends AuthLevel(true, false) + object RootDelegation extends AuthLevel(true, true) + + /* orders by the count of set flags: NotAuthorized(0) < Standard(1) < RootDelegation(2) */ implicit val orderingAuthLevel: Ordering[AuthLevel] = + Ordering.by[AuthLevel, Int](authl => { Seq(authl.isAuth, authl.isRoot).count(identity) }) + + def fromDelegationO(delegation: Option[AuthorizedNamespaceDelegation]): AuthLevel = + delegation match { + case None => AuthLevel.NotAuthorized + case Some(item) if item.mapping.isRootDelegation => RootDelegation + case Some(_) => Standard + } + + } + + 
private val nodes = new TrieMap[Fingerprint, GraphNode]() + + /** temporary cache for the current graph authorization check results + * + * if a fingerprint is empty, then we haven't yet computed the answer + */ + private val cache = new TrieMap[Fingerprint, Option[AuthorizedNamespaceDelegation]]() + + /* Adds a delegation only if it is a root certificate or signed by a currently valid root-authorized key; returns false (and ignores the item) otherwise. NOTE(review): isValidAuthorizationKey and recompute are defined outside this excerpt. */ def add(item: AuthorizedNamespaceDelegation)(implicit traceContext: TraceContext): Boolean = { + ErrorUtil.requireArgument( + item.mapping.namespace == namespace, + s"added namespace ${item.mapping.namespace} to $namespace", + ) + if ( + AuthorizedTopologyTransaction.isRootCertificate(item) || + this.isValidAuthorizationKey(item.signingKey, requireRoot = true) + ) { + doAdd(item) + recompute() + true + } else false + } + + /* Batch insertion without authorization checks (temporarily unauthorized nodes are allowed, see class doc). */ def unauthorizedAdd( + items: Seq[AuthorizedNamespaceDelegation] + )(implicit traceContext: TraceContext): Unit = { + items.foreach(doAdd) + recompute() + } + + private def doAdd(item: AuthorizedNamespaceDelegation): Unit = { + val targetKey = item.mapping.target.fingerprint + val curTarget = nodes.getOrElse(targetKey, GraphNode(targetKey)) + // if this is a root certificate, remember it separately + if (AuthorizedTopologyTransaction.isRootCertificate(item)) { + nodes.update(targetKey, curTarget.copy(root = curTarget.root + item)) + } else { + val authKey = item.signingKey + val curAuth = nodes.getOrElse(authKey, GraphNode(authKey)) + nodes.update(authKey, curAuth.copy(outgoing = curAuth.outgoing + item)) + nodes.update(targetKey, curTarget.copy(incoming = curTarget.incoming + item)) + } + } + + /* Removal requires a root-authorized signing key; the key need not match the one used for the original add. */ def remove(item: AuthorizedNamespaceDelegation)(implicit traceContext: TraceContext): Boolean = + if (isValidAuthorizationKey(item.signingKey, requireRoot = true)) { + doRemove(item) + true + } else false + + def unauthorizedRemove( + items: Seq[AuthorizedNamespaceDelegation] + )(implicit traceContext: TraceContext): Unit = { + items.foreach(doRemove) + } + + /** remove a namespace delegation + * + * note that this one is a bit tricky as the removal might 
have been authorized + * by a different key than the addition. this is fine but it complicates the book-keeping, + * as we need to track for each target key what the "incoming authorizations" were solely for the + * purpose of being able to clean them up + */ + private def doRemove( + item: AuthorizedNamespaceDelegation + )(implicit traceContext: TraceContext): Unit = { + ErrorUtil.requireArgument( + item.mapping.namespace == namespace, + s"added namespace ${item.mapping.namespace} to $namespace", + ) + /* keep every edge except the one matching the removed item (auth key intentionally not compared) */ def myFilter(existing: AuthorizedNamespaceDelegation): Boolean = { + // the auth key doesn't need to match on removals + existing.uniquePath != item.uniquePath || existing.mapping != item.mapping + } + /* write back a node, dropping it entirely once it has no edges left */ def updateRemove(key: Fingerprint, res: GraphNode): Unit = { + val _ = + if (res.isEmpty) + nodes.remove(key) + else + nodes.update(key, res) + } + def removeOutgoing(node: GraphNode): Unit = { + // we need to use the "incoming" edges to figure out the original outgoing keys, as the key that + // was authorizing this removal might not be the one that authorized the addition + node.incoming.map(_.signingKey).foreach { fp => + nodes.get(fp) match { + case Some(curIncoming) => + // remove for this key the edge that goes to the target node + updateRemove(fp, curIncoming.copy(outgoing = curIncoming.outgoing.filter(myFilter))) + case None => + logger.error( + s"Broken authorization graph when removing $item as node ${node.target} says that $fp is incoming, but $fp does not exist as a node" + ) + } + } + } + val targetKey = item.mapping.target.fingerprint + nodes.get(targetKey) match { + case Some(curTarget) => + if (AuthorizedTopologyTransaction.isRootCertificate(item)) { + // if this is a root certificate, then we need to remove the self edge + updateRemove(targetKey, curTarget.copy(root = curTarget.root.filter(myFilter))) + } else { + // we need to remove this "edge" from both nodes. 
+ // on the target node, this is a simple incoming edge + // however, on the source node, this is a bit different + removeOutgoing(curTarget) + // remove incoming + updateRemove(targetKey, curTarget.copy(incoming = curTarget.incoming.filter(myFilter))) + } + recompute() + case None => + logger.warn(s"Superfluous removal of namespace delegation $item") + } + + } + + protected def recompute()(implicit traceContext: TraceContext): Unit = { + cache.clear() + // recompute authorization graph starting from the root certificate + // this is a graph potentially with cycles, as users might accidentally (or maliciously) + // create useless certificates chain cycles. + // however, the actual computation is simple: we start at the root certificate + // and we let the "trust" (auth-level) flow from there downwards. + // as a result, every key that doesn't end up in the cache is not connected + // to the root certificate and therefore useless. + // some nodes might be visited more than once, but only if the auth-level is increasing. 
+ // this will guarantee that we eventually terminate + def go(fingerprint: Fingerprint, incoming: AuthorizedNamespaceDelegation): Unit = { + val current = cache.getOrElseUpdate(fingerprint, None) + val currentLevel = AuthLevel.fromDelegationO(current) + val incomingLevel = AuthLevel.fromDelegationO(Some(incoming)) + // this inherited level is higher than current, propagate it + if (incomingLevel > currentLevel) { + cache.update(fingerprint, Some(incoming)) + // get the graph node of this fingerprint + nodes.get(fingerprint).foreach { tracker => + // iterate through all edges that depart from this node + tracker.outgoing + .map(x => (AuthLevel.fromDelegationO(Some(x)), x)) + // only propagate edges that require lower or equal authorization level than what we have from incoming + // so an outgoing root delegation can not be authorized by an incoming non-root delegation + .filter { case (outgoingLevel, _) => outgoingLevel <= incomingLevel } + .foreach { + // iterate through all target fingerprint, taking the edge outgoing from this node as the incoming + case (_, outgoing) => go(outgoing.mapping.target.fingerprint, incoming = outgoing) + } + } + } + } + // start iterating from root certificate (if there is one) + nodes.get(namespace.fingerprint).flatMap(_.root.headOption).foreach { root => + go(namespace.fingerprint, root) + } + report() + } + + def report()(implicit traceContext: TraceContext): Unit = + if (nodes.get(namespace.fingerprint).flatMap(_.root.headOption).isDefined) { + val dangling = nodes.keySet.diff(cache.keySet) + if (dangling.nonEmpty) { + logger.warn(s"The following target keys of namespace $namespace are dangling: $dangling") + } + if (extraDebugInfo && logger.underlying.isDebugEnabled) { + val str = + authorizedDelegations() + .map(aud => + show"auth=${aud.signingKey}, target=${aud.mapping.target.fingerprint}, root=${isRootCertificate(aud)}, elementId=${aud.uniquePath.maybeElementId}" + ) + .mkString("\n ") + logger.debug(s"The authorization graph 
is given by:\n $str") + } + } else + logger.debug( + s"Namespace ${namespace} has no root certificate, making all ${nodes.size} un-authorized" + ) + + def isValidAuthorizationKey(authKey: Fingerprint, requireRoot: Boolean): Boolean = { + val authLevel = AuthLevel.fromDelegationO(cache.getOrElse(authKey, None)) + authLevel.isRoot || (authLevel.isAuth && !requireRoot) + } + + def authorizationChain( + startAuthKey: Fingerprint, + requireRoot: Boolean, + ): Option[AuthorizationChain] = { + @tailrec + def go( + authKey: Fingerprint, + requireRoot: Boolean, + acc: List[AuthorizedNamespaceDelegation], + ): List[AuthorizedNamespaceDelegation] = { + cache.getOrElse(authKey, None) match { + // we've terminated with the root certificate + case Some(delegation) if isRootCertificate(delegation) => + delegation :: acc + // cert is valid, append it + case Some(delegation) if delegation.mapping.isRootDelegation || !requireRoot => + go(delegation.signingKey, delegation.mapping.isRootDelegation, delegation :: acc) + // return empty to indicate failure + case _ => List.empty + } + } + go(startAuthKey, requireRoot, List.empty) match { + case Nil => None + case rest => + Some( + AuthorizationChain(identifierDelegation = Seq.empty, namespaceDelegations = rest) + ) + } + } + + def authorizedDelegations(): Seq[AuthorizedNamespaceDelegation] = + cache.values.flatMap(_.toList).toSeq + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/AuthorizationGraphX.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/AuthorizationGraphX.scala new file mode 100644 index 0000000000..084b726c09 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/AuthorizationGraphX.scala @@ -0,0 +1,458 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology.processing + +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.crypto.{Fingerprint, SigningPublicKey} +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.topology.Namespace +import com.digitalasset.canton.topology.processing.AuthorizedTopologyTransactionX.AuthorizedNamespaceDelegationX +import com.digitalasset.canton.topology.transaction.* +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.ErrorUtil +import com.digitalasset.canton.util.ShowUtil.* + +import scala.annotation.tailrec +import scala.collection.concurrent.TrieMap +import scala.math.Ordering.Implicits.* + +/** An authorized topology transaction */ +final case class AuthorizedTopologyTransactionX[T <: TopologyMappingX]( + signedTransaction: SignedTopologyTransactionX[TopologyChangeOpX, T] +) { + def mapping: T = signedTransaction.transaction.mapping + def signingKeys: NonEmpty[Set[Fingerprint]] = signedTransaction.signatures.map(_.signedBy) +} + +object AuthorizedTopologyTransactionX { + + type AuthorizedNamespaceDelegationX = AuthorizedTopologyTransactionX[NamespaceDelegationX] + type AuthorizedIdentifierDelegationX = AuthorizedTopologyTransactionX[IdentifierDelegationX] + type AuthorizedUnionspaceDefinitionX = AuthorizedTopologyTransactionX[UnionspaceDefinitionX] + + /** Returns true if the namespace delegation is a root certificate + * + * A root certificate is defined by the namespace delegation that authorizes the + * key f to act on namespace spanned by f, authorized by f. 
+ */ + def isRootCertificate(namespaceDelegation: AuthorizedNamespaceDelegationX): Boolean = { + NamespaceDelegationX.isRootCertificate(namespaceDelegation.signedTransaction) + } + + /** Returns true if the namespace delegation is a root certificate or a root delegation + * + * A root certificate is defined by the namespace delegation that authorizes the + * key f to act on namespace spanned by f, authorized by f. + * + * A root delegation is defined by the namespace delegation the authorizes the + * key g to act on namespace spanned by f. + */ + def isRootDelegation(namespaceDelegation: AuthorizedNamespaceDelegationX): Boolean = { + NamespaceDelegationX.isRootDelegation(namespaceDelegation.signedTransaction) + } + +} + +/** maintain a dependency graph for the namespace delegations + * + * namespace delegations are a bit tricky as there can be an arbitrary number of delegations before we reach + * the actual key that will be used for authorizations. think of it as a certificate chain where we get a + * series of certificates and we need to figure out a path from one certificate to the root certificate. + * + * NOTE: this class is not thread-safe + * + * properties of the graph: + * - the nodes are the target key fingerprints + * - the node with fingerprint of the namespace is the root node + * - the edges between the nodes are the authorizations where key A authorizes key B to act on the namespace + * in this case, the authorization is outgoing from A and incoming to B. + * - the graph SHOULD be a directed acyclic graph, but we MIGHT have cycles (i.e. key A authorizing B, B authorizing A). + * we don't need to make a fuss about cycles in the graph. we just ignore / report them assuming it was an admin + * mistake, but we don't get confused. + * - root certificates are edges pointing to the node itself. they are separate such that they don't show up + * in the list of incoming / outgoing. + * - we track for each node the set of outgoing edges and incoming edges. 
an outgoing edge is a delegation where + * the source node is authorizing a target node. obviously every outgoing edge is also an incoming edge. + * + * computation task: + * - once we've modified the graph, we compute the nodes that are somehow connected to the root node. + * + * purpose: + * - once we know which target keys are actually authorized to act on this particular namespace, we can then use + * this information to find out which resulting mapping is properly authorized and which one is not. + * + * authorization checks: + * - when adding "single transactions", we do check that the transaction is properly authorized. otherwise we + * "ignore" it (returning false). this is used during processing. + * - when adding "batch transactions", we don't check that all of them are properly authorized, as we do allow + * temporarily "nodes" to be unauthorized (so that errors can be fixed by adding a replacement certificate) + * - when removing transactions, we do check that the authorizing key is authorized. but note that the authorizing + * key of an edge REMOVAL doesn't need to match the key used to authorized the ADD. 
+ */ +class AuthorizationGraphX( + val namespace: Namespace, + extraDebugInfo: Boolean, + val loggerFactory: NamedLoggerFactory, +) extends AuthorizationCheckX + with NamedLogging { + + private case class GraphNode( + target: Fingerprint, + root: Option[AuthorizedNamespaceDelegationX] = None, + outgoing: Set[AuthorizedNamespaceDelegationX] = Set(), + incoming: Set[AuthorizedNamespaceDelegationX] = Set(), + ) { + + def isEmpty: Boolean = root.isEmpty && outgoing.isEmpty && incoming.isEmpty + + } + + private abstract class AuthLevel(val isAuth: Boolean, val isRoot: Boolean) + private object AuthLevel { + + object NotAuthorized extends AuthLevel(false, false) + object Standard extends AuthLevel(true, false) + object RootDelegation extends AuthLevel(true, true) + + implicit val orderingAuthLevel: Ordering[AuthLevel] = + Ordering.by[AuthLevel, Int](authl => Seq(authl.isAuth, authl.isRoot).count(identity)) + + def fromDelegationO(delegation: Option[AuthorizedNamespaceDelegationX]): AuthLevel = + delegation match { + case None => AuthLevel.NotAuthorized + case Some(item) if item.mapping.isRootDelegation => RootDelegation + case Some(_) => Standard + } + + } + + private val nodes = new TrieMap[Fingerprint, GraphNode]() + + /** temporary cache for the current graph authorization check results + * + * if a fingerprint is empty, then we haven't yet computed the answer + */ + private val cache = + new TrieMap[Fingerprint, Option[AuthorizedNamespaceDelegationX]]() + + def add(item: AuthorizedNamespaceDelegationX)(implicit traceContext: TraceContext): Boolean = { + ErrorUtil.requireArgument( + item.mapping.namespace == namespace, + s"added namespace ${item.mapping.namespace} to $namespace", + ) + if ( + AuthorizedTopologyTransactionX.isRootCertificate(item) || + this.areValidAuthorizationKeys(item.signingKeys, requireRoot = true) + ) { + doAdd(item) + recompute() + true + } else false + } + + def unauthorizedAdd( + items: Seq[AuthorizedNamespaceDelegationX] + )(implicit 
traceContext: TraceContext): Unit = { + items.foreach(doAdd) + recompute() + } + + private def doAdd( + item: AuthorizedNamespaceDelegationX + )(implicit traceContext: TraceContext): Unit = { + val targetKey = item.mapping.target.fingerprint + val curTarget = nodes.getOrElse(targetKey, GraphNode(targetKey)) + // if this is a root certificate, remember it separately + if (AuthorizedTopologyTransactionX.isRootCertificate(item)) { + ErrorUtil.requireState( + curTarget.root.forall(_ == item), + s"Trying to add a root certificate for $namespace that differs from a previously added root certificate.\nKnown=[${curTarget.root}]\nToAdd=[$item]", + ) + nodes.update(targetKey, curTarget.copy(root = Some(item))) + } else { + item.signingKeys.foreach { authKey => + val curAuth = nodes.getOrElse(authKey, GraphNode(authKey)) + nodes.update(authKey, curAuth.copy(outgoing = curAuth.outgoing + item)) + } + nodes.update(targetKey, curTarget.copy(incoming = curTarget.incoming + item)) + } + } + + def remove(item: AuthorizedNamespaceDelegationX)(implicit traceContext: TraceContext): Boolean = + if (areValidAuthorizationKeys(item.signingKeys, requireRoot = true)) { + doRemove(item) + true + } else false + + def unauthorizedRemove( + items: Seq[AuthorizedNamespaceDelegationX] + )(implicit traceContext: TraceContext): Unit = { + items.foreach(doRemove) + } + + /** remove a namespace delegation + * + * note that this one is a bit tricky as the removal might have been authorized + * by a different key than the addition. 
this is fine but it complicates the book-keeping, + * as we need to track for each target key what the "incoming authorizations" were solely for the + * purpose of being able to clean them up + */ + private def doRemove( + item: AuthorizedNamespaceDelegationX + )(implicit traceContext: TraceContext): Unit = { + ErrorUtil.requireArgument( + item.mapping.namespace == namespace, + s"removing namespace ${item.mapping.namespace} from $namespace", + ) + def myFilter(existing: AuthorizedNamespaceDelegationX): Boolean = { + // the auth key doesn't need to match on removals + existing.mapping != item.mapping + } + def updateRemove(key: Fingerprint, res: GraphNode): Unit = { + val _ = + if (res.isEmpty) + nodes.remove(key) + else + nodes.update(key, res) + } + def removeOutgoing(node: GraphNode): Unit = { + // we need to use the "incoming" edges to figure out the original outgoing keys, as the key that + // was authorizing this removal might not be the one that authorized the addition + node.incoming.flatMap(_.signingKeys).foreach { fp => + nodes.get(fp) match { + case Some(curIncoming) => + // remove for this key the edge that goes to the target node + updateRemove(fp, curIncoming.copy(outgoing = curIncoming.outgoing.filter(myFilter))) + case None => + logger.error( + s"Broken authorization graph when removing $item as node ${node.target} says that $fp is incoming, but $fp does not exist as a node" + ) + } + } + } + val targetKey = item.mapping.target.fingerprint + nodes.get(targetKey) match { + case Some(curTarget) => + if (AuthorizedTopologyTransactionX.isRootCertificate(item)) { + // if this is a root certificate, then we need to remove the self edge + updateRemove(targetKey, curTarget.copy(root = curTarget.root.filter(myFilter))) + } else { + // we need to remove this "edge" from both nodes. 
+ // on the target node, this is a simple incoming edge + // however, on the source node, this is a bit different + removeOutgoing(curTarget) + // remove incoming + updateRemove(targetKey, curTarget.copy(incoming = curTarget.incoming.filter(myFilter))) + } + recompute() + case None => + logger.warn(s"Superfluous removal of namespace delegation $item") + } + + } + + protected def recompute()(implicit traceContext: TraceContext): Unit = { + cache.clear() + // recompute authorization graph starting from the root certificate + // this is a graph potentially with cycles, as users might accidentally (or maliciously) + // create useless certificates chain cycles. + // however, the actual computation is simple: we start at the root certificate + // and we let the "trust" (auth-level) flow from there downwards. + // as a result, every key that doesn't end up in the cache is not connected + // to the root certificate and therefore useless. + // some nodes might be visited more than once, but only if the auth-level is increasing. 
+ // this will guarantee that we eventually terminate + def go( + fingerprint: Fingerprint, + incoming: AuthorizedNamespaceDelegationX, + ): Unit = { + val current = cache.getOrElseUpdate(fingerprint, None) + val currentLevel = AuthLevel.fromDelegationO(current) + val incomingLevel = AuthLevel.fromDelegationO(Some(incoming)) + // this inherited level is higher than current, propagate it + if (incomingLevel > currentLevel) { + cache.update(fingerprint, Some(incoming)) + // get the graph node of this fingerprint + nodes.get(fingerprint).foreach { graphNode => + // iterate through all edges that depart from this node + graphNode.outgoing + .map(x => (AuthLevel.fromDelegationO(Some(x)), x)) + // only propagate edges that require lower or equal authorization level than what we have from incoming + // so an outgoing root delegation can not be authorized by an incoming non-root delegation + .filter { case (outgoingLevel, _) => outgoingLevel <= incomingLevel } + .foreach { + // iterate through all target fingerprint, taking the edge outgoing from this node as the incoming + case (_, outgoing) => + go(outgoing.mapping.target.fingerprint, incoming = outgoing) + } + } + } + } + for { + // start iterating from root certificates for this namespace + graph <- nodes.get(namespace.fingerprint) + root <- graph.root + } { + go(namespace.fingerprint, root) + } + + report() + } + + def report()(implicit traceContext: TraceContext): Unit = + if (nodes.get(namespace.fingerprint).flatMap(_.root).isDefined) { + val dangling = nodes.keySet.diff(cache.keySet) + if (dangling.nonEmpty) { + logger.warn(s"The following target keys of namespace $namespace are dangling: $dangling") + } + if (extraDebugInfo && logger.underlying.isDebugEnabled) { + val str = + authorizedDelegations() + .map(aud => + show"auth=${aud.signingKeys}, target=${aud.mapping.target.fingerprint}, root=${AuthorizedTopologyTransactionX + .isRootCertificate(aud)}" + ) + .mkString("\n ") + logger.debug(s"The authorization graph 
is given by:\n $str") + } + } else + logger.debug( + s"Namespace ${namespace} has no root certificate, making all ${nodes.size} un-authorized" + ) + + override def areValidAuthorizationKeys( + authKeys: Set[Fingerprint], + requireRoot: Boolean, + ): Boolean = { + authKeys.exists { authKey => + val authLevel = AuthLevel.fromDelegationO(cache.getOrElse(authKey, None)) + authLevel.isRoot || (authLevel.isAuth && !requireRoot) + } + } + + override def getValidAuthorizationKey( + authKey: Fingerprint, + requireRoot: Boolean, + ): Option[SigningPublicKey] = { + cache + .getOrElse(authKey, None) + .map(_.mapping.target) + .filter(_ => areValidAuthorizationKeys(Set(authKey), requireRoot)) + } + + def authorizationChain( + startAuthKey: Fingerprint, + requireRoot: Boolean, + ): Option[AuthorizationChainX] = { + @tailrec + def go( + authKey: Fingerprint, + requireRoot: Boolean, + acc: List[AuthorizedNamespaceDelegationX], + ): List[AuthorizedNamespaceDelegationX] = { + cache.getOrElse(authKey, None) match { + // we've terminated with the root certificate + case Some(delegation) if AuthorizedTopologyTransactionX.isRootCertificate(delegation) => + delegation :: acc + // cert is valid, append it + case Some(delegation) if delegation.mapping.isRootDelegation || !requireRoot => + go(delegation.signingKeys.head1, delegation.mapping.isRootDelegation, delegation :: acc) + // return empty to indicate failure + case _ => List.empty + } + } + go(startAuthKey, requireRoot, List.empty) match { + case Nil => None + case rest => + Some( + AuthorizationChainX( + identifierDelegation = Seq.empty, + namespaceDelegations = rest, + Seq.empty, + ) + ) + } + } + + def authorizedDelegations(): Seq[AuthorizedNamespaceDelegationX] = + cache.values.flatMap(_.toList).toSeq + + override def toString: String = s"AuthorizationGraphX($namespace)" + + def debugInfo() = s"$namespace => ${nodes.mkString("\n")}" +} + +trait AuthorizationCheckX { + def areValidAuthorizationKeys(authKeys: Set[Fingerprint], 
requireRoot: Boolean): Boolean + + def getValidAuthorizationKey(authKey: Fingerprint, requireRoot: Boolean): Option[SigningPublicKey] + + def authorizationChain( + startAuthKey: Fingerprint, + requireRoot: Boolean, + ): Option[AuthorizationChainX] + + def authorizedDelegations(): Seq[AuthorizedNamespaceDelegationX] +} + +object AuthorizationCheckX { + val empty = new AuthorizationCheckX { + override def areValidAuthorizationKeys( + authKeys: Set[Fingerprint], + requireRoot: Boolean, + ): Boolean = false + + override def authorizationChain( + startAuthKey: Fingerprint, + requireRoot: Boolean, + ): Option[AuthorizationChainX] = None + + override def getValidAuthorizationKey( + authKey: Fingerprint, + requireRoot: Boolean, + ): Option[SigningPublicKey] = None + + override def authorizedDelegations(): Seq[AuthorizedNamespaceDelegationX] = Seq.empty + + override def toString: String = "AuthorizationCheckX.empty" + } +} + +final case class UnionspaceAuthorizationGraphX( + us: UnionspaceDefinitionX, + direct: AuthorizationGraphX, + ownerGraphs: Seq[AuthorizationGraphX], +) extends AuthorizationCheckX { + override def areValidAuthorizationKeys( + authKeys: Set[Fingerprint], + requireRoot: Boolean, + ): Boolean = { + val viaNamespaceDelegation = direct.areValidAuthorizationKeys(authKeys, requireRoot) + val viaCollective = + ownerGraphs.count(_.areValidAuthorizationKeys(authKeys, requireRoot)) >= us.threshold.value + viaNamespaceDelegation || viaCollective + } + + import cats.syntax.foldable.* + + override def getValidAuthorizationKey( + authKey: Fingerprint, + requireRoot: Boolean, + ): Option[SigningPublicKey] = { + (direct +: ownerGraphs).view + .flatMap(_.getValidAuthorizationKey(authKey, requireRoot)) + .headOption + } + + override def authorizationChain( + startAuthKey: Fingerprint, + requireRoot: Boolean, + ): Option[AuthorizationChainX] = + direct + .authorizationChain(startAuthKey, requireRoot) + .orElse(ownerGraphs.map(_.authorizationChain(startAuthKey, 
requireRoot)).combineAll) + + override def authorizedDelegations(): Seq[AuthorizedNamespaceDelegationX] = + direct.authorizedDelegations() ++ ownerGraphs.flatMap(_.authorizedDelegations()) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/DomainTopologyTransactionMessageValidator.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/DomainTopologyTransactionMessageValidator.scala new file mode 100644 index 0000000000..5ac6328136 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/DomainTopologyTransactionMessageValidator.scala @@ -0,0 +1,280 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology.processing + +import cats.data.EitherT +import cats.instances.list.* +import cats.syntax.either.* +import cats.syntax.functorFilter.* +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.concurrent.FutureSupervisor +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.crypto.{SigningPublicKey, SyncCryptoApi, SyncCryptoClient} +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.lifecycle.{CloseContext, FutureUnlessShutdown} +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.protocol.messages.{ + DefaultOpenEnvelope, + DomainTopologyTransactionMessage, + ProtocolMessage, +} +import com.digitalasset.canton.topology.TopologyManagerError.TopologyManagerAlarm +import com.digitalasset.canton.topology.client.StoreBasedTopologySnapshot +import com.digitalasset.canton.topology.transaction.{ + ParticipantState, + RequestSide, + SignedTopologyTransaction, + TopologyChangeOp, +} +import com.digitalasset.canton.topology.{Member, ParticipantId} +import 
com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.version.ProtocolVersion + +import java.util.concurrent.atomic.AtomicReference +import scala.concurrent.ExecutionContext + +/** Validate 2.x domain topology transaction messages + * + * The domain topology manager distributes the topology state through domain topology messages to + * all registered and active members. This class is used to verify the correctness of the + * given signature. + * + * Some care is required when validating the signature, as the key used by the domain topology dispatcher + * is actually defined by the state that it changes. + * + * When a new participant connects for the first time, it will receive a first message which is self-consistent, + * where the message is signed by a key for which we get all topology transactions in the message itself. + * + * Therefore, a participant can be sure to talk to the right domain by defining the domain-id on the domain connection, + * because we will check the topology state against this domain-id too. + * + * Replay attacks are prevented using a max-sequencing timestamp which prevents that a message is replayed to different + * recipients at a later point in time. 
+ */ +trait DomainTopologyTransactionMessageValidator { + + def initLastMessageTimestamp(lastMessageTs: Option[CantonTimestamp]): Unit = {} + + def extractTopologyUpdatesAndValidateEnvelope( + ts: SequencedTime, + envelopes: List[DefaultOpenEnvelope], + )(implicit + traceContext: TraceContext, + closeContext: CloseContext, + ): FutureUnlessShutdown[List[SignedTopologyTransaction[TopologyChangeOp]]] + +} + +object DomainTopologyTransactionMessageValidator { + + def create( + skipTopologyManagerSignatureValidation: Boolean, + client: SyncCryptoClient[SyncCryptoApi], + member: Member, + protocolVersion: ProtocolVersion, + timeouts: ProcessingTimeout, + futureSupervisor: FutureSupervisor, + loggerFactory: NamedLoggerFactory, + )(implicit executionContext: ExecutionContext): DomainTopologyTransactionMessageValidator = if ( + skipTopologyManagerSignatureValidation + ) + NoValidation + else new Impl(client, member, protocolVersion, timeouts, futureSupervisor, loggerFactory) + + object NoValidation extends DomainTopologyTransactionMessageValidator { + + override def extractTopologyUpdatesAndValidateEnvelope( + ts: SequencedTime, + envelopes: List[DefaultOpenEnvelope], + )(implicit + traceContext: TraceContext, + closeContext: CloseContext, + ): FutureUnlessShutdown[List[SignedTopologyTransaction[TopologyChangeOp]]] = { + FutureUnlessShutdown.pure( + envelopes + .mapFilter(ProtocolMessage.select[DomainTopologyTransactionMessage]) + .map(_.protocolMessage) + .flatMap(_.transactions) + ) + } + } + + class Impl( + client: SyncCryptoClient[SyncCryptoApi], + member: Member, + protocolVersion: ProtocolVersion, + timeouts: ProcessingTimeout, + futureSupervisor: FutureSupervisor, + val loggerFactory: NamedLoggerFactory, + )(implicit executionContext: ExecutionContext) + extends DomainTopologyTransactionMessageValidator + with NamedLogging { + + private val lastSequencingTimestamp = new AtomicReference[Option[CantonTimestamp]](None) + + override def 
initLastMessageTimestamp(lastMessageTs: Option[CantonTimestamp]): Unit = { + lastSequencingTimestamp.getAndSet(lastMessageTs) match { + case Some(value) => + noTracingLogger.error( + s"Updating the last sequencing timestamp again from=${value} to=${lastMessageTs}" + ) + case None => + } + } + + private def validateFirstMessage(ts: SequencedTime, message: DomainTopologyTransactionMessage)( + implicit traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, String, Unit] = if (member.code != ParticipantId.Code) { + // This only applies to the internal sequencer self-connection, which is okay, + // as nobody can connect to the sequencer before the keys are registered + logger.debug( + s"Skipping domain topology transaction signature validation as the right keys are not yet known at ${ts.value}" + ) + EitherT.pure(()) + } else { + for { + // validate that all topology transactions are properly authorized in first snapshot + store <- SnapshotAuthorizationValidator.validateTransactions( + timeouts, + futureSupervisor, + loggerFactory, + )(message.transactions) + // validate that the domain trust certificate is present + // this ensures that the initial first message really came from the domain topology + // manager and was really meant for this participant + _ <- EitherT.cond[FutureUnlessShutdown]( + message.transactions + .filter(_.transaction.op == TopologyChangeOp.Add) + .map(_.transaction.element.mapping) + .exists { + case ParticipantState(RequestSide.From, domain, participant, permission, _) => + permission.isActive && domain == client.domainId && participant == member + case _ => false + }, + (), + "Initial domain topology transaction message does not contain the participant state (side=From) for this participant!", + ) + // finally, validate the signature of the domain topology manager using the keys + // found in the initial message + snapshot = StoreBasedTopologySnapshot.headstateOfAuthorizedStore(store, loggerFactory) + keys <- EitherT + 
.right(snapshot.allKeys(client.domainId.member)) + .mapK(FutureUnlessShutdown.outcomeK) + _ <- EitherT.fromEither[FutureUnlessShutdown]( + validateMessageAgainstKeys(ts, keys.signingKeys, message) + ) + } yield () + } + + private def validateMessageAgainstKeys( + ts: SequencedTime, + keys: Seq[SigningPublicKey], + message: DomainTopologyTransactionMessage, + )(implicit traceContext: TraceContext): Either[String, Unit] = { + val hash = message.hashToSign(client.pureCrypto) + for { + key <- keys + .find(_.id == message.domainTopologyManagerSignature.signedBy) + .toRight( + s"Domain manager signature with unknown key=${message.domainTopologyManagerSignature.signedBy}. Known=${keys + .map(_.id)}. Skipping ${message.transactions.length} transactions!" + ) + _ <- client.pureCrypto + .verifySignature(hash, key, message.domainTopologyManagerSignature) + .leftMap(error => + s"Signature checking of envelope failed: ${error}. Skipping ${message.transactions.length} transactions!" + ) + // check that the message was not replayed. the domain topology manager will submit these messages + // with the max sequence time which is included in the hash of the message itself. + // so if this message is sequenced after the given timestamp, then somebody tried to replay it + // + // replaying within the max-sequencing-time window is not a problem for already onboarded nodes, + // as they will just deduplicate the transaction within the tx processor. + // + // replaying within the max-sequencing-time window to not yet onboarded nodes is still possible, but + // the issue is rather unlikely: replaying a remove doesn't matter. replaying an add also doesn't matter + // as it would be contained in the state. so the only scenario it matters is if you quickly add an + // add and then remove it immediately. then you can replay the add. 
+ _ <- Either.cond( + message.notSequencedAfter >= ts.value, + (), + s"Detected malicious replay of a domain topology transaction message: Sequenced at ${ts.value}, but max-sequencing-time is ${message.notSequencedAfter}. Skipping ${message.transactions.length} transactions!", + ) + } yield { + logger.debug( + s"Successfully validated domain manager signature at ts=${ts.value} with key ${message.domainTopologyManagerSignature.signedBy}" + ) + } + } + + private def validateMessage( + ts: SequencedTime, + lastTs: Option[CantonTimestamp], + message: DomainTopologyTransactionMessage, + )(implicit + traceContext: TraceContext, + closeContext: CloseContext, + ): EitherT[FutureUnlessShutdown, String, Unit] = { + for { + snapshot <- EitherT.right( + SyncCryptoClient.getSnapshotForTimestampUS( + client, + ts.value, + lastTs, + protocolVersion, + warnIfApproximate = false, + ) + ) + keys <- EitherT + .right( + snapshot.ipsSnapshot.signingKeys(client.domainId.member) + ) + .mapK(FutureUnlessShutdown.outcomeK) + _ <- + // first message is validated specially + if (keys.isEmpty) { + validateFirstMessage(ts, message) + } else + EitherT.fromEither[FutureUnlessShutdown](validateMessageAgainstKeys(ts, keys, message)) + } yield () + } + + override def extractTopologyUpdatesAndValidateEnvelope( + ts: SequencedTime, + envelopes: List[DefaultOpenEnvelope], + )(implicit + traceContext: TraceContext, + closeContext: CloseContext, + ): FutureUnlessShutdown[List[SignedTopologyTransaction[TopologyChangeOp]]] = { + val messages = envelopes + .mapFilter(ProtocolMessage.select[DomainTopologyTransactionMessage]) + .map(_.protocolMessage) + val lastTs = lastSequencingTimestamp.getAndSet(Some(ts.value)) + NonEmpty.from(messages) match { + case None => FutureUnlessShutdown.pure(List.empty) + case Some(messages) if messages.sizeCompare(1) > 0 => + TopologyManagerAlarm + .Warn( + s"Received batch with ${messages.size} envelopes, but I expect only a single one!" 
+ ) + .report() + FutureUnlessShutdown.pure(List.empty) + case Some(messages) => + validateMessage(ts, lastTs, messages.head1).fold( + err => { + TopologyManagerAlarm.Warn(err).report() + logger.info( + s"The failing message contained the following transactions: ${messages.head1.transactions + .mkString("\n ")}" + ) + List.empty + }, + _ => { + messages.head1.transactions + }, + ) + } + } + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/IncomingTopologyTransactionAuthorizationValidator.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/IncomingTopologyTransactionAuthorizationValidator.scala new file mode 100644 index 0000000000..d2178c6f8b --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/IncomingTopologyTransactionAuthorizationValidator.scala @@ -0,0 +1,359 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology.processing + +import cats.syntax.either.* +import com.digitalasset.canton.crypto.{CryptoPureApi, Fingerprint} +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.topology.* +import com.digitalasset.canton.topology.processing.AuthorizedTopologyTransaction.* +import com.digitalasset.canton.topology.store.{ + TopologyStore, + TopologyStoreId, + TopologyTransactionRejection, + ValidatedTopologyTransaction, +} +import com.digitalasset.canton.topology.transaction.* +import com.digitalasset.canton.tracing.TraceContext + +import scala.annotation.nowarn +import scala.concurrent.{ExecutionContext, Future} + +/** Data collection providing information with respect to what is affected by this update + * + * @param authChecks the set of Uids that is mentioned in the transaction such that we can load the certificates for the respective uids + * @param cascadingNamespaces the set of namespaces where we had a namespace delegation change requiring a cascading update + * @param cascadingUids the set of uids where we had a identifier delegation change requiring a cascading update + */ +private[processing] final case class UpdateAggregation( + authChecks: Set[UniqueIdentifier] = Set(), + cascadingNamespaces: Set[Namespace] = Set(), + cascadingUids: Set[UniqueIdentifier] = Set(), +) { + + /** returns all cascading uids which are not already covered by the cascading namespaces */ + def filteredCascadingUids: Set[UniqueIdentifier] = + cascadingUids.filterNot(x => cascadingNamespaces.contains(x.namespace)) + + /** returns true if the given uid is affected by a cascading update */ + def isCascading(uid: UniqueIdentifier): Boolean = + cascadingNamespaces.contains(uid.namespace) || cascadingUids.contains(uid) + + def add(mapping: TopologyMapping): UpdateAggregation = mapping match { + case 
NamespaceDelegation(ns, _, _) => + // change in certificate requires full recompute for namespace (add could unlock existing certificates, remove could make anything obsolete) + this.copy(cascadingNamespaces = cascadingNamespaces + ns) + case IdentifierDelegation(uid, _) => + // change in identifier delegation requires full recompute for uid + addAuthCheck(uid).copy(cascadingUids = cascadingUids + uid) + case x => + addAuthCheck(x.requiredAuth.uids: _*) + } + + private def addAuthCheck(uid: UniqueIdentifier*): UpdateAggregation = + copy(authChecks = authChecks ++ uid) + + def nothingCascading: Boolean = cascadingNamespaces.isEmpty && cascadingUids.isEmpty + + def authNamespaces: Set[Namespace] = authChecks.map(_.namespace) ++ cascadingNamespaces + +} + +/** validate incoming topology transactions + * + * NOT THREAD SAFE. Note that this class is not thread safe + * + * we check three things: + * (1) are the signatures valid + * (2) are the signatures properly authorized + * a. load current set of authorized keys + * b. for each transaction, verify that the authorization key is valid. a key is a valid authorization if there + * is a certificate chain that originates from the root certificate at the time when the + * transaction is added (one by one). + * c. if the transaction is a namespace or identifier delegation, update its impact on the authorization set + * this means that if we add or remove a namespace delegation, then we need to perform a cascading + * update that activates or deactivates states that depend on this delegation. 
+ * (3) finally, what we compute as the "authorized graph" is then used to compute the derived table + * of "namespace delegations" + */ +class IncomingTopologyTransactionAuthorizationValidator( + cryptoPureApi: CryptoPureApi, + val store: TopologyStore[TopologyStoreId], + domainId: Option[DomainId], + val loggerFactory: NamedLoggerFactory, +)(implicit ec: ExecutionContext) + extends NamedLogging + with TransactionAuthorizationValidator { + + def reset(): Unit = { + namespaceCache.clear() + identifierDelegationCache.clear() + } + + /** determine whether one of the txs got already added earlier */ + private def findDuplicates( + timestamp: CantonTimestamp, + transactions: Seq[SignedTopologyTransaction[TopologyChangeOp]], + )(implicit traceContext: TraceContext): Future[Seq[Option[EffectiveTime]]] = { + Future.sequence( + transactions.map { tx => + // skip duplication check for non-adds + if (tx.transaction.op != TopologyChangeOp.Add) + Future.successful(None) + else { + // check that the transaction has not been added before (but allow it if it has a different version ...) + store + .findStored(tx) + .map( + _.filter(x => + x.validFrom.value < timestamp && x.transaction.protoVersion == tx.protoVersion && x.transaction == tx + ).map(_.validFrom) + ) + } + } + ) + } + + /** Validates the provided domain topology transactions and applies the certificates to the auth state + * + * When receiving topology transactions we have to evaluate them and continuously apply any + * update to the namespace delegations or identifier delegations to the "head state". + * + * And we use that "head state" to verify if the transactions are authorized or not. 
+ */ + def validateAndUpdateHeadAuthState( + timestamp: CantonTimestamp, + transactions: Seq[SignedTopologyTransaction[TopologyChangeOp]], + )(implicit + traceContext: TraceContext + ): Future[(UpdateAggregation, Seq[ValidatedTopologyTransaction])] = { + + val (updateAggregation, signaturesChecked) = validateSignaturesAndGrabAuthChecks(transactions) + val validateDuplicatesF = findDuplicates(timestamp, transactions) + + val loadGraphsF = loadAuthorizationGraphs(timestamp, updateAggregation.authNamespaces) + val loadUidsF = + loadIdentifierDelegationsCascading(timestamp, updateAggregation, updateAggregation.authChecks) + + logger.debug(s"Update aggregation yielded ${updateAggregation}") + + for { + _ <- loadGraphsF + cascadingUidsFromNamespace <- loadUidsF + validateDuplicates <- validateDuplicatesF + } yield { + val validated = signaturesChecked.zip(validateDuplicates).map { + // two times None means the tx has a valid signature and hasn't been added before + case (ValidatedTopologyTransaction(elem: SignedTopologyTransaction[_], None), None) => + val res = if (processTransaction(elem)) { + None + } else { + Some(TopologyTransactionRejection.NotAuthorized) + } + ValidatedTopologyTransaction(elem, res) + case ( + ValidatedTopologyTransaction(elem: SignedTopologyTransaction[_], None), + Some(knownBefore), + ) => + ValidatedTopologyTransaction( + elem, + Some(TopologyTransactionRejection.Duplicate(knownBefore.value)), + ) + case (v, _) => v + } + // add any uid for which we have a valid identifier delegation to the cascading set (as a new namespace + // certificate might activate an identifier delegation) + ( + updateAggregation.copy(cascadingUids = + updateAggregation.cascadingUids ++ cascadingUidsFromNamespace + ), + validated, + ) + } + } + + private def processTransaction( + elem: SignedTopologyTransaction[TopologyChangeOp] + )(implicit traceContext: TraceContext): Boolean = + elem.transaction match { + case TopologyStateUpdate(op, element) => + element.mapping 
match { + case nd: NamespaceDelegation => + processNamespaceDelegation( + op, + AuthorizedTopologyTransaction(elem.uniquePath, nd, elem), + ) + case id: IdentifierDelegation => + processIdentifierDelegation( + op, + AuthorizedTopologyTransaction(elem.uniquePath, id, elem), + ) + case _ => isCurrentlyAuthorized(elem) + } + + case _: DomainGovernanceTransaction => isCurrentlyAuthorized(elem) + } + + def getValidSigningKeysForMapping(asOf: CantonTimestamp, mapping: TopologyMapping)(implicit + traceContext: TraceContext + ): Future[Seq[Fingerprint]] = { + @nowarn("msg=match may not be exhaustive") + def intersect(sets: Seq[Set[Fingerprint]]): Set[Fingerprint] = { + sets match { + case Seq() => Set() + case Seq(one, rest @ _*) => + rest.foldLeft(one) { case (acc, elem) => acc.intersect(elem) } + } + } + val loadGF = loadAuthorizationGraphs(asOf, mapping.requiredAuth.namespaces._1.toSet) + val loadIDF = loadIdentifierDelegations(asOf, Seq(), mapping.requiredAuth.uids.toSet) + for { + _ <- loadGF + _ <- loadIDF + } yield { + val (namespaces, requireRoot) = mapping.requiredAuth.namespaces + val fromNs = namespaces + .map { namespace => + getAuthorizationGraphForNamespace(namespace) + .authorizedDelegations() + .filter { auth => + auth.mapping.isRootDelegation || !requireRoot + } + .map(x => x.mapping.target.fingerprint) + .toSet + } + val fromUids = mapping.requiredAuth.uids.map { uid => + identifierDelegationCache.get(uid).toList.flatMap { cache => + val graph = getAuthorizationGraphForNamespace(uid.namespace) + cache + .filter(aid => graph.isValidAuthorizationKey(aid.signingKey, requireRoot = false)) + .map(_.mapping.target.fingerprint) + } + } + val selfSigned = mapping match { + case NamespaceDelegation(_, target, true) => Set(target.fingerprint) + case _ => Set() + } + (intersect(fromUids.map(_.toSet)) ++ intersect(fromNs) ++ selfSigned).toSeq + } + } + + /** loads all identifier delegations into the identifier delegation cache + * + * This function has two "modes". 
On a cascading update affecting namespaces, we have + * to reload all identifier delegation certificates in order to figure out the affected + * uids. The return Set then contains all the uids that were loaded as a result of the + * namespace query. + * + * If there is no cascading namespace update, we just load the affected uids and return an empty set. + */ + private def loadIdentifierDelegationsCascading( + timestamp: CantonTimestamp, + cascadingUpdate: UpdateAggregation, + transactionUids: Set[UniqueIdentifier], + )(implicit traceContext: TraceContext): Future[Set[UniqueIdentifier]] = { + // we need to load the identifier delegations for all the uids that are mentioned by a transactions + val loadUids = + (transactionUids ++ cascadingUpdate.cascadingUids) -- identifierDelegationCache.keySet + if (loadUids.isEmpty && cascadingUpdate.cascadingNamespaces.isEmpty) { + Future.successful(Set.empty[UniqueIdentifier]) + } else loadIdentifierDelegations(timestamp, cascadingUpdate.cascadingNamespaces.toSeq, loadUids) + } + + private def processIdentifierDelegation( + op: AddRemoveChangeOp, + elem: AuthorizedIdentifierDelegation, + ): Boolean = { + // check authorization + val graph = getAuthorizationGraphForNamespace(elem.mapping.identifier.namespace) + val auth = graph.isValidAuthorizationKey(elem.signingKey, requireRoot = false) + // update identifier delegation cache if necessary + if (auth) { + val updateOp: Set[AuthorizedIdentifierDelegation] => Set[AuthorizedIdentifierDelegation] = + op match { + case TopologyChangeOp.Add => + x => x + elem + case TopologyChangeOp.Remove => + x => // using a filter as the key that authorized the removal might be different that authorized the addition + x.filter(cur => cur.mapping != elem.mapping) + } + updateIdentifierDelegationCache(elem.mapping.identifier, updateOp) + } + auth + } + + private def processNamespaceDelegation( + op: AddRemoveChangeOp, + elem: AuthorizedNamespaceDelegation, + )(implicit traceContext: TraceContext): 
Boolean = { + val graph = getAuthorizationGraphForNamespace(elem.mapping.namespace) + // add or remove including authorization check + op match { + case TopologyChangeOp.Add => graph.add(elem) + case TopologyChangeOp.Remove => graph.remove(elem) + } + } + + private def validateSignaturesAndGrabAuthChecks( + transactions: Seq[SignedTopologyTransaction[TopologyChangeOp]] + ): (UpdateAggregation, Seq[ValidatedTopologyTransaction]) = { + + def verifySignature( + tx: SignedTopologyTransaction[TopologyChangeOp] + ): Either[TopologyTransactionRejection, Unit] = + tx.verifySignature(cryptoPureApi).leftMap(TopologyTransactionRejection.SignatureCheckFailed) + + def verifyDomain( + tx: SignedTopologyTransaction[TopologyChangeOp] + ): Either[TopologyTransactionRejection, Unit] = + tx.restrictedToDomain match { + case Some(txDomainId) => + Either.cond( + domainId.forall(_ == txDomainId), + (), + TopologyTransactionRejection.WrongDomain(txDomainId), + ) + case None => Right(()) + } + + // we need to figure out for which namespaces and uids we need to load the validation checks + // and for which uids and namespaces we'll have to perform a cascading update + transactions.foldLeft((UpdateAggregation(), Seq.empty[ValidatedTopologyTransaction])) { + case ((cascadingUpdate, acc), x) => + val res = (for { + _ <- verifySignature(x) + _ <- verifyDomain(x) + } yield ()) match { + case Right(()) => None + case Left(err) => + Some(err) + } + val cc = res.fold(cascadingUpdate.add(x.transaction.element.mapping))(_ => cascadingUpdate) + (cc, acc :+ ValidatedTopologyTransaction(x, res)) + } + } + + def authorizedNamespaceDelegationsForNamespaces( + namespaces: Set[Namespace] + ): Seq[AuthorizedNamespaceDelegation] = + for { + ns <- namespaces.toList + gr <- namespaceCache.get(ns).toList + item <- gr.authorizedDelegations() + } yield item + + def authorizedIdentifierDelegationsForUid( + uid: UniqueIdentifier + ): Seq[AuthorizedIdentifierDelegation] = { + val ret = for { + graph <- 
namespaceCache.get(uid.namespace) + items <- identifierDelegationCache.get(uid) + } yield items + .filter(x => graph.isValidAuthorizationKey(x.signingKey, requireRoot = false)) + .toSeq + ret.getOrElse(Seq()) + } + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/IncomingTopologyTransactionAuthorizationValidatorX.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/IncomingTopologyTransactionAuthorizationValidatorX.scala new file mode 100644 index 0000000000..5d59bb5ae6 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/IncomingTopologyTransactionAuthorizationValidatorX.scala @@ -0,0 +1,380 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology.processing + +import cats.Monoid +import cats.data.EitherT +import cats.syntax.parallel.* +import com.digitalasset.canton.crypto.CryptoPureApi +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.topology.* +import com.digitalasset.canton.topology.processing.AuthorizedTopologyTransactionX.{ + AuthorizedIdentifierDelegationX, + AuthorizedNamespaceDelegationX, + AuthorizedUnionspaceDefinitionX, +} +import com.digitalasset.canton.topology.store.ValidatedTopologyTransactionX.GenericValidatedTopologyTransactionX +import com.digitalasset.canton.topology.store.* +import com.digitalasset.canton.topology.transaction.SignedTopologyTransactionX.GenericSignedTopologyTransactionX +import com.digitalasset.canton.topology.transaction.TopologyMappingX.MappingHash +import com.digitalasset.canton.topology.transaction.TopologyTransactionX.GenericTopologyTransactionX +import com.digitalasset.canton.topology.transaction.* +import 
com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.FutureInstances.* + +import scala.concurrent.{ExecutionContext, Future} + +/** Data collection providing information with respect to what is affected by this update + * + * @param authChecks the set of Uids that is mentioned in the transaction such that we can load the certificates for the respective uids + * @param cascadingNamespaces the set of namespaces where we had a namespace delegation change requiring a cascading update + * @param cascadingUids the set of uids where we had a identifier delegation change requiring a cascading update + */ +private[processing] final case class UpdateAggregationX( + authChecks: Set[UniqueIdentifier] = Set(), + cascadingNamespaces: Set[Namespace] = Set(), + cascadingUids: Set[UniqueIdentifier] = Set(), +) { + + /** returns all cascading uids which are not already covered by the cascading namespaces */ + def filteredCascadingUids: Set[UniqueIdentifier] = + cascadingUids.filterNot(x => cascadingNamespaces.contains(x.namespace)) + + /** returns true if the given uid is affected by a cascading update */ + def isCascading(uid: UniqueIdentifier): Boolean = + cascadingNamespaces.contains(uid.namespace) || cascadingUids.contains(uid) + + def add( + mapping: TopologyMappingX, + currentTransaction: Option[GenericTopologyTransactionX], + ): UpdateAggregationX = mapping match { + case NamespaceDelegationX(ns, _, _) => + // change in certificate requires full recompute for namespace (add could unlock existing certificates, remove could make anything obsolete) + this.copy(cascadingNamespaces = cascadingNamespaces + ns) + case IdentifierDelegationX(uid, _) => + // change in identifier delegation requires full recompute for uid + this.copy(cascadingUids = cascadingUids + uid, authChecks = authChecks + uid) + case UnionspaceDefinitionX(ns, _, owners) => + // change in unionspace definition requires full recompute + this.copy(cascadingNamespaces = 
cascadingNamespaces + ns ++ owners) + case x => + this.copy(authChecks = + authChecks ++ mapping.requiredAuth(currentTransaction).authorizations.uids + ) + } + + def nothingCascading: Boolean = cascadingNamespaces.isEmpty && cascadingUids.isEmpty + + def authNamespaces: Set[Namespace] = authChecks.map(_.namespace) ++ cascadingNamespaces +} + +object UpdateAggregationX { + implicit val monoid: Monoid[UpdateAggregationX] = new Monoid[UpdateAggregationX] { + override def empty: UpdateAggregationX = UpdateAggregationX() + + override def combine(x: UpdateAggregationX, y: UpdateAggregationX): UpdateAggregationX = + UpdateAggregationX( + authChecks = x.authChecks ++ y.authChecks, + cascadingNamespaces = x.cascadingNamespaces ++ y.cascadingNamespaces, + cascadingUids = x.cascadingUids ++ y.cascadingUids, + ) + } +} + +/** validate incoming topology transactions + * + * NOT THREAD SAFE. Note that this class is not thread safe + * + * we check three things: + * (1) are the signatures valid + * (2) are the signatures properly authorized + * a. load current set of authorized keys + * b. for each transaction, verify that the authorization keys are valid. a key is a valid authorization if there + * is a certificate chain that originates from the root certificate at the time when the + * transaction is added (one by one). + * c. if the transaction is a namespace or identifier delegation, update its impact on the authorization set + * this means that if we add or remove a namespace delegation, then we need to perform a cascading + * update that activates or deactivates states that depend on this delegation. 
+ * (3) finally, what we compute as the "authorized graph" is then used to compute the derived table + * of "namespace delegations" + */ +class IncomingTopologyTransactionAuthorizationValidatorX( + val pureCrypto: CryptoPureApi, + val store: TopologyStoreX[TopologyStoreId], + domainId: Option[DomainId], + val loggerFactory: NamedLoggerFactory, +)(implicit ec: ExecutionContext) + extends NamedLogging + with TransactionAuthorizationValidatorX { + + def reset(): Unit = { + namespaceCache.clear() + identifierDelegationCache.clear() + unionspaceCache.clear() + } + + /** Validates the provided topology transactions and applies the certificates to the auth state + * + * When receiving topology transactions we have to evaluate them and continuously apply any + * update to the namespace delegations or identifier delegations to the "head state". + * + * And we use that "head state" to verify if the transactions are authorized or not. + */ + def validateAndUpdateHeadAuthState( + timestamp: CantonTimestamp, + transactionsToValidate: Seq[GenericSignedTopologyTransactionX], + transactionsInStore: Map[MappingHash, GenericSignedTopologyTransactionX], + expectFullAuthorization: Boolean, + )(implicit + traceContext: TraceContext + ): Future[(UpdateAggregationX, Seq[GenericValidatedTopologyTransactionX])] = { + for { + authCheckResult <- determineRelevantUidsAndNamespaces( + transactionsToValidate, + transactionsInStore.view.mapValues(_.transaction).toMap, + ) + (updateAggregation, targetDomainVerified) = authCheckResult + loadGraphsF = loadAuthorizationGraphs(timestamp, updateAggregation.authNamespaces) + loadUidsF = loadIdentifierDelegationsCascading( + timestamp, + updateAggregation, + updateAggregation.authChecks, + ) + _ <- loadGraphsF + cascadingUidsFromNamespace <- loadUidsF + } yield { + + logger.debug(s"Update aggregation yielded ${updateAggregation}") + val validated = targetDomainVerified.map { + case ValidatedTopologyTransactionX(tx, None, _) => + processTransaction( + 
tx, + transactionsInStore.get(tx.transaction.mapping.uniqueKey), + expectFullAuthorization, + ) + case v => v + } + // add any uid for which we have a valid identifier delegation to the cascading set (as a new namespace + // certificate might activate an identifier delegation) + ( + updateAggregation.copy(cascadingUids = + updateAggregation.cascadingUids ++ cascadingUidsFromNamespace + ), + validated, + ) + } + } + + private def processTransaction( + toValidate: GenericSignedTopologyTransactionX, + inStore: Option[GenericSignedTopologyTransactionX], + expectFullAuthorization: Boolean, + )(implicit traceContext: TraceContext): GenericValidatedTopologyTransactionX = { + val processedNs = toValidate.selectMapping[NamespaceDelegationX].forall { sigTx => + processNamespaceDelegation( + toValidate.transaction.op, + AuthorizedTopologyTransactionX(sigTx), + ) + } + + val processedIdent = toValidate.selectMapping[IdentifierDelegationX].forall { sigTx => + processIdentifierDelegation( + toValidate.transaction.op, + AuthorizedTopologyTransactionX(sigTx), + ) + } + + val resultUs = toValidate.selectMapping[UnionspaceDefinitionX].map { sigTx => + processUnionspaceDefinition(sigTx.transaction.op, AuthorizedTopologyTransactionX(sigTx)) + } + val processedUs = resultUs.forall(_._1) + val mappingSpecificCheck = processedNs && processedIdent && processedUs + + val rejectionOrMissingAuthorizers = isCurrentlyAuthorized(toValidate, inStore) + + // The mappingSpecificCheck is a necessary condition for having sufficient authorizers. + val hasSufficientAuthorizers = + mappingSpecificCheck && rejectionOrMissingAuthorizers.exists(_.isEmpty) + // Conversely a failing mappingSpecificCheck implies missing authorizers irrespective + // of whether the generic auth check finds missing signatures (e.g. in a unionspace + // only the mappingSpecificCheck sees if the USD owner threshold is satisfied). 
+ val hasMissingAuthorizers = + rejectionOrMissingAuthorizers.exists(!_.isEmpty || !mappingSpecificCheck) + + // the transaction is fully authorized if either + // 1. it's a root certificate, or + // 2. there is no authorization error and there are no missing authorizers + // We need to check explicitly for the root certificate here, because a REMOVE operation + // removes itself from the authorization graph, and therefore `isCurrentlyAuthorized` cannot validate it. + val isFullyAuthorized = + NamespaceDelegationX.isRootCertificate(toValidate) || hasSufficientAuthorizers + + // If a unionspace transaction is fully authorized, reflect so in the unionspace cache. + // Note: It seems a bit unsafe to update the caches on the assumption that the update will also be eventually + // persisted by the caller (a few levels up the call chain in TopologyStateProcessorX.validateAndApplyAuthorization + // as the caller performs additional checks such as the numeric value of the serial number). + // But at least this is safer than where the check was previously (inside processUnionspaceDefinition before even + // `isCurrentlyAuthorized` above had finished all checks). + if (isFullyAuthorized) { + resultUs.foreach { case (_, updateUnionspaceCache) => updateUnionspaceCache() } + } + + val acceptMissingAuthorizers = !expectFullAuthorization && hasMissingAuthorizers + + val finalTransaction = toValidate.copy(isProposal = !isFullyAuthorized) + + // Either the transaction is fully authorized or the request allows partial authorization + if (isFullyAuthorized || acceptMissingAuthorizers) { + ValidatedTopologyTransactionX(finalTransaction, None) + } else { + ValidatedTopologyTransactionX( + toValidate, + rejectionOrMissingAuthorizers.left.toOption.orElse( + Some(TopologyTransactionRejection.NotAuthorized) + ), + ) + } + } + + /** loads all identifier delegations into the identifier delegation cache + * + * This function has two "modes". 
On a cascading update affecting namespaces, we have + * to reload all identifier delegation certificates in order to figure out the affected + * uids. The return Set then contains all the uids that were loaded as a result of the + * namespace query. + * + * If there is no cascading namespace update, we just load the affected uids and return an empty set. + */ + private def loadIdentifierDelegationsCascading( + timestamp: CantonTimestamp, + cascadingUpdate: UpdateAggregationX, + transactionUids: Set[UniqueIdentifier], + )(implicit traceContext: TraceContext): Future[Set[UniqueIdentifier]] = { + // we need to load the identifier delegations for all the uids that are mentioned by a transactions + val loadUids = + (transactionUids ++ cascadingUpdate.cascadingUids) -- identifierDelegationCache.keySet + if (loadUids.isEmpty && cascadingUpdate.cascadingNamespaces.isEmpty) { + Future.successful(Set.empty[UniqueIdentifier]) + } else loadIdentifierDelegations(timestamp, cascadingUpdate.cascadingNamespaces.toSeq, loadUids) + } + + private def processIdentifierDelegation( + op: TopologyChangeOpX, + tx: AuthorizedIdentifierDelegationX, + ): Boolean = { + // check authorization + val check = getAuthorizationCheckForNamespace(tx.mapping.identifier.namespace) + val keysAreValid = check.areValidAuthorizationKeys(tx.signingKeys, requireRoot = false) + // update identifier delegation cache if necessary + if (keysAreValid) { + val updateOp: Set[AuthorizedIdentifierDelegationX] => Set[AuthorizedIdentifierDelegationX] = + op match { + case TopologyChangeOpX.Replace => + x => x + tx + case TopologyChangeOpX.Remove => + x => // using a filter as the key that authorized the removal might be different that authorized the addition + x.filter(cur => cur.mapping != tx.mapping) + } + updateIdentifierDelegationCache(tx.mapping.identifier, updateOp) + } + keysAreValid + } + + private def processNamespaceDelegation( + op: TopologyChangeOpX, + tx: AuthorizedNamespaceDelegationX, + )(implicit 
traceContext: TraceContext): Boolean = { + val graph = getAuthorizationGraphForNamespace(tx.mapping.namespace) + // add or remove including authorization check + op match { + case TopologyChangeOpX.Replace => graph.add(tx) + case TopologyChangeOpX.Remove => graph.remove(tx) + } + } + + /** Process unionspace definition + * + * return whether unionspace definition mapping is authorizable along with a "cache-update function" to be invoked + * by the caller once the mapping is to be committed. + */ + private def processUnionspaceDefinition( + op: TopologyChangeOpX, + tx: AuthorizedUnionspaceDefinitionX, + ): (Boolean, () => Unit) = { + val unionspace = tx.mapping.unionspace + val (auth, usGraph) = unionspaceCache + .get(unionspace) + .map { case (_, usGraph) => + val auth = usGraph.areValidAuthorizationKeys(tx.signingKeys, false) + auth -> usGraph + } + .getOrElse { + val directUnionspaceGraph = namespaceCache.getOrElseUpdate( + unionspace, + new AuthorizationGraphX( + unionspace, + extraDebugInfo = false, + loggerFactory, + ), + ) + val ownerGraphs = tx.mapping.owners.forgetNE.toSeq.map(getAuthorizationGraphForNamespace) + val newUnionspaceGraph = UnionspaceAuthorizationGraphX( + tx.mapping, + directUnionspaceGraph, + ownerGraphs, + ) + val auth = newUnionspaceGraph.areValidAuthorizationKeys(tx.signingKeys, false) + (auth, newUnionspaceGraph) + } + + ( + auth, + () => { + val ownerGraphs = tx.mapping.owners.forgetNE.toSeq.map(getAuthorizationGraphForNamespace) + unionspaceCache + .put(unionspace, (tx.mapping, usGraph.copy(us = tx.mapping, ownerGraphs = ownerGraphs))) + .discard + }, + ) + } + + private def determineRelevantUidsAndNamespaces( + transactionsToValidate: Seq[GenericSignedTopologyTransactionX], + transactionsInStore: Map[MappingHash, GenericTopologyTransactionX], + ): Future[(UpdateAggregationX, Seq[GenericValidatedTopologyTransactionX])] = { + def verifyDomain( + tx: GenericSignedTopologyTransactionX + ): Either[TopologyTransactionRejection, Unit] = + 
tx.restrictedToDomain match { + case Some(txDomainId) => + Either.cond( + domainId.forall(_ == txDomainId), + (), + TopologyTransactionRejection.WrongDomain(txDomainId), + ) + case None => Right(()) + } + + // we need to figure out for which namespaces and uids we need to load the validation checks + // and for which uids and namespaces we'll have to perform a cascading update + import UpdateAggregationX.monoid + transactionsToValidate.parFoldMapA { toValidate => + EitherT + .fromEither[Future](verifyDomain(toValidate)) + .fold( + rejection => + (UpdateAggregationX(), Seq(ValidatedTopologyTransactionX(toValidate, Some(rejection)))), + _ => + ( + UpdateAggregationX().add( + toValidate.transaction.mapping, + transactionsInStore.get(toValidate.transaction.mapping.uniqueKey), + ), + Seq(ValidatedTopologyTransactionX(toValidate, None)), + ), + ) + } + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/SnapshotAuthorizationValidator.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/SnapshotAuthorizationValidator.scala new file mode 100644 index 0000000000..6cd8718741 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/SnapshotAuthorizationValidator.scala @@ -0,0 +1,192 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology.processing + +import cats.data.EitherT +import com.daml.nameof.NameOf.functionFullName +import com.digitalasset.canton.concurrent.FutureSupervisor +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.lifecycle.{FlagCloseable, FutureUnlessShutdown, Lifecycle} +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.topology.processing.TransactionAuthorizationValidator.AuthorizationChain +import com.digitalasset.canton.topology.store.TopologyStoreId.AuthorizedStore +import com.digitalasset.canton.topology.store.memory.InMemoryTopologyStore +import com.digitalasset.canton.topology.store.{ + StoredTopologyTransactions, + TopologyStore, + TopologyStoreId, + ValidatedTopologyTransaction, +} +import com.digitalasset.canton.topology.transaction.* +import com.digitalasset.canton.topology.{Namespace, UniqueIdentifier} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.{MonadUtil, SimpleExecutionQueue} + +import scala.concurrent.{ExecutionContext, Future} + +/** Compute the authorization chain for a certain UID */ +class SnapshotAuthorizationValidator( + asOf: CantonTimestamp, + val store: TopologyStore[TopologyStoreId], + override val timeouts: ProcessingTimeout, + val loggerFactory: NamedLoggerFactory, + futureSupervisor: FutureSupervisor, +)(implicit executionContext: ExecutionContext) + extends TransactionAuthorizationValidator + with NamedLogging + with FlagCloseable { + + private val sequential = new SimpleExecutionQueue( + "snapshot-authorization-validator-queue", + futureSupervisor, + timeouts, + loggerFactory, + ) + + def authorizedBy( + transaction: SignedTopologyTransaction[TopologyChangeOp] + )(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Option[AuthorizationChain]] = { + // preload our 
cache. note, we don't want to load stuff into our cache concurrently, so we + // squeeze this through a sequential execution queue + val preloadF = transaction.transaction.element.mapping.requiredAuth match { + case RequiredAuth.Ns(namespace, _) => + sequential.execute( + loadAuthorizationGraphs( + asOf, + Set(namespace), + ), + functionFullName, + ) + case RequiredAuth.Uid(uids) => + sequential.execute( + { + val graphF = loadAuthorizationGraphs(asOf, uids.map(_.namespace).toSet) + val delF = loadIdentifierDelegations(asOf, Seq.empty, uids.toSet) + graphF.zip(delF) + }, + functionFullName, + ) + } + + preloadF.map { _ => + authorizationChainFor(transaction) + } + } + + def removeNamespaceDelegationFromCache( + namespace: Namespace, + nsd: StoredTopologyTransactions[TopologyChangeOp], + )(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Unit] = + sequential.execute( + Future { + namespaceCache + .get(namespace) + .fold(())(ag => + ag.unauthorizedRemove(nsd.toAuthorizedTopologyTransactions { + case x: NamespaceDelegation => x + }) + ) + }, + functionFullName, + ) + + def removeIdentifierDelegationFromCache( + uid: UniqueIdentifier, + nsd: StoredTopologyTransactions[TopologyChangeOp], + )(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Unit] = + sequential.execute( + Future { + val authorizedNsd = nsd.toAuthorizedTopologyTransactions { case x: IdentifierDelegation => + x + } + updateIdentifierDelegationCache(uid, { _.filterNot(authorizedNsd.contains) }) // drop every delegation in the removal batch, not only a singleton-equal set + }, + functionFullName, + ) + + def reset()(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Unit] = + sequential.execute( + Future { + identifierDelegationCache.clear() + namespaceCache.clear() + }, + functionFullName, + ) + + override protected def onClosed(): Unit = Lifecycle.close { + sequential + }(logger) + +} + +object SnapshotAuthorizationValidator { + + def validateTransactions( + timeouts: ProcessingTimeout, + futureSupervisor: FutureSupervisor, +
loggerFactory: NamedLoggerFactory, + )( + transactions: List[SignedTopologyTransaction[TopologyChangeOp]] + )(implicit + traceContext: TraceContext, + executionContext: ExecutionContext, + ): EitherT[FutureUnlessShutdown, String, TopologyStore[AuthorizedStore]] = { + val store = + new InMemoryTopologyStore( + AuthorizedStore, + loggerFactory, + timeouts, + futureSupervisor, + ) + val validator = + new SnapshotAuthorizationValidator( + CantonTimestamp.MaxValue, + store, + timeouts, + loggerFactory, + futureSupervisor, + ) + + def requiresReset(tx: SignedTopologyTransaction[TopologyChangeOp]): Boolean = + tx.transaction.element.mapping.dbType == DomainTopologyTransactionType.NamespaceDelegation || + tx.transaction.element.mapping.dbType == DomainTopologyTransactionType.IdentifierDelegation + + // check that all transactions are authorized + val tmp: EitherT[FutureUnlessShutdown, String, Unit] = EitherT( + MonadUtil.foldLeftM(Right(()): Either[String, Unit], transactions.zipWithIndex) { + case (Right(_), (tx, idx)) => + val ts = CantonTimestamp.Epoch.plusMillis(idx.toLong) + // incrementally add it to the store and check the validation + for { + isValidated <- validator.authorizedBy(tx).map(_.nonEmpty) + _ <- FutureUnlessShutdown.outcomeF( + store.append( + SequencedTime(ts), + EffectiveTime(ts), + Seq(ValidatedTopologyTransaction(tx, None)), + ) + ) + // if the transaction was a namespace delegation, drop it + _ <- if (requiresReset(tx)) validator.reset() else FutureUnlessShutdown.unit + } yield Either.cond( + isValidated, + (), + s"Unauthorized topology transaction: $tx", + ) + case (acc, _) => FutureUnlessShutdown.pure(acc) + } + ) + tmp.map(_ => store) + } + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TerminateProcessing.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TerminateProcessing.scala new file mode 100644 index 0000000000..6b7cc8e7f9 --- /dev/null +++ 
b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TerminateProcessing.scala @@ -0,0 +1,39 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology.processing + +import com.digitalasset.canton.SequencerCounter +import com.digitalasset.canton.tracing.TraceContext + +import scala.concurrent.Future + +/** An implementation of this trait allows to schedule code to be executed at the end of the + * processing of a batch of topology transactions. + * On the participant, this *must* tick the record order publisher before returning. + */ +trait TerminateProcessing { + + /** Changes to the topology stores need to be persisted before this method is called. + */ + def terminate( + sc: SequencerCounter, + sequencedTime: SequencedTime, + effectiveTime: EffectiveTime, + )(implicit traceContext: TraceContext): Future[Unit] +} + +object TerminateProcessing { + + /** On the participant, [[TerminateProcessing.terminate]] should tick the record order publisher when processing + * is finished. Hence, this no-op terminate processing should be used only in domain nodes. 
+ */ + + private[processing] object NoOpTerminateTopologyProcessing extends TerminateProcessing { + override def terminate( + sc: SequencerCounter, + sequencedTime: SequencedTime, + effectiveTime: EffectiveTime, + )(implicit traceContext: TraceContext): Future[Unit] = Future.unit + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTimes.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTimes.scala new file mode 100644 index 0000000000..010f599b67 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTimes.scala @@ -0,0 +1,46 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology.processing + +import com.digitalasset.canton.LfTimestamp +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.google.protobuf.timestamp.Timestamp as ProtoTimestamp + +final case class EffectiveTime(value: CantonTimestamp) { + def toApproximate: ApproximateTime = ApproximateTime(value) + + def toProtoPrimitive: ProtoTimestamp = value.toProtoPrimitive + def toLf: LfTimestamp = value.toLf + def max(that: EffectiveTime): EffectiveTime = + EffectiveTime(value.max(that.value)) +} +object EffectiveTime { + val MinValue: EffectiveTime = EffectiveTime(CantonTimestamp.MinValue) + val MaxValue: EffectiveTime = EffectiveTime(CantonTimestamp.MaxValue) + implicit val orderingEffectiveTime: Ordering[EffectiveTime] = + Ordering.by[EffectiveTime, CantonTimestamp](_.value) + def fromProtoPrimitive(ts: ProtoTimestamp): ParsingResult[EffectiveTime] = + CantonTimestamp.fromProtoPrimitive(ts).map(EffectiveTime(_)) +} + +final case class ApproximateTime(value: CantonTimestamp) +object ApproximateTime { + val MinValue: ApproximateTime 
= ApproximateTime(CantonTimestamp.MinValue) + val MaxValue: ApproximateTime = ApproximateTime(CantonTimestamp.MaxValue) + implicit val orderingApproximateTime: Ordering[ApproximateTime] = + Ordering.by[ApproximateTime, CantonTimestamp](_.value) +} + +final case class SequencedTime(value: CantonTimestamp) { + def toProtoPrimitive: ProtoTimestamp = value.toProtoPrimitive + def toLf: LfTimestamp = value.toLf +} +object SequencedTime { + val MinValue: SequencedTime = SequencedTime(CantonTimestamp.MinValue) + implicit val orderingSequencedTime: Ordering[SequencedTime] = + Ordering.by[SequencedTime, CantonTimestamp](_.value) + def fromProtoPrimitive(ts: ProtoTimestamp): ParsingResult[SequencedTime] = + CantonTimestamp.fromProtoPrimitive(ts).map(SequencedTime(_)) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTimestampPlusEpsilonTracker.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTimestampPlusEpsilonTracker.scala new file mode 100644 index 0000000000..38e98dd32f --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTimestampPlusEpsilonTracker.scala @@ -0,0 +1,427 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology.processing + +import com.daml.nameof.NameOf.functionFullName +import com.digitalasset.canton.DiscardOps +import com.digitalasset.canton.concurrent.FutureSupervisor +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.lifecycle.{ + FlagCloseable, + FutureUnlessShutdown, + PromiseUnlessShutdown, + UnlessShutdown, +} +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.protocol.DynamicDomainParameters +import com.digitalasset.canton.time.* +import com.digitalasset.canton.topology.store.TopologyStore.Change +import com.digitalasset.canton.topology.store.{TopologyStore, TopologyStoreId, TopologyStoreX} +import com.digitalasset.canton.topology.transaction.{ + DomainParametersChange, + DomainParametersStateX, + DomainTopologyTransactionType, +} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.{ErrorUtil, FutureUtil} + +import java.util.ConcurrentModificationException +import java.util.concurrent.atomic.AtomicReference +import scala.annotation.tailrec +import scala.concurrent.ExecutionContext +import scala.util.{Failure, Success} + +/** Compute and synchronise the effective timestamps + * + * Transaction validation and processing depends on the topology state at the given sequencer time. + * Therefore, we would have to inspect every event first if there is a topology state and wait until all + * the topology processing has finished before evaluating the transaction. This would be slow and sequential. + * + * Therefore, we future date our topology transactions with an "effective time", computed from + * the sequencerTime + domainParameters.topologyChangeDelay. + * + * However, the domainParameters can change and so can the topologyChangeDelay. 
Therefore we need to be a bit careful + * when computing the effective time and track the changes to the topologyChangeDelay parameter accordingly. + * + * This class (hopefully) takes care of this logic + */ +class TopologyTimestampPlusEpsilonTracker( + val timeouts: ProcessingTimeout, + val loggerFactory: NamedLoggerFactory, + futureSupervisor: FutureSupervisor, +) extends NamedLogging + with TimeAwaiter + with FlagCloseable { + + override protected def onClosed(): Unit = { + expireTimeAwaiter() + super.onClosed() + } + + /** track changes to epsilon carefully. + * + * increasing the epsilon is straight forward. reducing it requires more care + * + * @param epsilon the epsilon is the time we add to the timestamp + * @param validFrom from when on should this one be valid (as with all topology transactions, the time is exclusive) + */ + private case class State(epsilon: NonNegativeFiniteDuration, validFrom: EffectiveTime) + + /** sorted list of epsilon updates (in descending order of sequencing) */ + private val state = new AtomicReference[List[State]](List()) + + /** protect us against broken domains that send topology transactions in times when they've just reduced the + * epsilon in a way that could lead the second topology transaction to take over the epsilon change. 
+ */ + private val uniqueUpdateTime = new AtomicReference[EffectiveTime](EffectiveTime.MinValue) + + private val lastEffectiveTimeProcessed = + new AtomicReference[EffectiveTime](EffectiveTime.MinValue) + + private val sequentialWait = + new AtomicReference[FutureUnlessShutdown[EffectiveTime]]( + FutureUnlessShutdown.pure(EffectiveTime.MinValue) + ) + + override protected def currentKnownTime: CantonTimestamp = lastEffectiveTimeProcessed.get().value + + private def adjustByEpsilon( + sequencingTime: SequencedTime + )(implicit traceContext: TraceContext): EffectiveTime = { + @tailrec + def go(items: List[State]): NonNegativeFiniteDuration = items match { + case item :: _ if sequencingTime.value > item.validFrom.value => + item.epsilon + case last :: Nil => + if (sequencingTime.value < last.validFrom.value) + logger.error( + s"Bad sequencing time $sequencingTime with last known epsilon update at ${last}" + ) + last.epsilon + case Nil => + logger.error( + s"Epsilon tracker is not initialised at sequencing time ${sequencingTime}, will use default value ${DynamicDomainParameters.topologyChangeDelayIfAbsent}" + ) + DynamicDomainParameters.topologyChangeDelayIfAbsent // we use this (0) as a safe default + case _ :: rest => go(rest) + } + val epsilon = go(state.get()) + EffectiveTime(sequencingTime.value.plus(epsilon.duration)) + } + + // must call effectiveTimeProcessed in due time + def adjustTimestampForUpdate(sequencingTime: SequencedTime)(implicit + traceContext: TraceContext, + executionContext: ExecutionContext, + ): FutureUnlessShutdown[EffectiveTime] = + synchronize( + sequencingTime, { + val adjusted = adjustByEpsilon(sequencingTime) + val monotonic = { + // if a broken domain manager sends us an update too early after an epsilon reduction, we'll catch that and + // ensure that we don't store txs in an out of order way + // i.e. 
if we get at t1 an update with epsilon_1 < epsilon_0, then we do have to ensure that no topology + // transaction is sent until t1 + (epsilon_0 - epsilon_1) (as this is the threshold for any message just before t1) + // but if the topology manager still does it, we'll just work-around + uniqueUpdateTime.updateAndGet(cur => + if (cur.value >= adjusted.value) EffectiveTime(cur.value.immediateSuccessor) + else adjusted + ) + } + if (monotonic != adjusted) { + logger.error( + s"Broken or malicious domain topology manager is sending transactions during epsilon changes at ts=$sequencingTime!" + ) + } + monotonic + }, + ) + + private def synchronize( + sequencingTime: SequencedTime, + computeEffective: => EffectiveTime, + )(implicit + traceContext: TraceContext, + executionContext: ExecutionContext, + ): FutureUnlessShutdown[EffectiveTime] = { + // note, this is a side effect free chain await + def chainUpdates(previousEffectiveTime: EffectiveTime): FutureUnlessShutdown[EffectiveTime] = { + FutureUnlessShutdown( + FutureUtil.logOnFailure( + { + val synchronizeAt = previousEffectiveTime.value.min(sequencingTime.value) + awaitKnownTimestampUS(synchronizeAt) match { + case None => FutureUnlessShutdown.pure(computeEffective) + case Some(value) => + logger.debug( + s"Need to wait until topology processing has caught up at $sequencingTime (must reach $synchronizeAt with current=${currentKnownTime})" + ) + value.map { _ => + logger.debug(s"Topology processing caught up at $sequencingTime") + computeEffective + } + } + }.unwrap, + "chaining of sequential waits failed", + ) + ) + } + val nextChainP = new PromiseUnlessShutdown[EffectiveTime]( + "synchronized-chain-promise", + futureSupervisor, + ) + val ret = + sequentialWait.getAndUpdate(cur => cur.flatMap(_ => FutureUnlessShutdown(nextChainP.future))) + ret.onComplete { + case Success(UnlessShutdown.AbortedDueToShutdown) => + nextChainP.shutdown() + case Success(UnlessShutdown.Outcome(previousEffectiveTime)) => + 
chainUpdates(previousEffectiveTime) + .map { effectiveTime => + nextChainP.outcome(effectiveTime) + } + .onShutdown(nextChainP.shutdown()) + .discard + case Failure(exception) => nextChainP.failure(exception) + } + nextChainP.futureUS + } + + def effectiveTimeProcessed(effectiveTime: EffectiveTime): Unit = { + val updated = lastEffectiveTimeProcessed.updateAndGet(_.max(effectiveTime)) + notifyAwaitedFutures(updated.value) + } + + def adjustTimestampForTick(sequencingTime: SequencedTime)(implicit + traceContext: TraceContext, + executionContext: ExecutionContext, + ): FutureUnlessShutdown[EffectiveTime] = synchronize( + sequencingTime, { + val adjusted = adjustByEpsilon(sequencingTime) + val monotonic = uniqueUpdateTime.updateAndGet(_.max(adjusted)) + effectiveTimeProcessed(monotonic) + monotonic + }, + ) + + /** adjust epsilon if it changed + * + * @return [[scala.None$]] if epsilon is unchanged or wasn't set before (e.g. when called for the first time), + * otherwise [[scala.Some$]] the previous epsilon + */ + def adjustEpsilon( + effectiveTime: EffectiveTime, + sequencingTime: SequencedTime, + epsilon: NonNegativeFiniteDuration, + )(implicit traceContext: TraceContext): Option[NonNegativeFiniteDuration] = { + val oldStates = state.get() + val currentState = oldStates.headOption + val ext = State(epsilon, effectiveTime) + ErrorUtil.requireArgument( + currentState.forall(_.validFrom.value < effectiveTime.value), + s"Invalid epsilon adjustment from $currentState to $ext", + ) + if (!currentState.exists(_.epsilon == epsilon)) { + // we prepend this new datapoint and + // keep everything which is not yet valid and the first item which is valid before the sequencing time + val (effectivesAtOrAfterSequencing, effectivesBeforeSequencing) = + oldStates.span(_.validFrom.value >= sequencingTime.value) + val newStates = + ext +: (effectivesAtOrAfterSequencing ++ effectivesBeforeSequencing.headOption.toList) + + if (!state.compareAndSet(oldStates, newStates)) { + 
ErrorUtil.internalError( + new ConcurrentModificationException( + s"Topology change delay was updated concurrently. Effective time $effectiveTime, sequencing time $sequencingTime, epsilon $epsilon" + ) + ) + } + currentState.map(_.epsilon) + } else None + } + +} + +object TopologyTimestampPlusEpsilonTracker { + + def epsilonForTimestamp( + store: TopologyStore[TopologyStoreId.DomainStore], + asOfExclusive: CantonTimestamp, + )(implicit + traceContext: TraceContext, + executionContext: ExecutionContext, + ): FutureUnlessShutdown[TopologyStore.Change.TopologyDelay] = { + FutureUnlessShutdown + .outcomeF( + store + .findPositiveTransactions( + asOf = asOfExclusive, + asOfInclusive = false, + includeSecondary = false, + types = Seq(DomainTopologyTransactionType.DomainParameters), + filterUid = None, + filterNamespace = None, + ) + ) + .map { txs => + txs.replaces.result + .map(x => (x.transaction.transaction.element.mapping, x)) + .collectFirst { case (change: DomainParametersChange, tx) => + TopologyStore.Change.TopologyDelay( + tx.sequenced, + tx.validFrom, + change.domainParameters.topologyChangeDelay, + ) + } + .getOrElse( + TopologyStore.Change.TopologyDelay( + SequencedTime(CantonTimestamp.MinValue), + EffectiveTime(CantonTimestamp.MinValue), + DynamicDomainParameters.topologyChangeDelayIfAbsent, + ) + ) + } + } + + /** Initialize tracker + * + * @param processorTs Timestamp strictly (just) before the first message that will be passed: + * No sequenced events may have been passed in earlier crash epochs whose + * timestamp is strictly between `processorTs` and the first message that + * will be passed if these events affect the topology change delay. + * Normally, it's the timestamp of the last message that was successfully + * processed before the one that will be passed first. 
+ */ + def initialize( + tracker: TopologyTimestampPlusEpsilonTracker, + store: TopologyStore[TopologyStoreId.DomainStore], + processorTs: CantonTimestamp, + )(implicit + traceContext: TraceContext, + executionContext: ExecutionContext, + ): FutureUnlessShutdown[EffectiveTime] = for { + // find the epsilon of a dpc asOf processorTs (which means it is exclusive) + epsilonAtProcessorTs <- epsilonForTimestamp(store, processorTs) + // find also all upcoming changes which have effective >= processorTs && sequenced <= processorTs + // the change that makes up the epsilon at processorTs would be grabbed by the statement above + upcoming <- tracker.performUnlessClosingF(functionFullName)( + store + .findUpcomingEffectiveChanges(processorTs) + .map(_.collect { + case tdc: Change.TopologyDelay + // filter anything out that might be replayed + if tdc.sequenced.value <= processorTs => + tdc + }) + ) + allPending = (epsilonAtProcessorTs +: upcoming).sortBy(_.sequenced) + _ = { + tracker.logger.debug( + s"Initialising with $allPending" + ) + // Now, replay all the older epsilon updates that might get activated shortly + allPending.foreach { change => + tracker + .adjustEpsilon( + change.effective, + change.sequenced, + change.epsilon, + ) + .discard[Option[NonNegativeFiniteDuration]] + } + } + eff <- tracker.adjustTimestampForTick(SequencedTime(processorTs)) + } yield eff + + /** Initialize tracker + * + * @param processorTs Timestamp strictly (just) before the first message that will be passed: + * No sequenced events may have been passed in earlier crash epochs whose + * timestamp is strictly between `processorTs` and the first message that + * will be passed if these events affect the topology change delay. + * Normally, it's the timestamp of the last message that was successfully + * processed before the one that will be passed first. 
+ */ + def initializeX( + tracker: TopologyTimestampPlusEpsilonTracker, + store: TopologyStoreX[TopologyStoreId.DomainStore], + processorTs: CantonTimestamp, + )(implicit + traceContext: TraceContext, + executionContext: ExecutionContext, + ): FutureUnlessShutdown[EffectiveTime] = for { + // find the epsilon of a dpc asOf processorTs (which means it is exclusive) + epsilonAtProcessorTs <- epsilonForTimestamp(store, processorTs) + // find also all upcoming changes which have effective >= processorTs && sequenced <= processorTs + // the change that makes up the epsilon at processorTs would be grabbed by the statement above + upcoming <- tracker.performUnlessClosingF(functionFullName)( + store + .findUpcomingEffectiveChanges(processorTs) + .map(_.collect { + case tdc: Change.TopologyDelay + // filter anything out that might be replayed + if tdc.sequenced.value <= processorTs => + tdc + }) + ) + allPending = (epsilonAtProcessorTs +: upcoming).sortBy(_.sequenced) + _ = { + tracker.logger.debug( + s"Initialising with $allPending" + ) + // Now, replay all the older epsilon updates that might get activated shortly + allPending.foreach { change => + tracker + .adjustEpsilon( + change.effective, + change.sequenced, + change.epsilon, + ) + .discard[Option[NonNegativeFiniteDuration]] + } + } + eff <- tracker.adjustTimestampForTick(SequencedTime(processorTs)) + } yield eff + + def epsilonForTimestamp( + store: TopologyStoreX[TopologyStoreId.DomainStore], + asOfExclusive: CantonTimestamp, + )(implicit + traceContext: TraceContext, + executionContext: ExecutionContext, + ): FutureUnlessShutdown[TopologyStore.Change.TopologyDelay] = { + FutureUnlessShutdown + .outcomeF( + store + .findPositiveTransactions( + asOf = asOfExclusive, + asOfInclusive = false, + isProposal = false, + types = Seq(DomainParametersStateX.code), + filterUid = None, + filterNamespace = None, + ) + ) + .map { txs => + txs.result + .map(x => (x.transaction.transaction.mapping, x)) + .collectFirst { case 
(change: DomainParametersStateX, tx) => + TopologyStore.Change.TopologyDelay( + tx.sequenced, + tx.validFrom, + change.parameters.topologyChangeDelay, + ) + } + .getOrElse( + TopologyStore.Change.TopologyDelay( + SequencedTime(CantonTimestamp.MinValue), + EffectiveTime(CantonTimestamp.MinValue), + DynamicDomainParameters.topologyChangeDelayIfAbsent, + ) + ) + } + } + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessor.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessor.scala new file mode 100644 index 0000000000..2420d83484 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessor.scala @@ -0,0 +1,539 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology.processing + +import cats.instances.list.* +import cats.syntax.functor.* +import cats.syntax.parallel.* +import com.daml.nameof.NameOf.functionFullName +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.SequencerCounter +import com.digitalasset.canton.concurrent.FutureSupervisor +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.crypto.{ + Crypto, + CryptoPureApi, + DomainSyncCryptoClient, + PublicKey, + SigningPublicKey, +} +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.environment.CantonNodeParameters +import com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, HasCloseContext, Lifecycle} +import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.protocol.messages.DefaultOpenEnvelope +import com.digitalasset.canton.sequencing.* +import com.digitalasset.canton.time.Clock +import com.digitalasset.canton.topology.client.{ + 
  BaseCachingDomainTopologyClient,
  CachingDomainTopologyClient,
  StoreBasedDomainTopologyClient,
}
import com.digitalasset.canton.topology.store.*
import com.digitalasset.canton.topology.transaction.TopologyChangeOp.Positive
import com.digitalasset.canton.topology.transaction.*
import com.digitalasset.canton.topology.{DomainId, Member}
import com.digitalasset.canton.tracing.{TraceContext, Traced}
import com.digitalasset.canton.version.ProtocolVersion
import com.google.common.annotations.VisibleForTesting

import scala.concurrent.{ExecutionContext, Future}

/** Main incoming topology transaction validation and processing
  *
  * The topology transaction processor is subscribed to the event stream and processes
  * the domain topology transactions sent via the sequencer.
  *
  * It validates and then computes the updates to the data store in order to be able
  * to represent the topology state at any point in time.
  *
  * The processor works together with the StoreBasedDomainTopologyClient
  */
class TopologyTransactionProcessor(
    domainId: DomainId,
    validator: DomainTopologyTransactionMessageValidator,
    pureCrypto: CryptoPureApi,
    store: TopologyStore[TopologyStoreId.DomainStore],
    acsCommitmentScheduleEffectiveTime: Traced[EffectiveTime] => Unit,
    futureSupervisor: FutureSupervisor,
    timeouts: ProcessingTimeout,
    loggerFactory: NamedLoggerFactory,
)(implicit ec: ExecutionContext)
    extends TopologyTransactionProcessorCommonImpl[SignedTopologyTransaction[TopologyChangeOp]](
      domainId,
      futureSupervisor,
      store,
      acsCommitmentScheduleEffectiveTime,
      timeouts,
      loggerFactory,
    )
    with HasCloseContext {

  // Validates authorization of incoming transactions against the head authorization
  // state; scoped to this domain and tagged "incoming" for log disambiguation.
  private val authValidator =
    new IncomingTopologyTransactionAuthorizationValidator(
      pureCrypto,
      store,
      Some(domainId),
      loggerFactory.append("role", "incoming"),
    )

  override type SubscriberType = TopologyTransactionProcessingSubscriber

  /** Looks up the topology-change delay ("epsilon") in force just before `asOfExclusive`,
    * delegating to the epsilon tracker's store-based lookup.
    */
  override protected def epsilonForTimestamp(
      asOfExclusive: CantonTimestamp
  )(implicit
      traceContext: TraceContext
  ): FutureUnlessShutdown[TopologyStore.Change.TopologyDelay] =
    TopologyTimestampPlusEpsilonTracker.epsilonForTimestamp(
      store,
      asOfExclusive,
    )

  /** Latest (sequenced, effective) timestamp pair recorded in the store's state side, if any. */
  override protected def maxTimestampFromStore()(implicit
      traceContext: TraceContext
  ): Future[Option[(SequencedTime, EffectiveTime)]] = store.timestamp(useStateStore = true)

  /** Seeds the message validator with the last observed message timestamp and
    * initializes the timestamp-plus-epsilon tracker at `processorTs`.
    */
  override protected def initializeTopologyTimestampPlusEpsilonTracker(
      processorTs: CantonTimestamp,
      maxStored: Option[SequencedTime],
  )(implicit
      traceContext: TraceContext
  ): FutureUnlessShutdown[EffectiveTime] = {

    // remember the timestamp of the last message in order to validate the signatures
    // of the domain topology transaction message. we use the minimum of the two timestamps given,
    // should give us the latest state that is valid at the point of the new transaction
    // this means that if we validate the transaction, we validate it against the
    // as-of-inclusive state of the previous transaction
    val latestMsg = processorTs.min(maxStored.map(_.value).getOrElse(processorTs))
    if (latestMsg > CantonTimestamp.MinValue) {
      validator.initLastMessageTimestamp(Some(latestMsg))
    }

    TopologyTimestampPlusEpsilonTracker.initialize(timeAdjuster, store, processorTs)
  }

  /** Processes one batch of topology transactions in several deliberately-ordered stages:
    * validation (which also advances the epsilon tracker), persistence, incremental state
    * updates (which may run before persistence completes), cascading state updates (which
    * must wait for persistence), and finally listener notification once everything is done.
    * NOTE(review): the relative ordering of `storeF`/`incrementalF`/`cascadingF` is load-bearing;
    * do not reorder.
    */
  @VisibleForTesting
  override private[processing] def process(
      sequencingTimestamp: SequencedTime,
      effectiveTimestamp: EffectiveTime,
      sc: SequencerCounter,
      transactions: List[SignedTopologyTransaction[TopologyChangeOp]],
  )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = {
    // start validation and change delay advancing
    val validatedF = performUnlessClosingF(functionFullName)(
      authValidator.validateAndUpdateHeadAuthState(effectiveTimestamp.value, transactions).map {
        case ret @ (_, validated) =>
          // side effect: advance the topology change delay before anything is stored
          inspectAndAdvanceTopologyTransactionDelay(
            effectiveTimestamp,
            sequencingTimestamp,
            validated,
          )
          ret
      }
    )

    // string approx for output (effective - sequenced difference in milliseconds)
    val epsilon =
      s"${effectiveTimestamp.value.toEpochMilli - sequencingTimestamp.value.toEpochMilli}"

    // store transactions once they are fully validated
    val storeF =
      validatedF.flatMap { case (_, validated) =>
        val ln = validated.length
        validated.zipWithIndex.foreach {
          case (ValidatedTopologyTransaction(tx, None), idx) =>
            logger.info(
              s"Storing topology transaction ${idx + 1}/$ln ${tx.transaction.op} ${tx.transaction.element.mapping} with ts=$effectiveTimestamp (epsilon=${epsilon} ms)"
            )
          case (ValidatedTopologyTransaction(tx, Some(r)), idx) =>
            logger.warn(
              s"Rejected transaction ${idx + 1}/$ln ${tx.transaction.op} ${tx.transaction.element.mapping} at ts=$effectiveTimestamp (epsilon=${epsilon} ms) due to $r"
            )
        }

        performUnlessClosingF(functionFullName)(
          store.append(sequencingTimestamp, effectiveTimestamp, validated)
        )
      }

    // collect incremental and full updates
    val collectedF = validatedF.map { case (cascadingUpdate, validated) =>
      (cascadingUpdate, collectIncrementalUpdate(cascadingUpdate, validated))
    }

    // incremental updates can be written asap
    val incrementalF = collectedF.flatMap { case (_, incremental) =>
      performIncrementalUpdates(sequencingTimestamp, effectiveTimestamp, incremental)
    }

    // cascading updates need to wait until the transactions have been stored
    val cascadingF = collectedF.flatMap { case (cascading, _) =>
      for {
        _ <- storeF
        _ <- performCascadingUpdates(sequencingTimestamp, effectiveTimestamp, cascading)
      } yield {}
    }

    // resynchronize: only notify listeners once all store writes have completed
    for {
      validated <- validatedF
      (_, validatedTxs) = validated
      _ <- incrementalF
      _ <- cascadingF // does synchronize storeF
      filtered = validatedTxs.collect {
        case transaction if transaction.rejectionReason.isEmpty => transaction.transaction
      }
      _ <- listeners.toList.parTraverse(
        _.observed(
          sequencingTimestamp,
          effectiveTimestamp = effectiveTimestamp,
          sc,
          filtered,
        )
      )
    } yield ()
  }

  /** If the validated batch contains an accepted domain-parameters change, adjust the
    * topology-change delay (epsilon) accordingly; otherwise just mark the effective
    * time as processed on the tracker. Multiple parameter changes in one batch are
    * unexpected — the last one wins and an error is logged.
    */
  private def inspectAndAdvanceTopologyTransactionDelay(
      effectiveTimestamp: EffectiveTime,
      sequencingTimestamp: SequencedTime,
      validated: Seq[ValidatedTopologyTransaction],
  )(implicit traceContext: TraceContext): Unit = {
    def applyEpsilon(change: DomainParametersChange): Unit = {
      timeAdjuster
        .adjustEpsilon(
          effectiveTimestamp,
          sequencingTimestamp,
          change.domainParameters.topologyChangeDelay,
        )
        .foreach { previous =>
          logger.info(
            s"Updated topology change delay from=${previous} to ${change.domainParameters.topologyChangeDelay}"
          )
        }
      timeAdjuster.effectiveTimeProcessed(effectiveTimestamp)
    }
    // accepted Replace transactions carrying a DomainParametersChange
    val domainParamChanges = validated
      .collect {
        case validatedTx
            if validatedTx.rejectionReason.isEmpty && validatedTx.transaction.transaction.op == TopologyChangeOp.Replace =>
          validatedTx.transaction.transaction.element
      }
      .collect { case DomainGovernanceElement(change: DomainParametersChange) => change }
    NonEmpty.from(domainParamChanges) match {
      // normally, we shouldn't have any adjustment
      case None => timeAdjuster.effectiveTimeProcessed(effectiveTimestamp)
      case Some(changes) =>
        // if there is one, there should be exactly one
        // If we have several, let's panic now. however, we just pick the last and try to keep working
        if (changes.lengthCompare(1) > 0) {
          logger.error(
            s"Broken or malicious domain topology manager has sent (${changes.length}) domain parameter adjustments at $effectiveTimestamp, will ignore all of them except the last"
          )
        }
        applyEpsilon(changes.last1)
    }
  }

  /** pick the transactions which we can process using incremental updates */
  private def collectIncrementalUpdate(
      cascadingUpdate: UpdateAggregation,
      transactions: Seq[ValidatedTopologyTransaction],
  ): Seq[SignedTopologyTransaction[TopologyChangeOp]] = {
    def isCascading(elem: SignedTopologyTransaction[TopologyChangeOp]): Boolean = {
      elem.transaction.element.mapping.requiredAuth match {
        // namespace delegation changes are always cascading
        case RequiredAuth.Ns(_, true) => true
        // identifier delegation changes are only cascading with respect to namespace
        case RequiredAuth.Ns(namespace, false) =>
          cascadingUpdate.cascadingNamespaces.contains(namespace)
        // all others are cascading if there is at least one uid affected by the cascading update
        case RequiredAuth.Uid(uids) => uids.exists(cascadingUpdate.isCascading)
      }
    }
    // only accepted (non-rejected) transactions that are not affected by cascading auth changes
    transactions.filter(_.rejectionReason.isEmpty).map(_.transaction).filterNot(isCascading)
  }

  /** Writes the incremental (non-cascading) transactions to the state store,
    * split into deactivations and positive additions.
    */
  private def performIncrementalUpdates(
      sequenced: SequencedTime,
      effective: EffectiveTime,
      transactions: Seq[SignedTopologyTransaction[TopologyChangeOp]],
  )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = {
    val (deactivate, positive) = SignedTopologyTransactions(transactions).splitForStateUpdate
    performUnlessClosingF(functionFullName)(
      store.updateState(
        sequenced,
        effective,
        deactivate = deactivate,
        positive = positive,
      )
    )
  }

  /** Diffs current vs. target state (adds and replaces separately) and returns
    * (paths to remove, transactions to add).
    */
  private def determineUpdates(
      currents: PositiveSignedTopologyTransactions,
      targets: PositiveSignedTopologyTransactions,
  )(implicit
      traceContext: TraceContext
  ): (Seq[UniquePath], Seq[SignedTopologyTransaction[TopologyChangeOp.Positive]]) = {

    val (toRemoveForAdds, toAddForAdds) =
      determineRemovesAdds(currents.adds.result, targets.adds.result)
    val (toRemoveForReplaces, toAddForReplaces) =
      determineRemovesAdds(currents.replaces.result, targets.replaces.result)

    (toRemoveForAdds ++ toRemoveForReplaces, toAddForAdds ++ toAddForReplaces)
  }

  /** Set-difference of current vs. target transactions keyed by
    * (unique path, mapping, signed transaction); logs every cascading change.
    */
  private def determineRemovesAdds(
      current: Seq[SignedTopologyTransaction[Positive]],
      target: Seq[SignedTopologyTransaction[Positive]],
  )(implicit
      traceContext: TraceContext
  ) = {
    def toIndex[P <: Positive](sit: SignedTopologyTransaction[P]): (
        AuthorizedTopologyTransaction[TopologyMapping],
        SignedTopologyTransaction[P],
    ) = AuthorizedTopologyTransaction(
      sit.uniquePath,
      sit.transaction.element.mapping,
      sit,
    ) -> sit

    val currentMap = current.map(toIndex).toMap
    val targetMap = target.map(toIndex).toMap

    val currentSet = currentMap.keySet
    val targetSet = targetMap.keySet
    val toRemove = currentSet -- targetSet
    val toAdd = targetSet -- currentSet

    toRemove.foreach { item =>
      logger.debug(s"Cascading remove of $item")
    }
    toAdd.foreach { item =>
      logger.debug(s"Cascading addition of $item")
    }

    (toRemove.map(_.uniquePath).toSeq, toAdd.toSeq.flatMap(key => targetMap.get(key).toList))
  }

  /** Recomputes the state for all namespaces/uids affected by a cascading authorization
    * change: re-reads the target (positive) and current (state) transactions from the
    * store, filters to the cascading realm, diffs them, and applies the resulting
    * removes/adds. No-op when nothing cascades.
    */
  private def performCascadingUpdates(
      sequenced: SequencedTime,
      effective: EffectiveTime,
      cascadingUpdate: UpdateAggregation,
  )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] =
    if (cascadingUpdate.nothingCascading) FutureUnlessShutdown.unit
    else {
      logger.debug(
        s"Performing cascading update on namespace=${cascadingUpdate.authNamespaces} and uids=${cascadingUpdate.filteredCascadingUids}"
      )

      val uids = cascadingUpdate.filteredCascadingUids.toSeq
      val namespaces = cascadingUpdate.cascadingNamespaces.toSeq
      // filter out txs that don't fall into this namespace / uid realm, but we don't have enough
      // information on the db-level to know which tx to ignore and which one to keep
      def cascadingFilter(tx: SignedTopologyTransaction[TopologyChangeOp]): Boolean =
        tx.transaction.element.mapping.requiredAuth match {
          case RequiredAuth.Ns(namespace, _) =>
            cascadingUpdate.cascadingNamespaces.contains(namespace)
          case RequiredAuth.Uid(uids) =>
            uids.exists(uid =>
              cascadingUpdate.cascadingNamespaces.contains(uid.namespace) ||
                cascadingUpdate.filteredCascadingUids.contains(uid)
            )
        }

      for {
        target <- performUnlessClosingF(functionFullName)(
          store.findPositiveTransactions(
            asOf = effective.value,
            asOfInclusive = true,
            includeSecondary = true,
            types = DomainTopologyTransactionType.all,
            filterUid = Some(uids),
            filterNamespace = Some(namespaces),
          )
        )

        targetFiltered = target.signedTransactions.filter { tx =>
          lazy val isDomainGovernance = tx.transaction.element match {
            case _: TopologyStateUpdateElement => false
            case _: DomainGovernanceElement => true
          }

          /*
            We check that the transaction is properly authorized or is a domain governance.
            This allows not to drop domain governance transactions with cascading updates.
            In the scenario where a key authorizes a domain parameters change and is later
            revoked, the domain parameters stay valid.
           */
          val isAuthorized = authValidator.isCurrentlyAuthorized(tx) || isDomainGovernance
          cascadingFilter(tx) && isAuthorized
        }

        current <- performUnlessClosingF(functionFullName)(
          store
            .findStateTransactions(
              asOf = effective.value,
              asOfInclusive = true,
              includeSecondary = true,
              types = DomainTopologyTransactionType.all,
              filterUid = Some(uids),
              filterNamespace = Some(namespaces),
            )
        )

        currentFiltered = current.signedTransactions.filter(cascadingFilter)

        (removes, adds) = determineUpdates(currentFiltered, targetFiltered)

        _ <- performUnlessClosingF(functionFullName)(
          store.updateState(sequenced, effective, deactivate = removes, positive = adds)
        )
      } yield ()
    }

  /** Delegates envelope extraction and signature validation to the message validator. */
  override protected def extractTopologyUpdatesAndValidateEnvelope(
      ts: SequencedTime,
      envelopes: List[DefaultOpenEnvelope],
  )(implicit
      traceContext: TraceContext
  ): FutureUnlessShutdown[List[SignedTopologyTransaction[TopologyChangeOp]]] = {
    validator.extractTopologyUpdatesAndValidateEnvelope(ts, envelopes)
  }

  // In addition to the base-class resources, this subclass also owns and closes the store.
  override def onClosed(): Unit = {
    super.onClosed()
    Lifecycle.close(store)(logger)
  }

}

object TopologyTransactionProcessor {

  /** Wires up a caching topology client, a domain sync crypto client and a processor
    * for the given domain, subscribes the client to the processor, and returns both.
    * The ACS-commitment effective-time callback is a no-op here.
    */
  def createProcessorAndClientForDomain(
      topologyStore: TopologyStore[TopologyStoreId.DomainStore],
      owner: Member,
      domainId: DomainId,
      protocolVersion: ProtocolVersion,
      crypto: Crypto,
      initKeys: Map[Member, Seq[PublicKey]],
      parameters: CantonNodeParameters,
      clock: Clock,
      futureSupervisor: FutureSupervisor,
      loggerFactory: NamedLoggerFactory,
  )(implicit
      traceContext: TraceContext,
      executionContext: ExecutionContext,
  ): Future[
    (TopologyTransactionProcessor, BaseCachingDomainTopologyClient)
  ] = {
    val topologyClientF =
      CachingDomainTopologyClient
        .create(
          clock,
          domainId,
          protocolVersion,
          topologyStore,
          SigningPublicKey.collect(initKeys),
          StoreBasedDomainTopologyClient.NoPackageDependencies,
          parameters.cachingConfigs,
          parameters.batchingConfig,
          parameters.processingTimeouts,
          futureSupervisor,
          loggerFactory,
        )
    topologyClientF.map { topologyClient =>
      val cryptoClient = new DomainSyncCryptoClient(
        owner,
        domainId,
        topologyClient,
        crypto,
        parameters.cachingConfigs,
        parameters.processingTimeouts,
        futureSupervisor,
        loggerFactory,
      )

      val topologyProcessor = new TopologyTransactionProcessor(
        domainId = domainId,
        validator = DomainTopologyTransactionMessageValidator.create(
          parameters.skipTopologyManagerSignatureValidation,
          cryptoClient,
          owner,
          protocolVersion,
          parameters.processingTimeouts,
          futureSupervisor,
          loggerFactory,
        ),
        pureCrypto = cryptoClient.pureCrypto,
        store = topologyStore,
        acsCommitmentScheduleEffectiveTime = _ => (),
        futureSupervisor = futureSupervisor,
        timeouts = parameters.processingTimeouts,
        loggerFactory = loggerFactory,
      )
      topologyProcessor.subscribe(topologyClient)
      (topologyProcessor, topologyClient)
    }
  }

  /** Returns the timestamps for initializing the processor and client for a restarted or fresh subscription. */
  def subscriptionTimestamp(
      start: SubscriptionStart,
      storedTimestamps: Option[(SequencedTime, EffectiveTime)],
  ): (CantonTimestamp, Either[SequencedTime, EffectiveTime]) = {
    import SubscriptionStart.*
    start match {
      case restart: ResubscriptionStart =>
        resubscriptionTimestamp(restart)
      case FreshSubscription =>
        storedTimestamps.fold(
          // Fresh subscription with an empty domain topology store
          // processor: init at ts = min
          // client: init at ts = min
          (CantonTimestamp.MinValue, Right(EffectiveTime(CantonTimestamp.MinValue)))
        ) { case (sequenced, effective) =>
          // Fresh subscription with a bootstrapping timestamp
          // NOTE: we assume that the bootstrapping topology snapshot does not contain the first message
          // that we are going to receive from the domain
          // processor: init at max(sequence-time) of bootstrapping transactions
          // client: init at max(effective-time) of bootstrapping transactions
          (sequenced.value, Right(effective))
        }
    }
  }

  /** Returns the timestamps for initializing the processor and client for a restarted subscription. */
  def resubscriptionTimestamp(
      start: ResubscriptionStart
  ): (CantonTimestamp, Either[SequencedTime, EffectiveTime]) = {
    import SubscriptionStart.*
    start match {
      // clean-head subscription. this means that the first event we are going to get is > cleanPrehead
      // and all our stores are clean.
      // processor: initialise with ts = cleanPrehead
      // client: approximate time: cleanPrehead, knownUntil = cleanPrehead + epsilon
      // plus, there might be "effective times" > cleanPrehead, so we need to schedule the adjustment
      // of the approximate time to the effective time
      case CleanHeadResubscriptionStart(cleanPrehead) =>
        (cleanPrehead, Left(SequencedTime(cleanPrehead)))
      // dirty or replay subscription.
      // processor: initialise with firstReplayed.predecessor, as the next message we'll be getting is the firstReplayed
      // client: same as clean-head resubscription
      case ReplayResubscriptionStart(firstReplayed, Some(cleanPrehead)) =>
        (firstReplayed.immediatePredecessor, Left(SequencedTime(cleanPrehead)))
      // dirty re-subscription of a node that crashed before fully processing the first event
      // processor: initialise with firstReplayed.predecessor, as the next message we'll be getting is the firstReplayed
      // client: initialise client with firstReplayed (careful: firstReplayed is known, but firstReplayed.immediateSuccessor not)
      case ReplayResubscriptionStart(firstReplayed, None) =>
        (
          firstReplayed.immediatePredecessor,
          Right(EffectiveTime(firstReplayed.immediatePredecessor)),
        )
    }
  }
}
diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessorCommon.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessorCommon.scala
new file mode 100644
index 0000000000..fccd65657b
--- /dev/null
+++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessorCommon.scala
@@ -0,0 +1,359 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.topology.processing

import com.daml.nameof.NameOf.functionFullName
import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.SequencerCounter
import com.digitalasset.canton.concurrent.{DirectExecutionContext, FutureSupervisor}
import com.digitalasset.canton.config.ProcessingTimeout
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.lifecycle.{FlagCloseable, FutureUnlessShutdown, Lifecycle}
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.protocol.messages.{DefaultOpenEnvelope, ProtocolMessage}
import com.digitalasset.canton.sequencing.*
import com.digitalasset.canton.sequencing.protocol.{Deliver, DeliverError}
import com.digitalasset.canton.time.DomainTimeTracker
import com.digitalasset.canton.topology.processing.TopologyTransactionProcessor.subscriptionTimestamp
import com.digitalasset.canton.topology.store.{TopologyStore, TopologyStoreCommon}
import com.digitalasset.canton.topology.{DomainId, TopologyManagerError}
import com.digitalasset.canton.tracing.{TraceContext, Traced}
import com.digitalasset.canton.util.{ErrorUtil, FutureUtil, MonadUtil, SimpleExecutionQueue}

import java.util.concurrent.atomic.AtomicBoolean
import scala.collection.mutable.ListBuffer
import scala.concurrent.{ExecutionContext, Future}

trait TopologyTransactionProcessorCommon extends NamedLogging with FlagCloseable {

  /** Inform the topology manager where the subscription starts when using [[processEnvelopes]] rather than [[createHandler]] */
  def subscriptionStartsAt(start: SubscriptionStart, domainTimeTracker: DomainTimeTracker)(implicit
      traceContext: TraceContext
  ): FutureUnlessShutdown[Unit]

  /** Builds the event handler used when this processor is driven by the sequencer subscription. */
  def createHandler(domainId: DomainId): UnsignedProtocolEventHandler

  /** process envelopes mostly asynchronously
    *
    * Here, we return a Future[Future[Unit]]. We need to ensure the outer future finishes processing
    * before we tick the record order publisher.
    */
  def processEnvelopes(
      sc: SequencerCounter,
      ts: SequencedTime,
      envelopes: Traced[List[DefaultOpenEnvelope]],
  ): HandlerResult

}

/** Main incoming topology transaction validation and processing
  *
  * The topology transaction processor is subscribed to the event stream and processes
  * the domain topology transactions sent via the sequencer.
  *
  * It validates and then computes the updates to the data store in order to be able
  * to represent the topology state at any point in time.
  *
  * The processor works together with the StoreBasedDomainTopologyClient
  */
abstract class TopologyTransactionProcessorCommonImpl[M](
    domainId: DomainId,
    futureSupervisor: FutureSupervisor,
    store: TopologyStoreCommon[_, _, _, _],
    acsCommitmentScheduleEffectiveTime: Traced[EffectiveTime] => Unit,
    override protected val timeouts: ProcessingTimeout,
    val loggerFactory: NamedLoggerFactory,
)(implicit ec: ExecutionContext)
    extends TopologyTransactionProcessorCommon {

  // guards against double initialisation; flipped exactly once in initialise()
  private val initialised = new AtomicBoolean(false)

  type SubscriberType <: TopologyTransactionProcessingSubscriberCommon

  protected val listeners = ListBuffer[SubscriberType]()

  // tracks the "effective = sequenced + epsilon" relationship for incoming events
  protected val timeAdjuster =
    new TopologyTimestampPlusEpsilonTracker(timeouts, loggerFactory, futureSupervisor)

  // serializes the asynchronous part of event processing so batches are applied in order
  private val serializer = new SimpleExecutionQueue(
    "topology-transaction-processor-queue",
    futureSupervisor,
    timeouts,
    loggerFactory,
  )

  /** assumption: subscribers don't do heavy lifting */
  final def subscribe(listener: SubscriberType): Unit = {
    listeners += listener
  }

  /** Topology-change delay in force just before `asOfExclusive` (implementation-specific lookup). */
  protected def epsilonForTimestamp(
      asOfExclusive: CantonTimestamp
  )(implicit
      traceContext: TraceContext
  ): FutureUnlessShutdown[TopologyStore.Change.TopologyDelay]

  /** Latest (sequenced, effective) timestamp pair known to the backing store, if any. */
  protected def maxTimestampFromStore()(implicit
      traceContext: TraceContext
  ): Future[Option[(SequencedTime, EffectiveTime)]]

  /** Initializes the epsilon tracker at `processorTs` (implementation-specific). */
  protected def initializeTopologyTimestampPlusEpsilonTracker(
      processorTs: CantonTimestamp,
      maxStored: Option[SequencedTime],
  )(implicit
      traceContext: TraceContext
  ): FutureUnlessShutdown[EffectiveTime]

  /** One-time initialisation: determines the processor and client start timestamps from the
    * subscription start and the store, seeds the epsilon tracker, pushes the best-known head
    * to all listeners, and schedules future approximate-time advances via the time tracker.
    * Throws (via ErrorUtil.requireState) if called twice.
    */
  private def initialise(
      start: SubscriptionStart,
      domainTimeTracker: DomainTimeTracker,
  )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = {

    ErrorUtil.requireState(
      !initialised.getAndSet(true),
      "topology processor is already initialised",
    )

    def initClientFromSequencedTs(
        sequencedTs: SequencedTime
    ): FutureUnlessShutdown[NonEmpty[Seq[(EffectiveTime, ApproximateTime)]]] = for {
      // we need to figure out any future effective time. if we had been running, there would be a clock
      // scheduled to poke the domain client at the given time in order to adjust the approximate timestamp up to the
      // effective time at the given point in time. we need to recover these as otherwise, we might be using outdated
      // topology snapshots on startup. (wouldn't be tragic as by getting the rejects, we'd be updating the timestamps
      // anyway).
      upcoming <- performUnlessClosingF(functionFullName)(
        store.findUpcomingEffectiveChanges(sequencedTs.value)
        // find effective time of sequenced Ts (directly from store)
        // merge times
      )
      currentEpsilon <- epsilonForTimestamp(sequencedTs.value)
    } yield {
      // we have (ts+e, ts) and quite a few te in the future, so we create list of upcoming changes and sort them
      val head = (
        EffectiveTime(sequencedTs.value.plus(currentEpsilon.epsilon.unwrap)),
        ApproximateTime(sequencedTs.value),
      )
      val tail = upcoming.map(x => (x.effective, x.effective.toApproximate))

      NonEmpty(Seq, head, tail *).sortBy { case (effectiveTime, _) => effectiveTime.value }
    }

    for {
      stateStoreTsO <- performUnlessClosingF(functionFullName)(
        maxTimestampFromStore()
      )
      (processorTs, clientTs) = subscriptionTimestamp(start, stateStoreTsO)
      _ <- initializeTopologyTimestampPlusEpsilonTracker(processorTs, stateStoreTsO.map(_._1))

      clientInitTimes <- clientTs match {
        case Left(sequencedTs) =>
          // approximate time is sequencedTs
          initClientFromSequencedTs(sequencedTs)
        case Right(effective) =>
          // effective and approximate time are effective time
          FutureUnlessShutdown.pure(NonEmpty(Seq, (effective, effective.toApproximate)))
      }
    } yield {
      logger.debug(
        s"Initializing topology processing for start=$start with effective ts ${clientInitTimes.map(_._1)}"
      )

      // let our client know about the latest known information right now, but schedule the updating
      // of the approximate time subsequently
      val maxEffective = clientInitTimes.map { case (effective, _) => effective }.max1
      val minApproximate = clientInitTimes.map { case (_, approximate) => approximate }.min1
      listenersUpdateHead(maxEffective, minApproximate, potentialChanges = true)

      val directExecutionContext = DirectExecutionContext(noTracingLogger)
      clientInitTimes.foreach { case (effective, _approximate) =>
        // if the effective time is in the future, schedule a clock to update the time accordingly
        domainTimeTracker.awaitTick(effective.value) match {
          case None =>
            // The effective time is in the past. Directly advance our approximate time to the respective effective time
            listenersUpdateHead(effective, effective.toApproximate, potentialChanges = true)
          case Some(tickF) =>
            FutureUtil.doNotAwait(
              tickF.map(_ =>
                listenersUpdateHead(effective, effective.toApproximate, potentialChanges = true)
              )(directExecutionContext),
              "Notifying listeners to the topology processor's head",
            )
        }
      }
    }
  }

  /** Pushes a new (effective, approximate) head to all registered listeners. */
  final protected def listenersUpdateHead(
      effective: EffectiveTime,
      approximate: ApproximateTime,
      potentialChanges: Boolean,
  )(implicit traceContext: TraceContext): Unit = {
    logger.debug(
      s"Updating listener heads to ${effective} and ${approximate}. Potential changes: ${potentialChanges}"
    )
    listeners.toList.foreach(_.updateHead(effective, approximate, potentialChanges))
  }

  /** Inform the topology manager where the subscription starts when using [[processEnvelopes]] rather than [[createHandler]] */
  override def subscriptionStartsAt(start: SubscriptionStart, domainTimeTracker: DomainTimeTracker)(
      implicit traceContext: TraceContext
  ): FutureUnlessShutdown[Unit] = initialise(start, domainTimeTracker)

  /** process envelopes mostly asynchronously
    *
    * Here, we return a Future[Future[Unit]]. We need to ensure the outer future finishes processing
    * before we tick the record order publisher.
    */
  override def processEnvelopes(
      sc: SequencerCounter,
      ts: SequencedTime,
      envelopes: Traced[List[DefaultOpenEnvelope]],
  ): HandlerResult =
    envelopes.withTraceContext { implicit traceContext => env =>
      internalProcessEnvelopes(
        sc,
        ts,
        extractTopologyUpdatesAndValidateEnvelope(ts, env),
      )
    }

  /** Extracts (and, depending on the implementation, validates) topology payloads of type M from envelopes. */
  protected def extractTopologyUpdatesAndValidateEnvelope(
      ts: SequencedTime,
      value: List[DefaultOpenEnvelope],
  )(implicit
      traceContext: TraceContext
  ): FutureUnlessShutdown[List[M]]

  /** Applies one batch of extracted messages; invoked sequentially via the serializer queue. */
  private[processing] def process(
      sequencingTimestamp: SequencedTime,
      effectiveTimestamp: EffectiveTime,
      sc: SequencerCounter,
      messages: List[M],
  )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit]

  /** Synchronous part: computes the effective time (advancing or ticking the epsilon tracker)
    * and notifies the ACS commitment scheduler; the actual processing is then queued on the
    * serializer and returned as the async half of the HandlerResult.
    */
  protected def internalProcessEnvelopes(
      sc: SequencerCounter,
      sequencedTime: SequencedTime,
      updatesF: FutureUnlessShutdown[List[M]],
  )(implicit traceContext: TraceContext): HandlerResult = {
    def computeEffectiveTime(
        updates: List[M]
    ): FutureUnlessShutdown[EffectiveTime] = {
      if (updates.nonEmpty) {
        val effectiveTimeF =
          futureSupervisor.supervisedUS(s"adjust ts=$sequencedTime for update")(
            timeAdjuster.adjustTimestampForUpdate(sequencedTime)
          )

        // we need to inform the acs commitment processor about the incoming change
        effectiveTimeF.map { effectiveTime =>
          // this is safe to do here, as the acs commitment processor `publish` method will only be
          // invoked long after the outer future here has finished processing
          acsCommitmentScheduleEffectiveTime(Traced(effectiveTime))
          effectiveTime
        }
      } else {
        futureSupervisor.supervisedUS(s"adjust ts=$sequencedTime for update")(
          timeAdjuster.adjustTimestampForTick(sequencedTime)
        )
      }
    }

    for {
      updates <- updatesF
      _ <- ErrorUtil.requireStateAsyncShutdown(
        initialised.get(),
        s"Topology client for $domainId is not initialized. Cannot process sequenced event with counter ${sc} at ${sequencedTime}",
      )
      // compute effective time
      effectiveTime <- computeEffectiveTime(updates)
    } yield {
      // the rest, we'll run asynchronously, but sequential
      val scheduledF =
        serializer.executeUS(
          {
            if (updates.nonEmpty) {
              process(sequencedTime, effectiveTime, sc, updates)
            } else {
              tickleListeners(sequencedTime, effectiveTime)
            }
          },
          "processing topology transactions",
        )
      AsyncResult(scheduledF)
    }
  }

  /** Advances listeners' approximate time on an empty batch (no topology changes to apply). */
  private def tickleListeners(
      sequencedTimestamp: SequencedTime,
      effectiveTimestamp: EffectiveTime,
  )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = {
    this.performUnlessClosingF(functionFullName) {
      Future {
        val approximate = ApproximateTime(sequencedTimestamp.value)
        listenersUpdateHead(effectiveTimestamp, approximate, potentialChanges = false)
      }
    }
  }

  /** Wraps this processor as an UnsignedProtocolEventHandler: Deliver events are filtered
    * to this domain (alarming on mismatched domain ids) and fed through
    * [[internalProcessEnvelopes]]; DeliverErrors are acknowledged without processing.
    */
  override def createHandler(domainId: DomainId): UnsignedProtocolEventHandler =
    new UnsignedProtocolEventHandler {

      override def name: String = s"topology-processor-$domainId"

      override def apply(
          tracedBatch: BoxedEnvelope[UnsignedEnvelopeBox, DefaultOpenEnvelope]
      ): HandlerResult = {
        MonadUtil.sequentialTraverseMonoid(tracedBatch.value) {
          _.withTraceContext { implicit traceContext =>
            {
              case Deliver(sc, ts, _, _, batch) =>
                logger.debug(s"Processing sequenced event with counter $sc and timestamp $ts")
                val sequencedTime = SequencedTime(ts)
                val transactionsF = extractTopologyUpdatesAndValidateEnvelope(
                  sequencedTime,
                  ProtocolMessage.filterDomainsEnvelopes(
                    batch,
                    domainId,
                    (wrongMsgs: List[DefaultOpenEnvelope]) =>
                      TopologyManagerError.TopologyManagerAlarm
                        .Warn(
                          s"received messages with wrong domain ids: ${wrongMsgs.map(_.protocolMessage.domainId)}"
                        )
                        .report(),
                  ),
                )
                internalProcessEnvelopes(sc, sequencedTime, transactionsF)
              case _: DeliverError => HandlerResult.done
            }
          }
        }
      }

      override def subscriptionStartsAt(
          start: SubscriptionStart,
          domainTimeTracker: DomainTimeTracker,
      )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] =
        TopologyTransactionProcessorCommonImpl.this.subscriptionStartsAt(start, domainTimeTracker)
    }

  // closes owned resources in order: time adjuster, store, then the execution queue
  override def onClosed(): Unit = {
    Lifecycle.close(
      timeAdjuster,
      store,
      serializer,
    )(logger)
  }

}

object TopologyTransactionProcessorCommon {
  /** Factory abstraction so callers can create a processor given only the ACS-commitment callback. */
  abstract class Factory {
    def create(
        acsCommitmentScheduleEffectiveTime: Traced[EffectiveTime] => Unit
    )(implicit executionContext: ExecutionContext): TopologyTransactionProcessorCommon
  }
}
diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessorX.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessorX.scala
new file mode 100644
index 0000000000..9df6e0a807
--- /dev/null
+++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessorX.scala
@@ -0,0 +1,238 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.topology.processing

import cats.syntax.functorFilter.*
import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.SequencerCounter
import com.digitalasset.canton.concurrent.FutureSupervisor
import com.digitalasset.canton.config.ProcessingTimeout
import com.digitalasset.canton.crypto.Crypto
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.environment.CantonNodeParameters
import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
import com.digitalasset.canton.logging.NamedLoggerFactory
import com.digitalasset.canton.protocol.messages.{
  DefaultOpenEnvelope,
  ProtocolMessage,
  TopologyTransactionsBroadcastX,
}
import com.digitalasset.canton.time.Clock
import com.digitalasset.canton.topology.client.{
  DomainTopologyClientWithInitX,
  StoreBasedDomainTopologyClient,
  StoreBasedDomainTopologyClientX,
}
import com.digitalasset.canton.topology.store.TopologyStore.Change
import com.digitalasset.canton.topology.store.ValidatedTopologyTransactionX.GenericValidatedTopologyTransactionX
import com.digitalasset.canton.topology.store.{TopologyStoreId, TopologyStoreX}
import com.digitalasset.canton.topology.transaction.{
  DomainParametersStateX,
  TopologyChangeOpX,
  ValidatingTopologyMappingXChecks,
}
import com.digitalasset.canton.topology.{DomainId, TopologyStateProcessorX}
import com.digitalasset.canton.tracing.{TraceContext, Traced}
import com.digitalasset.canton.version.ProtocolVersion

import scala.concurrent.{ExecutionContext, Future}

/** Ingests `TopologyTransactionsBroadcastX` envelopes for a domain: validates and applies
  * the contained transactions to the topology store, adjusts the topology-change-delay
  * (epsilon) tracker when domain parameters change, and notifies subscribed listeners.
  *
  * NOTE(review): behaviour of the superclass `TopologyTransactionProcessorCommonImpl`
  * (e.g. `timeAdjuster`, `listeners`, the `performUnlessClosing*` helpers) is not visible
  * here — comments on those members are based on their use at the call sites below.
  */
class TopologyTransactionProcessorX(
    domainId: DomainId,
    crypto: Crypto,
    store: TopologyStoreX[TopologyStoreId.DomainStore],
    acsCommitmentScheduleEffectiveTime: Traced[EffectiveTime] => Unit,
    terminateProcessing: TerminateProcessing,
    enableTopologyTransactionValidation: Boolean,
    futureSupervisor: FutureSupervisor,
    timeouts: ProcessingTimeout,
    loggerFactory: NamedLoggerFactory,
)(implicit ec: ExecutionContext)
    extends TopologyTransactionProcessorCommonImpl[TopologyTransactionsBroadcastX](
      domainId,
      futureSupervisor,
      store,
      acsCommitmentScheduleEffectiveTime,
      timeouts,
      loggerFactory,
    ) {

  override type SubscriberType = TopologyTransactionProcessingSubscriberX

  // Performs the actual authorization validation and store updates; `None` means no
  // explicit output-store override (semantics of the second argument defined elsewhere).
  private val stateProcessor = new TopologyStateProcessorX(
    store,
    None,
    enableTopologyTransactionValidation,
    new ValidatingTopologyMappingXChecks(store, loggerFactory),
    crypto,
    loggerFactory,
  )

  // No X-specific resources to release; delegates entirely to the common implementation.
  override def onClosed(): Unit = {
    super.onClosed()
  }

  /** Looks up the topology-change delay in effect strictly before `asOfExclusive`. */
  override protected def epsilonForTimestamp(asOfExclusive: CantonTimestamp)(implicit
      traceContext: TraceContext
  ): FutureUnlessShutdown[Change.TopologyDelay] =
    TopologyTimestampPlusEpsilonTracker.epsilonForTimestamp(store, asOfExclusive)

  /** Latest (sequenced, effective) timestamp pair recorded in the topology store, if any. */
  override protected def maxTimestampFromStore()(implicit
      traceContext: TraceContext
  ): Future[Option[(SequencedTime, EffectiveTime)]] = store.maxTimestamp()

  // Note: `maxStored` is accepted for interface compatibility but not used by the X
  // initialization path — `initializeX` derives what it needs from the store directly.
  override protected def initializeTopologyTimestampPlusEpsilonTracker(
      processorTs: CantonTimestamp,
      maxStored: Option[SequencedTime],
  )(implicit traceContext: TraceContext): FutureUnlessShutdown[EffectiveTime] =
    TopologyTimestampPlusEpsilonTracker.initializeX(timeAdjuster, store, processorTs)

  /** Selects the topology broadcast messages out of a batch of opened envelopes;
    * envelopes carrying other protocol messages are silently dropped (`mapFilter`).
    */
  override protected def extractTopologyUpdatesAndValidateEnvelope(
      ts: SequencedTime,
      envelopes: List[DefaultOpenEnvelope],
  )(implicit
      traceContext: TraceContext
  ): FutureUnlessShutdown[List[TopologyTransactionsBroadcastX]] = {
    FutureUnlessShutdown.pure(
      envelopes
        .mapFilter(ProtocolMessage.select[TopologyTransactionsBroadcastX])
        .map(_.protocolMessage)
    )
  }

  /** Processing pipeline for one sequenced batch:
    *  1. validate + apply all transactions (proposals allowed: `expectFullAuthorization = false`),
    *  2. adjust the epsilon tracker if domain parameters changed,
    *  3. notify listeners with the accepted (non-rejected) transactions,
    *  4. signal `terminateProcessing` for this sequencer counter.
    * Each stage is guarded against node shutdown via `performUnlessClosing*`.
    */
  override private[processing] def process(
      sequencingTimestamp: SequencedTime,
      effectiveTimestamp: EffectiveTime,
      sc: SequencerCounter,
      messages: List[TopologyTransactionsBroadcastX],
  )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = {
    val tx = messages.flatMap(_.broadcasts).flatMap(_.transactions)
    performUnlessClosingEitherU("process-topology-transaction")(
      stateProcessor
        .validateAndApplyAuthorization(
          sequencingTimestamp,
          effectiveTimestamp,
          tx,
          abortIfCascading = false,
          expectFullAuthorization = false,
        )
    ).merge
      .flatMap { validated =>
        inspectAndAdvanceTopologyTransactionDelay(
          sequencingTimestamp,
          effectiveTimestamp,
          validated,
        )
        logger.debug(
          s"Notifying listeners of ${sequencingTimestamp}, ${effectiveTimestamp} and SC ${sc}"
        )
        import cats.syntax.parallel.*

        for {
          _ <- performUnlessClosingUSF("notify-topology-transaction-observers")(
            listeners.toList.parTraverse_(
              _.observed(
                sequencingTimestamp,
                effectiveTimestamp,
                sc,
                // only forward transactions that passed validation
                validated.collect { case tx if tx.rejectionReason.isEmpty => tx.transaction },
              )
            )
          )

          _ <- performUnlessClosingF("terminate-processing")(
            terminateProcessing.terminate(sc, sequencingTimestamp, effectiveTimestamp)
          )
        } yield ()

      }
  }

  /** If the validated batch contains an accepted `DomainParametersStateX` replacement,
    * feed its new `topologyChangeDelay` into the time adjuster; otherwise just mark the
    * effective time as processed.
    */
  private def inspectAndAdvanceTopologyTransactionDelay(
      sequencingTimestamp: SequencedTime,
      effectiveTimestamp: EffectiveTime,
      validated: Seq[GenericValidatedTopologyTransactionX],
  )(implicit traceContext: TraceContext): Unit = {
    def applyEpsilon(mapping: DomainParametersStateX) = {
      timeAdjuster
        .adjustEpsilon(
          effectiveTimestamp,
          sequencingTimestamp,
          mapping.parameters.topologyChangeDelay,
        )
        .foreach { previous =>
          logger.info(
            s"Updated topology change delay from=${previous} to ${mapping.parameters.topologyChangeDelay}"
          )
        }
      timeAdjuster.effectiveTimeProcessed(effectiveTimestamp)
    }

    // accepted (non-rejected) Replace operations on the domain parameters
    val domainParamChanges = validated.flatMap(
      _.collectOf[TopologyChangeOpX.Replace, DomainParametersStateX]
        .filter(
          _.rejectionReason.isEmpty
        )
        .map(_.transaction.transaction.mapping)
    )

    NonEmpty.from(domainParamChanges) match {
      // normally, we shouldn't have any adjustment
      case None => timeAdjuster.effectiveTimeProcessed(effectiveTimestamp)
      case Some(changes) =>
        // if there is one, there should be exactly one
        // If we have several, let's panic now. however, we just pick the last and try to keep working
        if (changes.lengthCompare(1) > 0) {
          logger.error(
            s"Broken or malicious domain topology manager has sent (${changes.length}) domain parameter adjustments at $effectiveTimestamp, will ignore all of them except the last"
          )
        }
        applyEpsilon(changes.last1)
    }
  }

}

object TopologyTransactionProcessorX {

  /** Wires up a processor together with a store-based topology client for `domainId`
    * and subscribes the client to the processor. The ACS commitment callback is a no-op
    * and the no-op terminate-processing hook is used.
    */
  def createProcessorAndClientForDomain(
      topologyStore: TopologyStoreX[TopologyStoreId.DomainStore],
      domainId: DomainId,
      protocolVersion: ProtocolVersion,
      crypto: Crypto,
      parameters: CantonNodeParameters,
      enableTopologyTransactionValidation: Boolean,
      clock: Clock,
      futureSupervisor: FutureSupervisor,
      loggerFactory: NamedLoggerFactory,
  )(implicit
      executionContext: ExecutionContext
  ): Future[(TopologyTransactionProcessorX, DomainTopologyClientWithInitX)] = {

    val processor = new TopologyTransactionProcessorX(
      domainId,
      crypto,
      topologyStore,
      _ => (),
      TerminateProcessing.NoOpTerminateTopologyProcessing,
      enableTopologyTransactionValidation,
      futureSupervisor,
      parameters.processingTimeouts,
      loggerFactory,
    )

    val client = new StoreBasedDomainTopologyClientX(
      clock,
      domainId,
      protocolVersion,
      topologyStore,
      StoreBasedDomainTopologyClient.NoPackageDependencies,
      parameters.processingTimeouts,
      futureSupervisor,
      loggerFactory,
    )

    processor.subscribe(client)
    Future.successful((processor, client))
  }
}
diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTransactionSubscriber.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTransactionSubscriber.scala new file mode 100644 index 0000000000..a63828c59d --- /dev/null +++
b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTransactionSubscriber.scala @@ -0,0 +1,55 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.topology.processing

import com.digitalasset.canton.SequencerCounter
import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
import com.digitalasset.canton.topology.transaction.SignedTopologyTransactionX.GenericSignedTopologyTransactionX
import com.digitalasset.canton.topology.transaction.{SignedTopologyTransaction, TopologyChangeOp}
import com.digitalasset.canton.tracing.TraceContext

/** Base contract shared by the 2.x and X-node topology processing subscribers. */
trait TopologyTransactionProcessingSubscriberCommon {

  /** Move the most known timestamp ahead in future based on newly discovered information
    *
    * We don't know the most recent timestamp directly. However, we can guess it from two sources:
    * What was the timestamp of the latest topology transaction added? And what was the last processing timestamp.
    * We need to know both such that we can always deliver the latest valid set of topology information, and don't use
    * old snapshots.
    * Therefore, we expose the updateHead function on the public interface for initialisation purposes.
    *
    * @param effectiveTimestamp sequencer timestamp + epsilon(sequencer timestamp)
    * @param approximateTimestamp our current best guess of what the "best" timestamp is to get a valid current topology snapshot
    * @param potentialTopologyChange if true, the time advancement is related to a topology change that might have occurred or become effective
    */
  // Default implementation is a no-op so subscribers only override it when they track head state.
  def updateHead(
      effectiveTimestamp: EffectiveTime,
      approximateTimestamp: ApproximateTime,
      potentialTopologyChange: Boolean,
  )(implicit traceContext: TraceContext): Unit = ()
}

/** Subscriber interface for the 2.x-style processor (`SignedTopologyTransaction`). */
trait TopologyTransactionProcessingSubscriber
    extends TopologyTransactionProcessingSubscriberCommon {

  /** Invoked once per processed batch with the accepted transactions.
    *
    * @param sequencedTimestamp time at which the batch was sequenced
    * @param effectiveTimestamp time at which the transactions become effective
    * @param sequencerCounter   counter of the sequenced event carrying the batch
    * @param transactions       validated transactions to observe
    */
  def observed(
      sequencedTimestamp: SequencedTime,
      effectiveTimestamp: EffectiveTime,
      sequencerCounter: SequencerCounter,
      transactions: Seq[SignedTopologyTransaction[TopologyChangeOp]],
  )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit]

}

/** Subscriber interface for the X-node processor (`GenericSignedTopologyTransactionX`);
  * same semantics as [[TopologyTransactionProcessingSubscriber.observed]] above.
  */
trait TopologyTransactionProcessingSubscriberX
    extends TopologyTransactionProcessingSubscriberCommon {

  def observed(
      sequencedTimestamp: SequencedTime,
      effectiveTimestamp: EffectiveTime,
      sequencerCounter: SequencerCounter,
      transactions: Seq[GenericSignedTopologyTransactionX],
  )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit]

}
diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TransactionAuthorizationValidator.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TransactionAuthorizationValidator.scala new file mode 100644 index 0000000000..be627fba79 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TransactionAuthorizationValidator.scala @@ -0,0 +1,240 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.topology.processing

import cats.instances.list.*
import cats.instances.option.*
import cats.syntax.traverse.*
import com.digitalasset.canton.DiscardOps
import com.digitalasset.canton.crypto.Fingerprint
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.logging.NamedLogging
import com.digitalasset.canton.topology.processing.AuthorizedTopologyTransaction.{
  AuthorizedIdentifierDelegation,
  AuthorizedNamespaceDelegation,
}
import com.digitalasset.canton.topology.processing.TransactionAuthorizationValidator.AuthorizationChain
import com.digitalasset.canton.topology.store.{TopologyStore, TopologyStoreId}
import com.digitalasset.canton.topology.transaction.*
import com.digitalasset.canton.topology.{Namespace, UniqueIdentifier}
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.ErrorUtil

import scala.collection.concurrent.TrieMap
import scala.collection.mutable
import scala.concurrent.{ExecutionContext, Future}

/** common functionality shared between incoming transaction authorization validator and the auth computation */
trait TransactionAuthorizationValidator {

  this: NamedLogging =>

  // Per-namespace authorization graphs, lazily created / batch-loaded from the store.
  protected val namespaceCache = new TrieMap[Namespace, AuthorizationGraph]()
  // Cached identifier delegations per unique identifier.
  protected val identifierDelegationCache =
    new TrieMap[UniqueIdentifier, Set[AuthorizedIdentifierDelegation]]()

  protected def store: TopologyStore[TopologyStoreId]

  /** True if the transaction's signing key currently satisfies its required authorization.
    * Root certificates are self-authorizing; otherwise the check depends on whether the
    * mapping requires a namespace (possibly root) delegation or per-uid authorization.
    */
  def isCurrentlyAuthorized(sit: SignedTopologyTransaction[TopologyChangeOp]): Boolean = {
    val authKey = sit.key.fingerprint
    if (NamespaceDelegation.isRootCertificate(sit)) true
    else
      sit.transaction.element.mapping.requiredAuth match {
        case RequiredAuth.Ns(namespace, rootDelegation) =>
          getAuthorizationGraphForNamespace(namespace).isValidAuthorizationKey(
            authKey,
            requireRoot = rootDelegation,
          )
        case RequiredAuth.Uid(uids) =>
          uids.forall(isAuthorizedForUid(_, authKey))
      }
  }

  /** Computes the chain of delegations that authorizes `sit`, or `None` if unauthorized.
    * A root certificate yields an empty chain; uid-based requirements merge the chains
    * of all uids (all must be present — `traverse` fails on the first missing one).
    */
  def authorizationChainFor(
      sit: SignedTopologyTransaction[TopologyChangeOp]
  ): Option[AuthorizationChain] = {
    val authKey = sit.key.fingerprint
    if (NamespaceDelegation.isRootCertificate(sit)) Some(AuthorizationChain(Seq(), Seq()))
    else
      sit.transaction.element.mapping.requiredAuth match {
        case RequiredAuth.Ns(namespace, rootDelegation) =>
          getAuthorizationGraphForNamespace(namespace).authorizationChain(
            authKey,
            requireRoot = rootDelegation,
          )
        case RequiredAuth.Uid(uids) =>
          uids.toList
            .traverse(authorizationChainFor(_, authKey))
            .map(_.foldLeft(AuthorizationChain(Seq(), Seq())) { case (acc, elem) =>
              acc.merge(elem)
            })
      }
  }

  /** Chain authorizing `authKey` for `uid`: either directly via the namespace graph, or
    * indirectly via an identifier delegation whose signing key is itself authorized.
    */
  protected def authorizationChainFor(
      uid: UniqueIdentifier,
      authKey: Fingerprint,
  ): Option[AuthorizationChain] = {
    val graph = getAuthorizationGraphForNamespace(uid.namespace)
    graph.authorizationChain(authKey, requireRoot = false).orElse {
      getAuthorizedIdentifierDelegation(graph, uid, authKey).flatMap { aid =>
        graph
          .authorizationChain(aid.signingKey, requireRoot = false)
          .map(_.addIdentifierDelegation(aid))
      }
    }
  }

  // Finds a cached identifier delegation targeting `authKey` whose own signing key is
  // valid in the namespace graph.
  private def getAuthorizedIdentifierDelegation(
      graph: AuthorizationGraph,
      uid: UniqueIdentifier,
      authKey: Fingerprint,
  ): Option[AuthorizedIdentifierDelegation] = {
    getIdentifierDelegationsForUid(uid)
      .filter(_.mapping.target.fingerprint == authKey)
      .find(aid => graph.isValidAuthorizationKey(aid.signingKey, requireRoot = false))
  }

  /** True if `authKey` may act for `uid`, either via its namespace or via an identifier delegation. */
  def isAuthorizedForUid(uid: UniqueIdentifier, authKey: Fingerprint): Boolean = {
    val graph = getAuthorizationGraphForNamespace(uid.namespace)
    graph.isValidAuthorizationKey(
      authKey,
      requireRoot = false,
    ) || getAuthorizedIdentifierDelegation(graph, uid, authKey).nonEmpty
  }

  // Cache read only — loading is done separately via loadIdentifierDelegations.
  protected def getIdentifierDelegationsForUid(
      uid: UniqueIdentifier
  ): Set[AuthorizedIdentifierDelegation] = {
    identifierDelegationCache
      .getOrElse(uid, Set())
  }

  // Creates an empty graph on first access; loadAuthorizationGraphs populates graphs in bulk.
  protected def getAuthorizationGraphForNamespace(
      namespace: Namespace
  ): AuthorizationGraph =
    namespaceCache.getOrElseUpdate(
      namespace,
      new AuthorizationGraph(namespace, extraDebugInfo = false, loggerFactory),
    )

  /** Batch-loads namespace delegation graphs for the namespaces not yet cached,
    * as of (exclusive) `timestamp`.
    */
  protected def loadAuthorizationGraphs(
      timestamp: CantonTimestamp,
      namespaces: Set[Namespace],
  )(implicit
      traceContext: TraceContext,
      executionContext: ExecutionContext,
  ): Future[Unit] = {
    val loadNamespaces =
      namespaces -- namespaceCache.keySet // only load the ones we don't already hold in memory
    for {
      existing <- store.findPositiveTransactions(
        asOf = timestamp,
        asOfInclusive = false,
        includeSecondary = false,
        types = Seq(DomainTopologyTransactionType.NamespaceDelegation),
        filterUid = None,
        filterNamespace = Some(loadNamespaces.toSeq),
      )
    } yield {
      existing.adds
        .toAuthorizedTopologyTransactions { case x: NamespaceDelegation => x }
        .groupBy(_.mapping.namespace)
        .foreach { case (namespace, transactions) =>
          ErrorUtil.requireArgument(
            !namespaceCache.isDefinedAt(namespace),
            s"graph shouldnt exist before loading ${namespaces} vs ${namespaceCache.keySet}",
          )
          val graph = new AuthorizationGraph(namespace, extraDebugInfo = false, loggerFactory)
          namespaceCache.put(namespace, graph).discard
          // use un-authorized batch load. while we are checking for proper authorization when we
          // add a certificate the first time, we allow for the situation where an intermediate certificate
          // is currently expired, but might be replaced with another cert. in this case,
          // the authorization check would fail.
          // unauthorized certificates are not really an issue as we'll simply exclude them when calculating
          // the connected graph
          graph.unauthorizedAdd(transactions)
        }
    }
  }

  /** Loads identifier delegations for the given uids (skipping already-cached ones) and
    * returns the set of uids, cached or freshly loaded, whose namespace is in `namespaces`.
    */
  protected def loadIdentifierDelegations(
      timestamp: CantonTimestamp,
      namespaces: Seq[Namespace],
      uids: Set[UniqueIdentifier],
  )(implicit
      traceContext: TraceContext,
      executionContext: ExecutionContext,
  ): Future[Set[UniqueIdentifier]] = {
    store
      .findPositiveTransactions(
        asOf = timestamp,
        asOfInclusive = false,
        includeSecondary = false,
        types = Seq(DomainTopologyTransactionType.IdentifierDelegation),
        filterNamespace = Some(namespaces),
        filterUid = Some((uids -- identifierDelegationCache.keySet).toSeq),
      )
      .map(_.adds.toAuthorizedTopologyTransactions { case x: IdentifierDelegation => x })
      .map { loaded =>
        // include the uids which we already cache
        val start =
          identifierDelegationCache.keySet.filter(x => namespaces.contains(x.namespace)).toSet
        loaded.foldLeft(start) { case (acc, item) =>
          mergeLoadedIdentifierDelegation(item)
          val uid = item.mapping.identifier
          if (namespaces.contains(uid.namespace))
            acc + uid
          else acc
        }
      }
  }

  private def mergeLoadedIdentifierDelegation(item: AuthorizedIdentifierDelegation): Unit =
    updateIdentifierDelegationCache(item.mapping.identifier, _ + item)

  // NOTE(review): read-modify-write on the TrieMap is not atomic; presumably callers are
  // single-threaded per uid — confirm before relying on concurrent updates.
  protected def updateIdentifierDelegationCache(
      uid: UniqueIdentifier,
      op: Set[AuthorizedIdentifierDelegation] => Set[AuthorizedIdentifierDelegation],
  ): Unit = {
    val cur = identifierDelegationCache.getOrElseUpdate(uid, Set())
    identifierDelegationCache.update(uid, op(cur)).discard
  }

}

object TransactionAuthorizationValidator {

  /** authorization data
    *
    * this type is returned by the authorization validator. it contains the series of transactions
    * that authorize a certain topology transaction.
    *
    * note that the order of the namespace delegation is in "authorization order".
    */
  final case class AuthorizationChain(
      identifierDelegation: Seq[AuthorizedIdentifierDelegation],
      namespaceDelegations: Seq[AuthorizedNamespaceDelegation],
  ) {

    def addIdentifierDelegation(aid: AuthorizedIdentifierDelegation): AuthorizationChain =
      copy(identifierDelegation = identifierDelegation :+ aid)

    def merge(other: AuthorizationChain): AuthorizationChain = {
      AuthorizationChain(
        mergeUnique(this.identifierDelegation, other.identifierDelegation),
        mergeUnique(this.namespaceDelegations, other.namespaceDelegations),
      )
    }

    // order-preserving de-duplicating concatenation (left elements keep precedence)
    private def mergeUnique[T](left: Seq[T], right: Seq[T]): Seq[T] = {
      mutable.LinkedHashSet.from(left).addAll(right).toSeq
    }

  }

  object AuthorizationChain {
    val empty = AuthorizationChain(Seq(), Seq())
  }

}
diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TransactionAuthorizationValidatorX.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TransactionAuthorizationValidatorX.scala new file mode 100644 index 0000000000..6a89f8dd2d --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TransactionAuthorizationValidatorX.scala @@ -0,0 +1,325 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.topology.processing

import cats.syntax.bifunctor.*
import cats.syntax.foldable.*
import com.digitalasset.canton.DiscardOps
import com.digitalasset.canton.crypto.{CryptoPureApi, Fingerprint}
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.logging.NamedLogging
import com.digitalasset.canton.topology.processing.AuthorizedTopologyTransactionX.AuthorizedIdentifierDelegationX
import com.digitalasset.canton.topology.store.{
  TopologyStoreId,
  TopologyStoreX,
  TopologyTransactionRejection,
}
import com.digitalasset.canton.topology.transaction.SignedTopologyTransactionX.GenericSignedTopologyTransactionX
import com.digitalasset.canton.topology.transaction.TopologyMappingX.RequiredAuthXAuthorizations
import com.digitalasset.canton.topology.transaction.*
import com.digitalasset.canton.topology.{Namespace, UniqueIdentifier}
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.ErrorUtil

import scala.collection.concurrent.TrieMap
import scala.concurrent.{ExecutionContext, Future}

/** common functionality shared between incoming transaction authorization validator and the auth computation */
trait TransactionAuthorizationValidatorX {

  this: NamedLogging =>

  // Per-namespace authorization graphs (plain namespaces).
  protected val namespaceCache = new TrieMap[Namespace, AuthorizationGraphX]()
  // Cached identifier delegations per unique identifier.
  protected val identifierDelegationCache =
    new TrieMap[UniqueIdentifier, Set[AuthorizedIdentifierDelegationX]]()
  // Unionspace definition plus its composite authorization graph, keyed by the unionspace namespace.
  protected val unionspaceCache =
    new TrieMap[Namespace, (UnionspaceDefinitionX, UnionspaceAuthorizationGraphX)]()

  protected def store: TopologyStoreX[TopologyStoreId]

  protected def pureCrypto: CryptoPureApi

  /** Checks whether the signatures on `toValidate` satisfy its required authorization.
    *
    * @param toValidate the signed transaction being validated
    * @param inStore    the transaction currently in the store for the same mapping, if any
    *                   (the required auth may depend on the previous state)
    * @return `Left(rejection)` when a signature is invalid or authorizes nothing;
    *         otherwise `Right` with the authorizations still missing
    *         (`RequiredAuthXAuthorizations.empty` means fully authorized).
    */
  def isCurrentlyAuthorized(
      toValidate: GenericSignedTopologyTransactionX,
      inStore: Option[GenericSignedTopologyTransactionX],
  ): Either[TopologyTransactionRejection, RequiredAuthXAuthorizations] = {
    // first determine all possible namespaces and uids that need to sign the transaction
    val requiredAuth = toValidate.transaction.mapping.requiredAuth(inStore.map(_.transaction))
    val required = requiredAuth
      .foldMap(
        namespaceCheck = rns =>
          RequiredAuthXAuthorizations(
            namespacesWithRoot =
              if (rns.requireRootDelegation) rns.namespaces else Set.empty[Namespace],
            namespaces = if (rns.requireRootDelegation) Set.empty[Namespace] else rns.namespaces,
          ),
        uidCheck = ruid => RequiredAuthXAuthorizations(uids = ruid.uids),
      )

    val actualAuthorizersForSignatures = toValidate.signatures.toSeq.forgetNE.foldMap { sig =>
      // Now let's determine which namespaces and uids actually delegated to any of the keys
      val (actualNamespaceAuthorizationsWithRoot, rootKeys) =
        required.namespacesWithRoot.flatMap { ns =>
          getAuthorizationCheckForNamespace(ns)
            .getValidAuthorizationKey(
              sig.signedBy,
              requireRoot = true,
            )
            .map(ns -> _)
        }.unzip
      val (actualNamespaceAuthorizations, nsKeys) = required.namespaces.flatMap { ns =>
        getAuthorizationCheckForNamespace(ns)
          .getValidAuthorizationKey(
            sig.signedBy,
            requireRoot = false,
          )
          .map(ns -> _)
      }.unzip
      val (actualUidAuthorizations, uidKeys) =
        required.uids.flatMap { uid =>
          val authCheck = getAuthorizationCheckForNamespace(uid.namespace)
          val keyForNamespace = authCheck
            .getValidAuthorizationKey(sig.signedBy, requireRoot = false)
          // only consulted when the namespace does not already authorize the key
          lazy val keyForUid = getAuthorizedIdentifierDelegation(authCheck, uid, Set(sig.signedBy))
            .map(_.mapping.target)

          keyForNamespace
            .orElse(keyForUid)
            .map(uid -> _)
        }.unzip

      for {
        _ <- Either.cond[TopologyTransactionRejection, Unit](
          // the key used for the signature must be a valid key for at least one of the delegation mechanisms
          actualNamespaceAuthorizationsWithRoot.nonEmpty || actualNamespaceAuthorizations.nonEmpty || actualUidAuthorizations.nonEmpty,
          (),
          TopologyTransactionRejection.NotAuthorized,
        )

        keyForSignature <- (rootKeys ++ nsKeys ++ uidKeys).headOption
          .toRight[TopologyTransactionRejection](
            TopologyTransactionRejection.NotAuthorized
          )
        // cryptographically verify the signature against the transaction hash
        _ <- pureCrypto
          .verifySignature(toValidate.transaction.hash.hash, keyForSignature, sig)
          .leftMap(TopologyTransactionRejection.SignatureCheckFailed)

      } yield {
        RequiredAuthXAuthorizations(
          actualNamespaceAuthorizationsWithRoot,
          actualNamespaceAuthorizations,
          actualUidAuthorizations,
        )
      }
    }

    // and finally we can check whether the authorizations granted by the keys actually satisfy
    // the authorization requirements
    actualAuthorizersForSignatures.map { actual =>
      requiredAuth
        .satisfiedByActualAuthorizers(
          namespacesWithRoot = actual.namespacesWithRoot,
          namespaces = actual.namespaces,
          uids = actual.uids,
        )
        .fold(identity, _ => RequiredAuthXAuthorizations.empty)
    }
  }

  // Finds a cached identifier delegation targeting one of `authKeys` whose own signing
  // keys are valid under `graph`.
  private def getAuthorizedIdentifierDelegation(
      graph: AuthorizationCheckX,
      uid: UniqueIdentifier,
      authKeys: Set[Fingerprint],
  ): Option[AuthorizedIdentifierDelegationX] = {
    getIdentifierDelegationsForUid(uid)
      .find(aid =>
        authKeys(aid.mapping.target.id) && graph.areValidAuthorizationKeys(
          aid.signingKeys,
          requireRoot = false,
        )
      )
  }

  // Cache read only — loading is done separately via loadIdentifierDelegations.
  protected def getIdentifierDelegationsForUid(
      uid: UniqueIdentifier
  ): Set[AuthorizedIdentifierDelegationX] = {
    identifierDelegationCache
      .getOrElse(uid, Set())
  }

  /** Resolves the authorization check for a namespace: unionspace definitions take
    * precedence over plain namespace graphs; an unknown namespace authorizes nothing.
    */
  protected def getAuthorizationCheckForNamespace(
      namespace: Namespace
  ): AuthorizationCheckX = {
    val unionspaceCheck = unionspaceCache.get(namespace).map(_._2)
    val namespaceCheck = namespaceCache.get(
      namespace
    )
    unionspaceCheck
      .orElse(namespaceCheck)
      .getOrElse(AuthorizationCheckX.empty)
  }

  // Creates an empty graph on first access; loadAuthorizationGraphs populates graphs in bulk.
  protected def getAuthorizationGraphForNamespace(
      namespace: Namespace
  ): AuthorizationGraphX = {
    namespaceCache.getOrElseUpdate(
      namespace,
      new AuthorizationGraphX(namespace, extraDebugInfo = false, loggerFactory),
    )
  }

  /** Batch-loads authorization state for the given namespaces as of (exclusive) `timestamp`:
    * first unionspace definitions, then namespace delegations for both the requested
    * namespaces and the unionspace owners, finally assembling the composite unionspace graphs.
    */
  protected def loadAuthorizationGraphs(
      timestamp: CantonTimestamp,
      namespaces: Set[Namespace],
  )(implicit executionContext: ExecutionContext, traceContext: TraceContext): Future[Unit] = {
    val uncachedNamespaces =
      namespaces -- namespaceCache.keySet -- unionspaceCache.keySet // only load the ones we don't already hold in memory

    for {
      // TODO(#12390) this doesn't find fully validated transactions from the same batch
      storedUnionspaces <- store.findPositiveTransactions(
        timestamp,
        asOfInclusive = false,
        isProposal = false,
        types = Seq(UnionspaceDefinitionX.code),
        filterUid = None,
        filterNamespace = Some(uncachedNamespaces.toSeq),
      )
      unionspaces = storedUnionspaces.result.flatMap(
        _.transaction.selectMapping[UnionspaceDefinitionX]
      )
      // owners of a unionspace also need their namespace graphs loaded
      unionspaceOwnersToLoad = unionspaces
        .flatMap(_.transaction.mapping.owners)
        .toSet -- namespaceCache.keySet
      namespacesToLoad = uncachedNamespaces ++ unionspaceOwnersToLoad

      storedNamespaceDelegations <- store.findPositiveTransactions(
        timestamp,
        asOfInclusive = false,
        isProposal = false,
        types = Seq(NamespaceDelegationX.code),
        filterUid = None,
        filterNamespace = Some(namespacesToLoad.toSeq),
      )
      namespaceDelegations = storedNamespaceDelegations.result.flatMap(
        _.transaction.selectMapping[NamespaceDelegationX]
      )
    } yield {
      val missingNSDs =
        namespacesToLoad -- namespaceDelegations.map(_.transaction.mapping.namespace).toSet
      if (missingNSDs.nonEmpty)
        logger.debug(s"Didn't find a namespace delegations for $missingNSDs at $timestamp")

      val namespaceToTx = namespaceDelegations
        .groupBy(_.transaction.mapping.namespace)
      namespaceToTx
        .foreach { case (namespace, transactions) =>
          ErrorUtil.requireArgument(
            !namespaceCache.isDefinedAt(namespace),
            s"graph shouldn't exist before loading ${namespaces} vs ${namespaceCache.keySet}",
          )
          val graph = new AuthorizationGraphX(
            namespace,
            extraDebugInfo = false,
            loggerFactory,
          )
          namespaceCache.put(namespace, graph).discard
          // use un-authorized batch load. while we are checking for proper authorization when we
          // add a certificate the first time, we allow for the situation where an intermediate certificate
          // is currently expired, but might be replaced with another cert. in this case,
          // the authorization check would fail.
          // unauthorized certificates are not really an issue as we'll simply exclude them when calculating
          // the connected graph
          graph.unauthorizedAdd(transactions.map(AuthorizedTopologyTransactionX(_)))
        }

      unionspaces.foreach { us =>
        import us.transaction.mapping.unionspace
        ErrorUtil.requireArgument(
          !unionspaceCache.isDefinedAt(unionspace),
          s"unionspace shouldn't already be cached before loading $unionspace vs ${unionspaceCache.keySet}",
        )
        // one graph per owner namespace (empty graph if nothing was loaded for it)
        val graphs = us.transaction.mapping.owners.forgetNE.toSeq.map(ns =>
          namespaceCache.getOrElseUpdate(
            ns,
            new AuthorizationGraphX(
              ns,
              extraDebugInfo = false,
              loggerFactory,
            ),
          )
        )
        // graph for delegations issued directly under the unionspace namespace itself
        val directUnionspaceGraph = namespaceCache.getOrElseUpdate(
          unionspace,
          new AuthorizationGraphX(
            unionspace,
            extraDebugInfo = false,
            loggerFactory,
          ),
        )
        unionspaceCache
          .put(
            unionspace,
            (
              us.transaction.mapping,
              UnionspaceAuthorizationGraphX(
                us.transaction.mapping,
                directUnionspaceGraph,
                graphs,
              ),
            ),
          )
          .discard
      }
    }
  }

  /** Loads identifier delegations for the given uids (skipping already-cached ones) and
    * returns the set of uids, cached or freshly loaded, whose namespace is in `namespaces`.
    */
  protected def loadIdentifierDelegations(
      timestamp: CantonTimestamp,
      namespaces: Seq[Namespace],
      uids: Set[UniqueIdentifier],
  )(implicit
      traceContext: TraceContext,
      executionContext: ExecutionContext,
  ): Future[Set[UniqueIdentifier]] = {
    val uidFilter = (uids -- identifierDelegationCache.keySet)
    store
      .findPositiveTransactions(
        timestamp,
        asOfInclusive = false,
        isProposal = false,
        types = Seq(IdentifierDelegationX.code),
        filterUid = Some(uidFilter.toSeq),
        filterNamespace = None,
      )
      .map { stored =>
        val loaded = stored.result.flatMap(
          _.transaction.selectMapping[IdentifierDelegationX].map(AuthorizedTopologyTransactionX(_))
        )
        // include the uids which we already cache
        val start =
          identifierDelegationCache.keySet
            .filter(cached => namespaces.contains(cached.namespace))
            .toSet
        loaded.foldLeft(start) { case (acc, item) =>
          mergeLoadedIdentifierDelegation(item)
          val uid = item.mapping.identifier
          if (namespaces.contains(uid.namespace))
            acc + uid
          else acc
        }

      }
  }

  private def mergeLoadedIdentifierDelegation(item: AuthorizedIdentifierDelegationX): Unit =
    updateIdentifierDelegationCache(item.mapping.identifier, _ + item)

  // NOTE(review): read-modify-write on the TrieMap is not atomic; presumably callers are
  // single-threaded per uid — confirm before relying on concurrent updates.
  protected def updateIdentifierDelegationCache(
      uid: UniqueIdentifier,
      op: Set[AuthorizedIdentifierDelegationX] => Set[AuthorizedIdentifierDelegationX],
  ): Unit = {
    val cur = identifierDelegationCache.getOrElseUpdate(uid, Set())
    identifierDelegationCache.update(uid, op(cur)).discard
  }
}
diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyStateForInititalizationService.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyStateForInititalizationService.scala new file mode 100644 index 0000000000..f4e47d4471 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyStateForInititalizationService.scala @@ -0,0 +1,80 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.topology.store

import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.topology.store.StoredTopologyTransactionsX.GenericStoredTopologyTransactionsX
import com.digitalasset.canton.topology.store.TopologyStoreId.DomainStore
import com.digitalasset.canton.topology.{MediatorId, Member, ParticipantId}
import com.digitalasset.canton.tracing.TraceContext

import scala.concurrent.{ExecutionContext, Future}

// NOTE(review): the file name carries a typo ("Inititalization") while the types below are
// spelled correctly — worth aligning upstream.
/** Provides the initial topology snapshot a newly onboarding member needs to bootstrap. */
trait TopologyStateForInitializationService {
  def initialSnapshot(member: Member)(implicit
      executionContext: ExecutionContext,
      traceContext: TraceContext,
  ): Future[GenericStoredTopologyTransactionsX]
}

final class StoreBasedTopologyStateForInitializationService(
    domainTopologyStore: TopologyStoreX[DomainStore],
    val loggerFactory: NamedLoggerFactory,
) extends TopologyStateForInitializationService
    with NamedLogging {

  /** Downloading the initial topology snapshot works as follows:
    *
    * 1. Determine the first MediatorDomainStateX or DomainTrustCertificateX that mentions the member to onboard.
    * 2. Take its effective time (here t0')
    * 3. Find all transactions with sequence time <= t0'
    * 4. Find the maximum effective time of the transactions returned in 3. (here ts1')
    * 5. Set all validUntil > ts1' to None
    *
    * TODO(#13394) adapt this logic to allow onboarding of a previously offboarded member
    *
    * {{{
    *
    * t0 , t1 ... sequenced time
    * t0', t1' ... effective time
    *
    *                           xxxxxxxxxxxxx
    *                         xxx           xxx
    *            t0   x        t0'        xx
    *            │    │   │    │     │     │
    *            ├─────────┼────────┼────────┼───────┼────────►
    *            │    │   │    │     │     │
    *                 x   t1         x    t1'
    *                  xx                xx
    *                    xx            xx
    *                      xx MDS/DTC xx
    * }}}
    */
  override def initialSnapshot(member: Member)(implicit
      executionContext: ExecutionContext,
      traceContext: TraceContext,
  ): Future[GenericStoredTopologyTransactionsX] = {
    // anchor point: first transaction that onboards this member (trust certificate for
    // participants, mediator state for mediators)
    val effectiveFromF = member match {
      case participant @ ParticipantId(_) =>
        domainTopologyStore
          .findFirstTrustCertificateForParticipant(participant)
          .map(_.map(_.validFrom))
      case mediator @ MediatorId(_) =>
        domainTopologyStore.findFirstMediatorStateForMediator(mediator).map(_.map(_.validFrom))
      case _ =>
        // TODO(#12390) proper error
        // NOTE(review): any other member type (e.g. a sequencer) currently crashes with
        // NotImplementedError here.
        ???
    }

    effectiveFromF.flatMap { effectiveFromO =>
      effectiveFromO
        .map { effectiveFrom =>
          logger.debug(s"Fetching initial topology state for $member at $effectiveFrom")
          domainTopologyStore.findEssentialStateForMember(member, effectiveFrom.value)
        }
        // TODO(#12390) should this error out if nothing can be found?
        .getOrElse(Future.successful(StoredTopologyTransactionsX.empty))
    }
  }
}
diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyStore.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyStore.scala new file mode 100644 index 0000000000..a6ac03b897 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyStore.scala @@ -0,0 +1,947 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.topology.store

import cats.syntax.parallel.*
import cats.syntax.traverse.*
import com.digitalasset.canton.ProtoDeserializationError
import com.digitalasset.canton.concurrent.FutureSupervisor
import com.digitalasset.canton.config.CantonRequireTypes.LengthLimitedString.DisplayName
import com.digitalasset.canton.config.CantonRequireTypes.{
  LengthLimitedString,
  String255,
  String256M,
}
import com.digitalasset.canton.config.ProcessingTimeout
import com.digitalasset.canton.config.RequireTypes.{PositiveInt, PositiveLong}
import com.digitalasset.canton.crypto.{PublicKey, SignatureCheckError}
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.lifecycle.{FlagCloseable, FutureUnlessShutdown}
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.resource.{DbStorage, MemoryStorage, Storage}
import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.digitalasset.canton.time.NonNegativeFiniteDuration
import com.digitalasset.canton.topology.*
import com.digitalasset.canton.topology.admin.v0 as topoV0
import com.digitalasset.canton.topology.client.DomainTopologyClient
import com.digitalasset.canton.topology.processing.TransactionAuthorizationValidator.AuthorizationChain
import com.digitalasset.canton.topology.processing.{
  EffectiveTime,
  SequencedTime,
  SnapshotAuthorizationValidator,
}
import com.digitalasset.canton.topology.store.TopologyStoreId.DomainStore
import com.digitalasset.canton.topology.store.db.{DbPartyMetadataStore, DbTopologyStore}
import com.digitalasset.canton.topology.store.memory.{
  InMemoryPartyMetadataStore,
  InMemoryTopologyStore,
}
import com.digitalasset.canton.topology.transaction.*
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.MonadUtil
import com.digitalasset.canton.version.ProtocolVersion
import com.google.common.annotations.VisibleForTesting

import scala.concurrent.duration.Duration
import scala.concurrent.{ExecutionContext, Future}

/** A topology transaction together with its validity window in the store.
  *
  * @param sequenced  time at which the transaction was sequenced
  * @param validFrom  effective time from which the transaction is valid
  * @param validUntil effective time until which it is valid; `None` means still valid
  * @param transaction the signed transaction itself
  */
final case class StoredTopologyTransaction[+Op <: TopologyChangeOp](
    sequenced: SequencedTime,
    validFrom: EffectiveTime,
    validUntil: Option[EffectiveTime],
    transaction: SignedTopologyTransaction[Op],
) extends PrettyPrinting {
  override def pretty: Pretty[StoredTopologyTransaction.this.type] =
    prettyOfClass(
      param("sequenced", _.sequenced.value),
      param("validFrom", _.validFrom.value),
      paramIfDefined("validUntil", _.validUntil.map(_.value)),
      param("op", _.transaction.transaction.op),
      param("mapping", _.transaction.transaction.element.mapping),
    )
}

/** the party metadata used to inform the ledger api server
  *
  * the first class parameters correspond to the relevant information, whereas the
  * second class parameters are synchronisation information used during crash recovery.
  * we don't want these in an equality comparison.
  */
final case class PartyMetadata(
    partyId: PartyId,
    displayName: Option[DisplayName],
    participantId: Option[ParticipantId],
)(
    // second parameter list: excluded from case-class equality/hashCode by design
    val effectiveTimestamp: CantonTimestamp,
    val submissionId: String255,
    val notified: Boolean = false,
)

/** Persistence interface for party metadata and its ledger-api notification state. */
trait PartyMetadataStore extends AutoCloseable {

  def metadataForParty(partyId: PartyId)(implicit
      traceContext: TraceContext
  ): Future[Option[PartyMetadata]]

  // convenience overload that unpacks a PartyMetadata into the field-wise variant below
  final def insertOrUpdatePartyMetadata(metadata: PartyMetadata)(implicit
      traceContext: TraceContext
  ): Future[Unit] = {
    insertOrUpdatePartyMetadata(
      partyId = metadata.partyId,
      participantId = metadata.participantId,
      displayName = metadata.displayName,
      effectiveTimestamp = metadata.effectiveTimestamp,
      submissionId = metadata.submissionId,
    )
  }

  def insertOrUpdatePartyMetadata(
      partyId: PartyId,
      participantId: Option[ParticipantId],
      displayName: Option[DisplayName],
      effectiveTimestamp: CantonTimestamp,
      submissionId: String255,
  )(implicit traceContext: TraceContext): Future[Unit]

  /** mark the given metadata as having been successfully forwarded to the domain */
  def markNotified(metadata: PartyMetadata)(implicit traceContext: TraceContext): Future[Unit]

  /** fetch the current set of party data which still needs to be notified */
  def fetchNotNotified()(implicit traceContext: TraceContext): Future[Seq[PartyMetadata]]

}

object PartyMetadataStore {

  /** Picks the in-memory or DB-backed implementation based on the configured storage. */
  def apply(
      storage: Storage,
      timeouts: ProcessingTimeout,
      loggerFactory: NamedLoggerFactory,
  )(implicit
      ec: ExecutionContext
  ): PartyMetadataStore =
    storage match {
      case _: MemoryStorage => new InMemoryPartyMetadataStore()
      case jdbc: DbStorage => new DbPartyMetadataStore(jdbc, timeouts, loggerFactory)
    }

}

/** Identifier of a topology store; concrete variants are defined in the companion
  * (cut off below this chunk).
  */
sealed trait TopologyStoreId extends PrettyPrinting {
  def filterName: String = dbString.unwrap
  def dbString: LengthLimitedString
  def dbStringWithDaml2xUniquifier(uniquifier: String): LengthLimitedString
}

object
TopologyStoreId { + + /** A topology store storing sequenced topology transactions + * + * @param domainId the domain id of the store + * @param discriminator the discriminator of the store. used for mediator request store + * or in daml 2.x for embedded mediator topology stores + */ + final case class DomainStore(domainId: DomainId, discriminator: String = "") + extends TopologyStoreId { + private val dbStringWithoutDiscriminator = domainId.toLengthLimitedString + val dbString: LengthLimitedString = { + if (discriminator.isEmpty) dbStringWithoutDiscriminator + else + LengthLimitedString + .tryCreate(discriminator + "::", discriminator.length + 2) + .tryConcatenate(dbStringWithoutDiscriminator) + } + + override def pretty: Pretty[this.type] = { + if (discriminator.nonEmpty) { + prettyOfString(storeId => + show"${storeId.discriminator}${SafeSimpleString.delimiter}${storeId.domainId}" + ) + } else { + prettyOfParam(_.domainId) + } + } + + // The reason for this somewhat awkward method is backward compat with uniquifier inserted in the middle of + // discriminator and domain id. 
Can be removed once fully on daml 3.0: + override def dbStringWithDaml2xUniquifier(uniquifier: String): LengthLimitedString = { + require(uniquifier.nonEmpty) + LengthLimitedString + .tryCreate(discriminator + uniquifier + "::", discriminator.length + uniquifier.length + 2) + .tryConcatenate(dbStringWithoutDiscriminator) + } + } + + // authorized transactions (the topology managers store) + type AuthorizedStore = AuthorizedStore.type + object AuthorizedStore extends TopologyStoreId { + val dbString = String255.tryCreate("Authorized") + override def dbStringWithDaml2xUniquifier(uniquifier: String): LengthLimitedString = { + require(uniquifier.nonEmpty) + LengthLimitedString + .tryCreate(uniquifier + "::", uniquifier.length + 2) + .tryConcatenate(dbString) + } + + override def pretty: Pretty[AuthorizedStore.this.type] = prettyOfString( + _.dbString.unwrap + ) + } + + def apply(fName: String): TopologyStoreId = fName match { + case "Authorized" => AuthorizedStore + case domain => DomainStore(DomainId(UniqueIdentifier.tryFromProtoPrimitive(domain))) + } + + trait IdTypeChecker[A <: TopologyStoreId] { + def isOfType(id: TopologyStoreId): Boolean + } + + implicit val domainTypeChecker: IdTypeChecker[DomainStore] = new IdTypeChecker[DomainStore] { + override def isOfType(id: TopologyStoreId): Boolean = id match { + case DomainStore(_, _) => true + case AuthorizedStore => false + } + } + + @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) + def select[StoreId <: TopologyStoreId](store: TopologyStore[TopologyStoreId])(implicit + checker: IdTypeChecker[StoreId] + ): Option[TopologyStore[StoreId]] = if (checker.isOfType(store.storeId)) + Some(store.asInstanceOf[TopologyStore[StoreId]]) + else None + + @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) + def selectX[StoreId <: TopologyStoreId](store: TopologyStoreX[TopologyStoreId])(implicit + checker: IdTypeChecker[StoreId] + ): Option[TopologyStoreX[StoreId]] = if (checker.isOfType(store.storeId)) + 
Some(store.asInstanceOf[TopologyStoreX[StoreId]]) + else None + +} + +sealed trait TopologyTransactionRejection extends PrettyPrinting { + def asString: String + def asString1GB: String256M = + String256M.tryCreate(asString, Some("topology transaction rejection")) + + def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError +} +object TopologyTransactionRejection { + object NotAuthorized extends TopologyTransactionRejection { + override def asString: String = "Not authorized" + override def pretty: Pretty[NotAuthorized.type] = prettyOfString(_ => asString) + + override def toTopologyManagerError(implicit elc: ErrorLoggingContext) = + TopologyManagerError.UnauthorizedTransaction.Failure() + } + + final case class ThresholdTooHigh(actual: Int, mustBeAtMost: Int) + extends TopologyTransactionRejection { + override def asString: String = + s"Threshold must not be higher than $mustBeAtMost, but was $actual." + + override def pretty: Pretty[ThresholdTooHigh] = prettyOfString(_ => asString) + + override def toTopologyManagerError(implicit elc: ErrorLoggingContext) = { + TopologyManagerError.InvalidThreshold.ThresholdTooHigh(actual, mustBeAtMost) + } + } + + final case class SignatureCheckFailed(err: SignatureCheckError) + extends TopologyTransactionRejection { + override def asString: String = err.toString + override def pretty: Pretty[SignatureCheckFailed] = prettyOfClass(param("err", _.err)) + + override def toTopologyManagerError(implicit elc: ErrorLoggingContext) = + TopologyManagerError.InvalidSignatureError.Failure(err) + } + final case class WrongDomain(wrong: DomainId) extends TopologyTransactionRejection { + override def asString: String = show"Wrong domain $wrong" + override def pretty: Pretty[WrongDomain] = prettyOfClass(param("wrong", _.wrong)) + + override def toTopologyManagerError(implicit elc: ErrorLoggingContext) = + TopologyManagerError.WrongDomain.Failure(wrong) + } + final case class Duplicate(old: CantonTimestamp) extends 
TopologyTransactionRejection { + override def asString: String = show"Duplicate transaction from ${old}" + override def pretty: Pretty[Duplicate] = prettyOfClass(param("old", _.old)) + override def toTopologyManagerError(implicit elc: ErrorLoggingContext) = + TopologyManagerError.DuplicateTransaction.ExistsAt(old) + } + final case class SerialMismatch(expected: PositiveInt, actual: PositiveInt) + extends TopologyTransactionRejection { + override def asString: String = + show"The given serial $actual does not match the expected serial $expected" + override def pretty: Pretty[SerialMismatch] = + prettyOfClass(param("expected", _.expected), param("actual", _.actual)) + override def toTopologyManagerError(implicit elc: ErrorLoggingContext) = + TopologyManagerError.SerialMismatch.Failure(expected, actual) + } + final case class Other(str: String) extends TopologyTransactionRejection { + override def asString: String = str + override def pretty: Pretty[Other] = prettyOfString(_ => asString) + + override def toTopologyManagerError(implicit elc: ErrorLoggingContext) = + TopologyManagerError.InternalError.Other(str) + } + + final case class ExtraTrafficLimitTooLow( + member: Member, + actual: PositiveLong, + expectedMinimum: PositiveLong, + ) extends TopologyTransactionRejection { + override def asString: String = + s"Extra traffic limit for $member should be at least $expectedMinimum, but was $actual." 
+ + override def pretty: Pretty[ExtraTrafficLimitTooLow] = + prettyOfClass( + param("member", _.member), + param("actual", _.actual), + param("expectedMinimum", _.expectedMinimum), + ) + + override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = + TopologyManagerError.InvalidTrafficLimit.TrafficLimitTooLow(member, actual, expectedMinimum) + } + + final case class InsufficientKeys(members: Seq[Member]) extends TopologyTransactionRejection { + override def asString: String = + s"Members ${members.sorted.mkString(", ")} are missing a signing key or an encryption key or both." + + override def pretty: Pretty[InsufficientKeys] = prettyOfClass( + param("members", _.members) + ) + + override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = + TopologyManagerError.InsufficientKeys.Failure(members) + } + + final case class UnknownMembers(members: Seq[Member]) extends TopologyTransactionRejection { + override def asString: String = s"Members ${members.toSeq.sorted.mkString(", ")} are unknown." 
+ + override def pretty: Pretty[UnknownMembers] = prettyOfClass(param("members", _.members)) + + override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = + TopologyManagerError.UnknownMembers.Failure(members) + } + + final case class ParticipantStillHostsParties(participantId: ParticipantId, parties: Seq[PartyId]) + extends TopologyTransactionRejection { + override def asString: String = + s"Cannot remove domain trust certificate for $participantId because it still hosts parties ${parties + .mkString(",")}" + + override def pretty: Pretty[ParticipantStillHostsParties] = + prettyOfClass(param("participantId", _.participantId), param("parties", _.parties)) + + override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = + TopologyManagerError.IllegalRemovalOfDomainTrustCertificate.ParticipantStillHostsParties( + participantId, + parties, + ) + } +} + +final case class ValidatedTopologyTransaction( + transaction: SignedTopologyTransaction[TopologyChangeOp], + rejectionReason: Option[TopologyTransactionRejection], +) + +object ValidatedTopologyTransaction { + def valid( + transaction: SignedTopologyTransaction[TopologyChangeOp] + ): ValidatedTopologyTransaction = + ValidatedTopologyTransaction(transaction, None) +} + +sealed trait TimeQuery { + def toProtoV0: topoV0.BaseQuery.TimeQuery +} +object TimeQuery { + + /** Determine the headstate. 
+ */ + object HeadState extends TimeQuery { + override def toProtoV0: topoV0.BaseQuery.TimeQuery = + topoV0.BaseQuery.TimeQuery.HeadState(com.google.protobuf.empty.Empty()) + } + final case class Snapshot(asOf: CantonTimestamp) extends TimeQuery { + override def toProtoV0: topoV0.BaseQuery.TimeQuery = + topoV0.BaseQuery.TimeQuery.Snapshot(asOf.toProtoPrimitive) + } + final case class Range(from: Option[CantonTimestamp], until: Option[CantonTimestamp]) + extends TimeQuery { + override def toProtoV0: topoV0.BaseQuery.TimeQuery = topoV0.BaseQuery.TimeQuery.Range( + topoV0.BaseQuery.TimeRange(from.map(_.toProtoPrimitive), until.map(_.toProtoPrimitive)) + ) + } + + def fromProto( + proto: topoV0.BaseQuery.TimeQuery, + fieldName: String, + ): ParsingResult[TimeQuery] = + proto match { + case topoV0.BaseQuery.TimeQuery.Empty => + Left(ProtoDeserializationError.FieldNotSet(fieldName)) + case topoV0.BaseQuery.TimeQuery.Snapshot(value) => + CantonTimestamp.fromProtoPrimitive(value).map(Snapshot) + case topoV0.BaseQuery.TimeQuery.HeadState(_) => Right(HeadState) + case topoV0.BaseQuery.TimeQuery.Range(value) => + for { + fromO <- value.from.traverse(CantonTimestamp.fromProtoPrimitive) + toO <- value.until.traverse(CantonTimestamp.fromProtoPrimitive) + } yield Range(fromO, toO) + } + +} + +trait TopologyStoreCommon[+StoreID <: TopologyStoreId, ValidTx, StoredTx, SignedTx] + extends FlagCloseable { + + this: NamedLogging => + + protected implicit def ec: ExecutionContext + + def storeId: StoreID + + /** fetch the effective time updates greater than or equal to a certain timestamp + * + * this function is used to recover the future effective timestamp such that we can reschedule "pokes" of the + * topology client and updates of the acs commitment processor on startup + */ + def findUpcomingEffectiveChanges(asOfInclusive: CantonTimestamp)(implicit + traceContext: TraceContext + ): Future[Seq[TopologyStore.Change]] + + def maxTimestamp()(implicit + traceContext: TraceContext + ): 
Future[Option[(SequencedTime, EffectiveTime)]] + + /** returns the current dispatching watermark + * + * for topology transaction dispatching, we keep track up to which point in time + * we have mirrored the authorized store to the remote store + * + * the timestamp always refers to the timestamp of the authorized store! + */ + def currentDispatchingWatermark(implicit + traceContext: TraceContext + ): Future[Option[CantonTimestamp]] + + /** update the dispatching watermark for this target store */ + def updateDispatchingWatermark(timestamp: CantonTimestamp)(implicit + traceContext: TraceContext + ): Future[Unit] + + protected def signedTxFromStoredTx(storedTx: StoredTx): SignedTx + + def providesAdditionalSignatures(transaction: SignedTx)(implicit + traceContext: TraceContext + ): Future[Boolean] + + final def exists(transaction: SignedTx)(implicit + traceContext: TraceContext + ): Future[Boolean] = findStored(transaction).map(_.exists(signedTxFromStoredTx(_) == transaction)) + + def findStored(transaction: SignedTx, includeRejected: Boolean = false)(implicit + traceContext: TraceContext + ): Future[Option[StoredTx]] +} + +object TopologyStoreCommon { + + type DomainStoreCommon = TopologyStoreCommon[DomainStore, _, _, _] + +} + +abstract class TopologyStore[+StoreID <: TopologyStoreId](implicit + ec: ExecutionContext +) extends AutoCloseable + with TopologyStoreCommon[StoreID, ValidatedTopologyTransaction, StoredTopologyTransaction[ + TopologyChangeOp + ], SignedTopologyTransaction[TopologyChangeOp]] { + this: NamedLogging => + + /** add validated topology transaction as is to the topology transaction table */ + def append( + sequenced: SequencedTime, + effective: EffectiveTime, + transactions: Seq[ValidatedTopologyTransaction], + )(implicit + traceContext: TraceContext + ): Future[Unit] + + /** returns transactions that should be dispatched to the domain */ + def findDispatchingTransactionsAfter( + timestampExclusive: CantonTimestamp, + limit: Option[Int] = None, 
+ )(implicit traceContext: TraceContext): Future[StoredTopologyTransactions[TopologyChangeOp]] + + /** returns initial set of onboarding transactions that should be dispatched to the domain */ + def findParticipantOnboardingTransactions(participantId: ParticipantId, domainId: DomainId)( + implicit traceContext: TraceContext + ): FutureUnlessShutdown[Seq[SignedTopologyTransaction[TopologyChangeOp]]] + + /** returns an descending ordered list of timestamps of when participant state changes occurred before a certain point in time */ + def findTsOfParticipantStateChangesBefore( + beforeExclusive: CantonTimestamp, + participantId: ParticipantId, + limit: Int, + )(implicit traceContext: TraceContext): Future[Seq[CantonTimestamp]] + + /** Returns an ordered list of transactions from the transaction store within a certain range */ + def findTransactionsInRange(asOfExclusive: CantonTimestamp, upToExclusive: CantonTimestamp)( + implicit traceContext: TraceContext + ): Future[StoredTopologyTransactions[TopologyChangeOp]] + + def timestamp(useStateStore: Boolean = false)(implicit + traceContext: TraceContext + ): Future[Option[(SequencedTime, EffectiveTime)]] + + override def maxTimestamp()(implicit + traceContext: TraceContext + ): Future[Option[(SequencedTime, EffectiveTime)]] = timestamp(useStateStore = true) + + /** set of topology transactions which are active */ + def headTransactions(implicit + traceContext: TraceContext + ): Future[StoredTopologyTransactions[TopologyChangeOp.Positive]] + + /** finds transactions in the local store that would remove the topology state elements */ + def findRemovalTransactionForMappings( + mappings: Set[TopologyStateElement[TopologyMapping]] + )(implicit + traceContext: TraceContext + ): Future[Seq[SignedTopologyTransaction[TopologyChangeOp.Remove]]] + + def findPositiveTransactionsForMapping(mapping: TopologyMapping)(implicit + traceContext: TraceContext + ): Future[Seq[SignedTopologyTransaction[TopologyChangeOp.Positive]]] + + 
@VisibleForTesting + def allTransactions(includeRejected: Boolean = false)(implicit + traceContext: TraceContext + ): Future[StoredTopologyTransactions[TopologyChangeOp]] + + def findStoredNoSignature(transaction: TopologyTransaction[TopologyChangeOp])(implicit + traceContext: TraceContext + ): Future[Seq[StoredTopologyTransaction[TopologyChangeOp]]] + + def findStoredForVersion( + transaction: TopologyTransaction[TopologyChangeOp], + protocolVersion: ProtocolVersion, + )(implicit + traceContext: TraceContext + ): Future[Option[StoredTopologyTransaction[TopologyChangeOp]]] + + /** Bootstrap a node state from a topology transaction collection */ + def bootstrap( + collection: StoredTopologyTransactions[TopologyChangeOp.Positive] + )(implicit traceContext: TraceContext): Future[Unit] = { + val groupedBySequencedAndValidFrom = collection.result + .groupBy(x => (x.sequenced, x.validFrom)) + .toList + .sortBy { case ((_, validFrom), _) => validFrom } + MonadUtil + .sequentialTraverse_(groupedBySequencedAndValidFrom) { + case ((sequenced, effective), transactions) => + val txs = transactions.map(tx => ValidatedTopologyTransaction(tx.transaction, None)) + for { + _ <- append(sequenced, effective, txs) + _ <- updateState( + sequenced, + effective, + deactivate = Seq.empty, + positive = transactions.map(_.transaction), + ) + } yield () + } + } + + /** returns the set of positive transactions + * + * this function is used by the topology processor to determine the set of transaction, such that + * we can perform cascading updates if there was a certificate revocation + * + * @param asOfInclusive whether the search interval should include the current timepoint or not. the state at t is + * defined as "exclusive" of t, whereas for updating the state, we need to be able to query inclusive. + * @param includeSecondary some topology transactions have an "secondary" uid. 
currently, this only applies to the + * party to participant mapping where the secondary uid is the participant uid. + * we need this information during cascading updates of participant certificates. + */ + def findPositiveTransactions( + asOf: CantonTimestamp, + asOfInclusive: Boolean, + includeSecondary: Boolean, + types: Seq[DomainTopologyTransactionType], + filterUid: Option[Seq[UniqueIdentifier]], + filterNamespace: Option[Seq[Namespace]], + )(implicit + traceContext: TraceContext + ): Future[PositiveStoredTopologyTransactions] + + /** query interface used by DomainTopologyManager to find the set of initial keys */ + def findInitialState(id: DomainTopologyManagerId)(implicit + traceContext: TraceContext + ): Future[Map[Member, Seq[PublicKey]]] + + /** update active topology transaction to the active topology transaction table + * + * active means that for the key authorizing the transaction, there is a connected path to reach the root certificate + */ + def updateState( + sequenced: SequencedTime, + effective: EffectiveTime, + deactivate: Seq[UniquePath], + positive: Seq[SignedTopologyTransaction[TopologyChangeOp.Positive]], + )(implicit traceContext: TraceContext): Future[Unit] + + /** query optimized for inspection + * + * @param recentTimestampO if exists, use this timestamp for the head state to prevent race conditions on the console + */ + def inspect( + stateStore: Boolean, + timeQuery: TimeQuery, + recentTimestampO: Option[CantonTimestamp], + ops: Option[TopologyChangeOp], + typ: Option[DomainTopologyTransactionType], + idFilter: String, + namespaceOnly: Boolean, + )(implicit + traceContext: TraceContext + ): Future[StoredTopologyTransactions[TopologyChangeOp]] + + def inspectKnownParties( + timestamp: CantonTimestamp, + filterParty: String, + filterParticipant: String, + limit: Int, + )(implicit traceContext: TraceContext): Future[Set[PartyId]] + + /** find active topology transactions + * + * active / state means that for the key authorizing the 
transaction, there is a connected path to reach the root certificate + * this function is used for updating and by the lookup client [[com.digitalasset.canton.topology.client.StoreBasedTopologySnapshot]] + * + * @param asOfInclusive whether the search interval should include the current timepoint or not. the state at t is + * defined as "exclusive" of t, whereas for updating the state, we need to be able to query inclusive. + * @param includeSecondary some topology transactions have an "secondary" uid. currently, this only applies to the + * party to participant mapping where the secondary uid is the participant uid. + * we need this information during cascading updates of participant certificates. + */ + def findStateTransactions( + asOf: CantonTimestamp, + asOfInclusive: Boolean, + includeSecondary: Boolean, + types: Seq[DomainTopologyTransactionType], + filterUid: Option[Seq[UniqueIdentifier]], + filterNamespace: Option[Seq[Namespace]], + )(implicit traceContext: TraceContext): Future[PositiveStoredTopologyTransactions] + + override protected def signedTxFromStoredTx( + storedTx: StoredTopologyTransaction[TopologyChangeOp] + ): SignedTopologyTransaction[TopologyChangeOp] = storedTx.transaction + + override def providesAdditionalSignatures( + transaction: SignedTopologyTransaction[TopologyChangeOp] + )(implicit traceContext: TraceContext): Future[Boolean] = + exists(transaction).map(exists => !exists) +} + +object TopologyStore { + + def apply[StoreID <: TopologyStoreId]( + storeId: StoreID, + storage: Storage, + timeouts: ProcessingTimeout, + loggerFactory: NamedLoggerFactory, + futureSupervisor: FutureSupervisor, + )(implicit + ec: ExecutionContext + ): TopologyStore[StoreID] = + storage match { + case _: MemoryStorage => + (new InMemoryTopologyStore(storeId, loggerFactory, timeouts, futureSupervisor)) + + case jdbc: DbStorage => + new DbTopologyStore(jdbc, storeId, timeouts, loggerFactory, futureSupervisor) + + } + + sealed trait Change extends Product with 
Serializable { + def sequenced: SequencedTime + def effective: EffectiveTime + } + + object Change { + final case class TopologyDelay( + sequenced: SequencedTime, + effective: EffectiveTime, + epsilon: NonNegativeFiniteDuration, + ) extends Change + final case class Other(sequenced: SequencedTime, effective: EffectiveTime) extends Change + + def accumulateUpcomingEffectiveChanges( + items: Seq[StoredTopologyTransaction[TopologyChangeOp]] + ): Seq[TopologyStore.Change] = { + items + .map(x => (x, x.transaction.transaction.element.mapping)) + .map { + case (tx, x: DomainParametersChange) => + TopologyDelay(tx.sequenced, tx.validFrom, x.domainParameters.topologyChangeDelay) + case (tx, _) => Other(tx.sequenced, tx.validFrom) + } + .sortBy(_.effective) + .distinct + } + + } + + private[topology] final case class InsertTransaction( + transaction: SignedTopologyTransaction[TopologyChangeOp], + validUntil: Option[CantonTimestamp], + rejectionReason: Option[TopologyTransactionRejection], + ) { + def op: TopologyChangeOp = transaction.transaction.op + } + + /** collect all actions on the topology store during an append + * + * this function computes the paths of all transactions that will "expire" because of some + * removal in this update block and calculates what needs to be inserted into the store. + * we also insert transient data (for debugability and completeness). 
+ */ + private[topology] def appends( + timestamp: CantonTimestamp, + transactions: Seq[ValidatedTopologyTransaction], + )(implicit loggingContext: ErrorLoggingContext): ( + Set[UniquePath], // updates + Seq[InsertTransaction], + ) = { + val logger = loggingContext.logger + implicit val traceContext: TraceContext = loggingContext.traceContext + + final case class PendingInsert(include: Boolean, entry: InsertTransaction) + type PendingInsertIdx = Int + + // random access array in order to adjust pending insertions + val inserts = new Array[PendingInsert](transactions.length) + + def adjustPending( + index: PendingInsertIdx, + pending: Map[UniquePath, Seq[PendingInsertIdx]], + warnOnDuplicate: Boolean = true, + ): Map[UniquePath, Seq[PendingInsertIdx]] = { + val pendingInsert = inserts(index) + val op = pendingInsert.entry.op + val path = pendingInsert.entry.transaction.uniquePath + + val previous = pending.getOrElse(path, Seq()) + val previousI = previous.map(ii => (ii, inserts(ii))).filter { case (_, pendingInsert) => + pendingInsert.include + } + + op match { + // if this one is an add (resp., replace): only dedupe conflicting adds (resp., replaces) + case TopologyChangeOp.Add | TopologyChangeOp.Replace => + previousI.foreach { case (ii, item) => + // ignore conflicting add + if (item.entry.op == op) { + inserts(ii) = item.copy(include = false) + if (warnOnDuplicate) + logger.warn( + s"Discarding duplicate ${op.toString} (#$ii): ${item.entry.transaction.uniquePath}" + ) + } + // malicious domain: theoretically we could check here if a certificate has already been revoked + // previously. however, we assume that the domain topology manager would not do that generally (and we would + // have to check this also against all revocations in the database as well). 
+ // TODO(i4933) check for permanent revocations + } + // if this one is a remove: deprecate pending adds and dedupe conflicting removes + case TopologyChangeOp.Remove => + previousI.foreach { case (ii, item) => + if (item.entry.op == TopologyChangeOp.Remove) { + // ignore conflicting remove + inserts(ii) = item.copy(include = false) + logger.info( + s"Discarding conflicting removal (#$ii): ${item.entry.transaction.uniquePath}" + ) + } else { + // deprecate pending add + inserts(ii) = item.copy(entry = item.entry.copy(validUntil = Some(timestamp))) + } + } + } + pending + (path -> (previous :+ index)) + } + + def validUntil(x: SignedTopologyTransaction[TopologyChangeOp]): Option[CantonTimestamp] = + x.operation match { + case TopologyChangeOp.Remove => Some(timestamp) + case _ => None + } + + // iterate over all transactions and adjust the validity period of any transient or special transaction + val (updates, _) = + transactions.zipWithIndex.foldLeft( + (Set.empty[UniquePath], Map.empty[UniquePath, Seq[PendingInsertIdx]]) + ) { + case ( + (updates, pending), + (ValidatedTopologyTransaction(x: SignedTopologyTransaction[_], reason), index), + ) => + inserts(index) = PendingInsert( + include = true, + InsertTransaction(x, validUntil(x), reason), + ) + + (x.transaction.op: TopologyChangeOp) match { + case TopologyChangeOp.Remove | TopologyChangeOp.Replace => + // if this removal (or replace) is not authorized, then don't update the current exiting records + val newUpdates = + if (reason.isEmpty) + updates + x.uniquePath + else updates + (newUpdates, adjustPending(index, pending)) + + case TopologyChangeOp.Add => (updates, adjustPending(index, pending)) + } + } + + ( + updates, + inserts.collect { + case insert if insert.include => + val insertTx = insert.entry + // mark all rejected transactions to be validFrom = validUntil + insertTx.rejectionReason.fold(insertTx)(_ => insertTx.copy(validUntil = Some(timestamp))) + }.toSeq, + ) + } + + /** Initial state accumulator + 
* + * Initially, when bootstrapping a domain, we need to know the domain topology manager and the sequencer + * key(s) before they have been sequenced. Therefore, we'll look at the couple of first transactions of the + * authorized domain topology store. + * + * This accumulator should be iterated over until the boolean flag says its done. + */ + private[topology] def findInitialStateAccumulator( + uid: UniqueIdentifier, + accumulated: Map[Member, Seq[PublicKey]], + transaction: SignedTopologyTransaction[TopologyChangeOp], + ): (Boolean, Map[Member, Seq[PublicKey]]) = { + // we are done once we observe a transaction that does not act on our uid + val done = + transaction.uniquePath.maybeUid.nonEmpty && !transaction.uniquePath.maybeUid.contains(uid) && + accumulated.isDefinedAt(SequencerId(uid)) && accumulated.isDefinedAt( + DomainTopologyManagerId(uid) + ) + if (done || transaction.uniquePath.dbType != DomainTopologyTransactionType.OwnerToKeyMapping) { + (done, accumulated) + } else { + transaction match { + case SignedTopologyTransaction( + TopologyStateUpdate( + TopologyChangeOp.Add, + TopologyStateUpdateElement(_, OwnerToKeyMapping(owner, key)), + ), + _, + _, + ) if owner.code == SequencerId.Code || owner.code == DomainTopologyManagerId.Code => + (false, accumulated.updated(owner, accumulated.getOrElse(owner, Seq()) :+ key)) + case _ => (false, accumulated) + } + } + } + + lazy val initialParticipantDispatchingSet = Set( + DomainTopologyTransactionType.ParticipantState, + DomainTopologyTransactionType.OwnerToKeyMapping, + DomainTopologyTransactionType.SignedLegalIdentityClaim, + ) + + def filterInitialParticipantDispatchingTransactions( + participantId: ParticipantId, + domainId: DomainId, + store: TopologyStore[TopologyStoreId], + loggerFactory: NamedLoggerFactory, + transactions: StoredTopologyTransactions[TopologyChangeOp], + timeouts: ProcessingTimeout, + futureSupervisor: FutureSupervisor, + )(implicit + traceContext: TraceContext, + executionContext: 
ExecutionContext, + ): FutureUnlessShutdown[Seq[SignedTopologyTransaction[TopologyChangeOp]]] = { + val logger = loggerFactory.getLogger(getClass) + def includeState(mapping: TopologyStateUpdateMapping): Boolean = mapping match { + case NamespaceDelegation(_, _, _) | IdentifierDelegation(_, _) => + // note for future devs: this function here should only be supplied with core mappings that need to be + // sent to the topology manager on bootstrapping. so the query that picks these transactions up should + // not include namespace delegations and therelike + // note that we'll pick up the necessary certificates further below + logger.error("Initial dispatching should not include namespace or identifier delegations") + false + case OwnerToKeyMapping(pid, _) => pid == participantId + case SignedLegalIdentityClaim(uid, _, _) => uid == participantId.uid + case ParticipantState(_, _, pid, _, _) => pid == participantId + case PartyToParticipant(_, _, _, _) => false + case VettedPackages(_, _) => false + case MediatorDomainState(_, _, _) => false + } + def include(mapping: TopologyMapping): Boolean = mapping match { + case mapping: TopologyStateUpdateMapping => includeState(mapping) + case _ => false + } + val validator = + new SnapshotAuthorizationValidator( + CantonTimestamp.MaxValue, + store, + timeouts, + loggerFactory, + futureSupervisor, + ) + val filtered = transactions.result.filter(tx => + tx.transaction.transaction.element.mapping.restrictedToDomain + .forall(_ == domainId) && include(tx.transaction.transaction.element.mapping) + ) + val authF = filtered.toList + .parFlatTraverse(tx => + validator + .authorizedBy(tx.transaction) + .map(_.toList) + ) + .map(_.foldLeft(AuthorizationChain.empty) { case (acc, elem) => acc.merge(elem) }) + authF.map { chain => + // put all transactions into the correct order to ensure that the authorizations come first + chain.namespaceDelegations.map(_.transaction) ++ chain.identifierDelegation.map( + _.transaction + ) ++ 
filtered.map(_.transaction) + } + } + + /** convenience method waiting until the last eligible transaction inserted into the source store has been dispatched successfully to the target domain */ + def awaitTxObserved( + client: DomainTopologyClient, + transaction: SignedTopologyTransaction[TopologyChangeOp], + target: TopologyStore[DomainStore], + timeout: Duration, + )(implicit + traceContext: TraceContext, + executionContext: ExecutionContext, + ): FutureUnlessShutdown[Boolean] = { + client.await( + // we know that the transaction is stored and effective once we find it in the target + // domain store and once the effective time (valid from) is smaller than the client timestamp + sp => target.findStored(transaction).map(_.exists(_.validFrom.value < sp.timestamp)), + timeout, + ) + } + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyStoreX.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyStoreX.scala new file mode 100644 index 0000000000..84458d1a30 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyStoreX.scala @@ -0,0 +1,380 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology.store + +import cats.syntax.traverse.* +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.ProtoDeserializationError +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.resource.{DbStorage, MemoryStorage, Storage} +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.topology.* +import com.digitalasset.canton.topology.admin.v1 as topoV1 +import com.digitalasset.canton.topology.client.DomainTopologyClient +import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime} +import com.digitalasset.canton.topology.store.StoredTopologyTransactionX.GenericStoredTopologyTransactionX +import com.digitalasset.canton.topology.store.StoredTopologyTransactionsX.{ + GenericStoredTopologyTransactionsX, + PositiveStoredTopologyTransactionsX, +} +import com.digitalasset.canton.topology.store.TopologyStore.Change.{Other, TopologyDelay} +import com.digitalasset.canton.topology.store.TopologyTransactionRejection.Duplicate +import com.digitalasset.canton.topology.store.ValidatedTopologyTransactionX.GenericValidatedTopologyTransactionX +import com.digitalasset.canton.topology.store.db.DbTopologyStoreX +import com.digitalasset.canton.topology.store.memory.InMemoryTopologyStoreX +import com.digitalasset.canton.topology.transaction.SignedTopologyTransactionX.GenericSignedTopologyTransactionX +import com.digitalasset.canton.topology.transaction.TopologyMappingX.MappingHash +import com.digitalasset.canton.topology.transaction.TopologyTransactionX.{ + GenericTopologyTransactionX, + TxHash, +} +import 
com.digitalasset.canton.topology.transaction.* +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.version.ProtocolVersion + +import scala.concurrent.duration.Duration +import scala.concurrent.{ExecutionContext, Future} +import scala.reflect.ClassTag + +final case class StoredTopologyTransactionX[+Op <: TopologyChangeOpX, +M <: TopologyMappingX]( + sequenced: SequencedTime, + validFrom: EffectiveTime, + validUntil: Option[EffectiveTime], + transaction: SignedTopologyTransactionX[Op, M], +) extends PrettyPrinting { + override def pretty: Pretty[StoredTopologyTransactionX.this.type] = + prettyOfClass( + param("sequenced", _.sequenced.value), + param("validFrom", _.validFrom.value), + paramIfDefined("validUntil", _.validUntil.map(_.value)), + param("op", _.transaction.transaction.op), + param("serial", _.transaction.transaction.serial), + param("mapping", _.transaction.transaction.mapping), + param("signatures", _.transaction.signatures), + ) + + @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) + def selectMapping[TargetMapping <: TopologyMappingX: ClassTag] = transaction + .selectMapping[TargetMapping] + .map(_ => this.asInstanceOf[StoredTopologyTransactionX[Op, TargetMapping]]) + + @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) + def selectOp[TargetOp <: TopologyChangeOpX: ClassTag] = transaction + .selectOp[TargetOp] + .map(_ => this.asInstanceOf[StoredTopologyTransactionX[TargetOp, M]]) + + def mapping: M = transaction.transaction.mapping +} + +object StoredTopologyTransactionX { + type GenericStoredTopologyTransactionX = + StoredTopologyTransactionX[TopologyChangeOpX, TopologyMappingX] +} + +final case class ValidatedTopologyTransactionX[+Op <: TopologyChangeOpX, +M <: TopologyMappingX]( + transaction: SignedTopologyTransactionX[Op, M], + rejectionReason: Option[TopologyTransactionRejection] = None, + expireImmediately: Boolean = false, +) { + def nonDuplicateRejectionReason: 
Option[TopologyTransactionRejection] = rejectionReason match { + case Some(Duplicate(_)) => None + case otherwise => otherwise + } + + def collectOfMapping[TargetM <: TopologyMappingX: ClassTag] + : Option[ValidatedTopologyTransactionX[Op, TargetM]] = + transaction.selectMapping[TargetM].map(tx => copy[Op, TargetM](transaction = tx)) + + def collectOf[TargetO <: TopologyChangeOpX: ClassTag, TargetM <: TopologyMappingX: ClassTag] + : Option[ValidatedTopologyTransactionX[TargetO, TargetM]] = + transaction.select[TargetO, TargetM].map(tx => copy[TargetO, TargetM](transaction = tx)) +} + +object ValidatedTopologyTransactionX { + type GenericValidatedTopologyTransactionX = + ValidatedTopologyTransactionX[TopologyChangeOpX, TopologyMappingX] +} +abstract class TopologyStoreX[+StoreID <: TopologyStoreId](implicit + val ec: ExecutionContext +) extends AutoCloseable + with TopologyStoreCommon[ + StoreID, + GenericValidatedTopologyTransactionX, + GenericStoredTopologyTransactionX, + GenericSignedTopologyTransactionX, + ] { + this: NamedLogging => + + def findTransactionsByTxHash(asOfExclusive: EffectiveTime, hashes: NonEmpty[Set[TxHash]])(implicit + traceContext: TraceContext + ): Future[Seq[GenericSignedTopologyTransactionX]] + + def findProposalsByTxHash(asOfExclusive: EffectiveTime, hashes: NonEmpty[Set[TxHash]])(implicit + traceContext: TraceContext + ): Future[Seq[GenericSignedTopologyTransactionX]] + + def findTransactionsForMapping(asOfExclusive: EffectiveTime, hashes: NonEmpty[Set[MappingHash]])( + implicit traceContext: TraceContext + ): Future[Seq[GenericSignedTopologyTransactionX]] + + /** returns the set of positive transactions + * + * this function is used by the topology processor to determine the set of transaction, such that + * we can perform cascading updates if there was a certificate revocation + * + * @param asOfInclusive whether the search interval should include the current timepoint or not. 
the state at t is + * defined as "exclusive" of t, whereas for updating the state, we need to be able to query inclusive. + */ + def findPositiveTransactions( + asOf: CantonTimestamp, + asOfInclusive: Boolean, + isProposal: Boolean, + types: Seq[TopologyMappingX.Code], + filterUid: Option[Seq[UniqueIdentifier]], + filterNamespace: Option[Seq[Namespace]], + )(implicit + traceContext: TraceContext + ): Future[PositiveStoredTopologyTransactionsX] + + /** add validated topology transaction as is to the topology transaction table */ + def update( + sequenced: SequencedTime, + effective: EffectiveTime, + removeMapping: Set[MappingHash], + removeTxs: Set[TxHash], + additions: Seq[GenericValidatedTopologyTransactionX], + )(implicit + traceContext: TraceContext + ): Future[Unit] + + // TODO(#14048) only a temporary crutch to inspect the topology state + def dumpStoreContent()(implicit traceContext: TraceContext): Unit + + /** store an initial set of topology transactions as given into the store */ + def bootstrap(snapshot: GenericStoredTopologyTransactionsX)(implicit + traceContext: TraceContext + ): Future[Unit] + + /** query optimized for inspection + * + * @param proposals if true, query only for proposals instead of approved transaction mappings + * @param recentTimestampO if exists, use this timestamp for the head state to prevent race conditions on the console + */ + def inspect( + proposals: Boolean, + timeQuery: TimeQueryX, + // TODO(#14048) - consider removing `recentTimestampO` and moving callers to TimeQueryX.Snapshot + recentTimestampO: Option[CantonTimestamp], + op: Option[TopologyChangeOpX], + typ: Option[TopologyMappingX.Code], + idFilter: String, + namespaceOnly: Boolean, + )(implicit + traceContext: TraceContext + ): Future[StoredTopologyTransactionsX[TopologyChangeOpX, TopologyMappingX]] + + def inspectKnownParties( + timestamp: CantonTimestamp, + filterParty: String, + filterParticipant: String, + limit: Int, + )(implicit traceContext: TraceContext): 
Future[Set[PartyId]] + + def findFirstMediatorStateForMediator( + mediatorId: MediatorId + )(implicit + traceContext: TraceContext + ): Future[Option[StoredTopologyTransactionX[TopologyChangeOpX.Replace, MediatorDomainStateX]]] + + def findFirstTrustCertificateForParticipant( + participant: ParticipantId + )(implicit + traceContext: TraceContext + ): Future[Option[StoredTopologyTransactionX[TopologyChangeOpX.Replace, DomainTrustCertificateX]]] + + def findEssentialStateForMember( + member: Member, + asOfInclusive: CantonTimestamp, + )(implicit traceContext: TraceContext): Future[GenericStoredTopologyTransactionsX] + + protected def signedTxFromStoredTx( + storedTx: GenericStoredTopologyTransactionX + ): SignedTopologyTransactionX[TopologyChangeOpX, TopologyMappingX] = storedTx.transaction + + override def providesAdditionalSignatures( + transaction: GenericSignedTopologyTransactionX + )(implicit traceContext: TraceContext): Future[Boolean] = { + findStored(transaction).map(_.forall { inStore => + // check whether source still could provide an additional signature + transaction.signatures.diff(inStore.transaction.signatures.forgetNE).nonEmpty && + // but only if the transaction in the target store is a valid proposal + inStore.transaction.isProposal && + inStore.validUntil.isEmpty + }) + } + + /** returns initial set of onboarding transactions that should be dispatched to the domain */ + def findParticipantOnboardingTransactions(participantId: ParticipantId, domainId: DomainId)( + implicit traceContext: TraceContext + ): FutureUnlessShutdown[Seq[GenericSignedTopologyTransactionX]] + + def findDispatchingTransactionsAfter( + timestampExclusive: CantonTimestamp, + limit: Option[Int], + )(implicit + traceContext: TraceContext + ): Future[GenericStoredTopologyTransactionsX] + + def findStoredForVersion( + transaction: GenericTopologyTransactionX, + protocolVersion: ProtocolVersion, + )(implicit + traceContext: TraceContext + ): 
Future[Option[GenericStoredTopologyTransactionX]] +} + +object TopologyStoreX { + def accumulateUpcomingEffectiveChanges( + items: Seq[StoredTopologyTransactionX[TopologyChangeOpX, TopologyMappingX]] + ): Seq[TopologyStore.Change] = { + items + .map(x => (x, x.transaction.transaction.mapping)) + .map { + case (tx, x: DomainParametersStateX) => + TopologyDelay(tx.sequenced, tx.validFrom, x.parameters.topologyChangeDelay) + case (tx, _) => Other(tx.sequenced, tx.validFrom) + } + .sortBy(_.effective) + .distinct + } + + def apply[StoreID <: TopologyStoreId]( + storeId: StoreID, + storage: Storage, + timeouts: ProcessingTimeout, + loggerFactory: NamedLoggerFactory, + )(implicit + ec: ExecutionContext + ): TopologyStoreX[StoreID] = { + val storeLoggerFactory = loggerFactory.append("store", storeId.toString) + storage match { + case _: MemoryStorage => + new InMemoryTopologyStoreX(storeId, storeLoggerFactory, timeouts) + case dbStorage: DbStorage => + new DbTopologyStoreX(dbStorage, storeId, timeouts, storeLoggerFactory) + } + } + + lazy val initialParticipantDispatchingSet = Set( + TopologyMappingX.Code.DomainTrustCertificateX, + TopologyMappingX.Code.OwnerToKeyMappingX, + // TODO(#14060) - potentially revisit this once we implement TopologyStoreX.filterInitialParticipantDispatchingTransactions + TopologyMappingX.Code.NamespaceDelegationX, + TopologyMappingX.Code.IdentifierDelegationX, + TopologyMappingX.Code.UnionspaceDefinitionX, + ) + + def filterInitialParticipantDispatchingTransactions( + participantId: ParticipantId, + domainId: DomainId, + transactions: Seq[GenericStoredTopologyTransactionX], + ): Seq[GenericSignedTopologyTransactionX] = { + // TODO(#14060): Extend filtering along the lines of: + // TopologyStore.filterInitialParticipantDispatchingTransactions + transactions.map(_.transaction).collect { + case tx @ SignedTopologyTransactionX( + TopologyTransactionX(_, _, DomainTrustCertificateX(`participantId`, `domainId`, _, _)), + _, + _, + ) => + tx + case tx 
@ SignedTopologyTransactionX( + TopologyTransactionX(_, _, OwnerToKeyMappingX(`participantId`, _, _)), + _, + _, + ) => + tx + case tx @ SignedTopologyTransactionX( + TopologyTransactionX(_, _, NamespaceDelegationX(ns, _, _)), + _, + _, + ) if ns == participantId.uid.namespace => + tx + case tx @ SignedTopologyTransactionX( + TopologyTransactionX(_, _, IdentifierDelegationX(uid, _)), + _, + _, + ) if uid == participantId.uid => + tx + case tx @ SignedTopologyTransactionX( + TopologyTransactionX(_, _, _: UnionspaceDefinitionX), + _, + _, + ) => + tx + } + } + + /** convenience method waiting until the last eligible transaction inserted into the source store has been dispatched successfully to the target domain */ + def awaitTxObserved( + client: DomainTopologyClient, + transaction: GenericSignedTopologyTransactionX, + target: TopologyStoreX[?], + timeout: Duration, + )(implicit + traceContext: TraceContext, + executionContext: ExecutionContext, + ): FutureUnlessShutdown[Boolean] = { + client.await( + // we know that the transaction is stored and effective once we find it in the target + // domain store and once the effective time (valid from) is smaller than the client timestamp + sp => + target + .findStored(transaction, includeRejected = true) + .map(_.exists(_.validFrom.value < sp.timestamp)), + timeout, + ) + } +} + +sealed trait TimeQueryX { + def toProtoV1: topoV1.BaseQuery.TimeQuery +} +object TimeQueryX { + object HeadState extends TimeQueryX { + override def toProtoV1: topoV1.BaseQuery.TimeQuery = + topoV1.BaseQuery.TimeQuery.HeadState(com.google.protobuf.empty.Empty()) + } + final case class Snapshot(asOf: CantonTimestamp) extends TimeQueryX { + override def toProtoV1: topoV1.BaseQuery.TimeQuery = + topoV1.BaseQuery.TimeQuery.Snapshot(asOf.toProtoPrimitive) + } + final case class Range(from: Option[CantonTimestamp], until: Option[CantonTimestamp]) + extends TimeQueryX { + override def toProtoV1: topoV1.BaseQuery.TimeQuery = 
topoV1.BaseQuery.TimeQuery.Range( + topoV1.BaseQuery.TimeRange(from.map(_.toProtoPrimitive), until.map(_.toProtoPrimitive)) + ) + } + + def fromProto( + proto: topoV1.BaseQuery.TimeQuery, + fieldName: String, + ): ParsingResult[TimeQueryX] = + proto match { + case topoV1.BaseQuery.TimeQuery.Empty => + Left(ProtoDeserializationError.FieldNotSet(fieldName)) + case topoV1.BaseQuery.TimeQuery.Snapshot(value) => + CantonTimestamp.fromProtoPrimitive(value).map(Snapshot) + case topoV1.BaseQuery.TimeQuery.HeadState(_) => Right(HeadState) + case topoV1.BaseQuery.TimeQuery.Range(value) => + for { + fromO <- value.from.traverse(CantonTimestamp.fromProtoPrimitive) + toO <- value.until.traverse(CantonTimestamp.fromProtoPrimitive) + } yield Range(fromO, toO) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyTransactionCollection.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyTransactionCollection.scala new file mode 100644 index 0000000000..89cbbb7092 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyTransactionCollection.scala @@ -0,0 +1,298 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology.store + +import cats.syntax.functorFilter.* +import cats.syntax.parallel.* +import cats.syntax.traverse.* +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.protocol.v0 +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.topology.processing.{ + AuthorizedTopologyTransaction, + EffectiveTime, + SequencedTime, +} +import com.digitalasset.canton.topology.transaction.TopologyChangeOp.{ + Add, + Positive, + Remove, + Replace, +} +import com.digitalasset.canton.topology.transaction.* +import com.digitalasset.canton.util.FutureInstances.* +import com.digitalasset.canton.version.* + +import scala.concurrent.{ExecutionContext, Future} + +final case class StoredTopologyTransactions[+Op <: TopologyChangeOp]( + result: Seq[StoredTopologyTransaction[Op]] +) extends HasVersionedWrapper[StoredTopologyTransactions[TopologyChangeOp]] + with PrettyPrinting { + + override protected def companionObj = StoredTopologyTransactions + + override def pretty: Pretty[StoredTopologyTransactions.this.type] = prettyOfParam( + _.result + ) + + def toTopologyState: List[TopologyStateElement[TopologyMapping]] = + result.map(_.transaction.transaction.element).toList + + def toDomainTopologyTransactions: Seq[SignedTopologyTransaction[Op]] = + result.map(_.transaction) + + def toProtoV0: v0.TopologyTransactions = v0.TopologyTransactions( + items = result.map { item => + v0.TopologyTransactions.Item( + sequenced = Some(item.sequenced.toProtoPrimitive), + validFrom = Some(item.validFrom.toProtoPrimitive), + validUntil = item.validUntil.map(_.toProtoPrimitive), + // these transactions are serialized as versioned topology transactions + transaction = item.transaction.getCryptographicEvidence, + ) + } + ) 
+ + def toAuthorizedTopologyTransactions[T <: TopologyMapping]( + collector: PartialFunction[TopologyMapping, T] + ): Seq[AuthorizedTopologyTransaction[T]] = { + + val transactions: Seq[SignedTopologyTransaction[TopologyChangeOp]] = result.map(_.transaction) + + transactions.flatMap { + case sit @ SignedTopologyTransaction( + TopologyStateUpdate(Add, TopologyStateUpdateElement(_, mapping)), + key, + _, + ) => + collector + .lift(mapping) + .map { matched => + AuthorizedTopologyTransaction(sit.uniquePath, matched, sit) + } + .toList + case _ => Seq() + } + } + + def collectOfType[T <: TopologyChangeOp](implicit + checker: TopologyChangeOp.OpTypeChecker[T] + ): StoredTopologyTransactions[T] = StoredTopologyTransactions( + result.mapFilter(TopologyChangeOp.select[T]) + ) + + def split: ( + StoredTopologyTransactions[Add], + StoredTopologyTransactions[Remove], + StoredTopologyTransactions[Replace], + ) = { + val (adds, removes, replaces) = TopologyTransactionSplitter[Op, StoredTopologyTransaction]( + collection = result, + opProjector = _.transaction.operation, + addSelector = TopologyChangeOp.select[Add](_), + removeSelector = TopologyChangeOp.select[Remove](_), + replaceSelector = TopologyChangeOp.select[Replace](_), + ) + + ( + StoredTopologyTransactions(adds), + StoredTopologyTransactions(removes), + StoredTopologyTransactions(replaces), + ) + } + + def positiveTransactions: PositiveStoredTopologyTransactions = { + val (adds, _, replaces) = split + PositiveStoredTopologyTransactions(adds, replaces) + } + + /** Split transactions into certificates and everything else (used when uploading to a participant) */ + def splitCertsAndRest: StoredTopologyTransactions.CertsAndRest[Op] = { + val certTypes = Set( + DomainTopologyTransactionType.IdentifierDelegation, + DomainTopologyTransactionType.NamespaceDelegation, + ) + val empty = Seq.empty[StoredTopologyTransaction[Op]] + val (certs, rest) = result.foldLeft((empty, empty)) { case ((certs, rest), tx) => + if 
(certTypes.contains(tx.transaction.uniquePath.dbType)) + (certs :+ tx, rest) + else + (certs, rest :+ tx) + } + StoredTopologyTransactions.CertsAndRest(certs, rest) + } + + /** The timestamp of the last topology transaction (if there is at least one) + * adjusted by topology change delay + */ + def lastChangeTimestamp: Option[CantonTimestamp] = result + .map(_.sequenced.value) + .maxOption +} + +object StoredTopologyTransactions + extends HasVersionedMessageCompanion[ + StoredTopologyTransactions[TopologyChangeOp], + ] { + + val supportedProtoVersions: SupportedProtoVersions = SupportedProtoVersions( + ProtoVersion(0) -> ProtoCodec( + ProtocolVersion.v30, + supportedProtoVersion(v0.TopologyTransactions)(fromProtoV0), + _.toProtoV0.toByteString, + ) + ) + + def fromProtoV0( + value: v0.TopologyTransactions + ): ParsingResult[StoredTopologyTransactions[TopologyChangeOp]] = { + def parseItem( + item: v0.TopologyTransactions.Item + ): ParsingResult[StoredTopologyTransaction[TopologyChangeOp]] = { + for { + sequenced <- ProtoConverter.parseRequired( + SequencedTime.fromProtoPrimitive, + "sequenced", + item.sequenced, + ) + validFrom <- ProtoConverter.parseRequired( + EffectiveTime.fromProtoPrimitive, + "valid_from", + item.validFrom, + ) + validUntil <- item.validFrom.traverse(EffectiveTime.fromProtoPrimitive) + transaction <- SignedTopologyTransaction.fromByteString(item.transaction) + } yield StoredTopologyTransaction( + sequenced, + validFrom, + validUntil, + transaction, + ) + } + value.items + .traverse(parseItem) + .map(StoredTopologyTransactions(_)) + } + + final case class CertsAndRest[+Op <: TopologyChangeOp]( + certs: Seq[StoredTopologyTransaction[Op]], + rest: Seq[StoredTopologyTransaction[Op]], + ) + + def empty[Op <: TopologyChangeOp]: StoredTopologyTransactions[Op] = + StoredTopologyTransactions(Seq()) + + override def name: String = "topology transactions" +} + +final case class PositiveStoredTopologyTransactions( + adds: StoredTopologyTransactions[Add], 
+ replaces: StoredTopologyTransactions[Replace], +) { + def toIdentityState: List[TopologyStateElement[TopologyMapping]] = + adds.toTopologyState ++ replaces.toTopologyState + + def combine: StoredTopologyTransactions[Positive] = StoredTopologyTransactions( + adds.result ++ replaces.result + ) + + def signedTransactions: PositiveSignedTopologyTransactions = PositiveSignedTopologyTransactions( + SignedTopologyTransactions(adds.toDomainTopologyTransactions), + SignedTopologyTransactions(replaces.toDomainTopologyTransactions), + ) +} + +final case class SignedTopologyTransactions[+Op <: TopologyChangeOp]( + result: Seq[SignedTopologyTransaction[Op]] +) { + def isEmpty: Boolean = result.isEmpty + def size: Int = result.size + + def collectOfType[T <: TopologyChangeOp](implicit + checker: TopologyChangeOp.OpTypeChecker[T] + ): SignedTopologyTransactions[T] = SignedTopologyTransactions( + result.mapFilter(TopologyChangeOp.select[T]) + ) + + def split: ( + SignedTopologyTransactions[Add], + SignedTopologyTransactions[Remove], + SignedTopologyTransactions[Replace], + ) = { + val (adds, removes, replaces) = TopologyTransactionSplitter[Op, SignedTopologyTransaction]( + collection = result, + opProjector = _.operation, + addSelector = TopologyChangeOp.select[Add](_), + removeSelector = TopologyChangeOp.select[Remove](_), + replaceSelector = TopologyChangeOp.select[Replace](_), + ) + + ( + SignedTopologyTransactions(adds), + SignedTopologyTransactions(removes), + SignedTopologyTransactions(replaces), + ) + } + + def splitForStateUpdate + : (Seq[UniquePath], Seq[SignedTopologyTransaction[TopologyChangeOp.Positive]]) = { + val (adds, removes, replaces) = split + val deactivate = removes.result.map(_.uniquePath) ++ replaces.result.map(_.uniquePath) + val positive = adds.result ++ replaces.result + (deactivate, positive) + } + + def filter(predicate: SignedTopologyTransaction[Op] => Boolean): SignedTopologyTransactions[Op] = + this.copy(result = result.filter(predicate)) + + def 
filter( + predicate: SignedTopologyTransaction[Op] => Future[Boolean] + )(implicit executionContext: ExecutionContext): Future[SignedTopologyTransactions[Op]] = { + result.parTraverseFilter(tx => predicate(tx).map(Option.when(_)(tx))).map(this.copy) + } +} + +final case class PositiveSignedTopologyTransactions( + adds: SignedTopologyTransactions[Add], + replaces: SignedTopologyTransactions[Replace], +) { + def filter( + predicate: SignedTopologyTransaction[Positive] => Boolean + ): PositiveSignedTopologyTransactions = + this.copy(adds = adds.filter(predicate), replaces = replaces.filter(predicate)) +} + +object TopologyTransactionSplitter { + import TopologyChangeOp.* + + def apply[Op <: TopologyChangeOp, F[_ <: TopologyChangeOp]]( + collection: Seq[F[TopologyChangeOp]], + opProjector: F[TopologyChangeOp] => TopologyChangeOp, + addSelector: F[TopologyChangeOp] => Option[F[Add]], + removeSelector: F[TopologyChangeOp] => Option[F[Remove]], + replaceSelector: F[TopologyChangeOp] => Option[F[Replace]], + ): (Seq[F[Add]], Seq[F[Remove]], Seq[F[Replace]]) = { + + val (adds, removes, replaces) = { + ( + Vector.newBuilder[F[Add]], + Vector.newBuilder[F[Remove]], + Vector.newBuilder[F[Replace]], + ) + } + // normally, most of the txs are adds, so we preallocate the size + adds.sizeHint(collection.size) + collection + .map(e => (e, opProjector(e))) + .foreach { + case (element, Add) => addSelector(element).foreach(adds.addOne) + case (element, Remove) => removeSelector(element).foreach(removes.addOne) + case (element, Replace) => replaceSelector(element).foreach(replaces.addOne) + } + (adds.result(), removes.result(), replaces.result()) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyTransactionCollectionX.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyTransactionCollectionX.scala new file mode 100644 index 0000000000..7939eb7a69 --- /dev/null +++ 
b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyTransactionCollectionX.scala @@ -0,0 +1,195 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology.store + +import cats.syntax.functorFilter.* +import cats.syntax.traverse.* +import com.daml.nonempty.NonEmptyReturningOps.* +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.protocol.v0 +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime} +import com.digitalasset.canton.topology.store.StoredTopologyTransactionX.GenericStoredTopologyTransactionX +import com.digitalasset.canton.topology.transaction.* +import com.digitalasset.canton.version.* + +import scala.reflect.ClassTag + +final case class StoredTopologyTransactionsX[+Op <: TopologyChangeOpX, +M <: TopologyMappingX]( + result: Seq[StoredTopologyTransactionX[Op, M]] +) extends HasVersionedWrapper[StoredTopologyTransactionsX[TopologyChangeOpX, TopologyMappingX]] + with PrettyPrinting { + + override protected def companionObj = StoredTopologyTransactionsX + + override def pretty: Pretty[StoredTopologyTransactionsX.this.type] = prettyOfParam( + _.result + ) + + def toTopologyState: List[M] = + result.map(_.transaction.transaction.mapping).toList + + // note, we are reusing v0, as v0 just expects bytestrings ... 
+ def toProtoV0: v0.TopologyTransactions = v0.TopologyTransactions( + items = result.map { item => + v0.TopologyTransactions.Item( + sequenced = Some(item.sequenced.toProtoPrimitive), + validFrom = Some(item.validFrom.toProtoPrimitive), + validUntil = item.validUntil.map(_.toProtoPrimitive), + // these transactions are serialized as versioned topology transactions + transaction = item.transaction.toByteString, + ) + } + ) + + def collectOfType[T <: TopologyChangeOpX: ClassTag]: StoredTopologyTransactionsX[T, M] = + StoredTopologyTransactionsX( + result.mapFilter(_.selectOp[T]) + ) + + def collectOfMapping[T <: TopologyMappingX: ClassTag]: StoredTopologyTransactionsX[Op, T] = + StoredTopologyTransactionsX( + result.mapFilter(_.selectMapping[T]) + ) + + def collectLatestByUniqueKey: StoredTopologyTransactionsX[Op, M] = + StoredTopologyTransactionsX( + result + .groupBy1(_.transaction.transaction.mapping.uniqueKey) + .view + .mapValues(_.last1) + .values + .toSeq + ) + + def signedTransactions: SignedTopologyTransactionsX[Op, M] = SignedTopologyTransactionsX( + result.map(_.transaction) + ) + + /** Split transactions into certificates and everything else (used when uploading to a participant) */ + def splitCertsAndRest: StoredTopologyTransactionsX.CertsAndRest = { + val certTypes = Set( + TopologyMappingX.Code.NamespaceDelegationX, + TopologyMappingX.Code.UnionspaceDefinitionX, + TopologyMappingX.Code.IdentifierDelegationX, + ) + val empty = Seq.empty[GenericStoredTopologyTransactionX] + val (certs, rest) = result.foldLeft((empty, empty)) { case ((certs, rest), tx) => + if (certTypes.contains(tx.transaction.transaction.mapping.code)) + (certs :+ tx, rest) + else + (certs, rest :+ tx) + } + StoredTopologyTransactionsX.CertsAndRest(certs, rest) + } + + /** The timestamp of the last topology transaction (if there is at least one) + * adjusted by topology change delay + */ + def lastChangeTimestamp: Option[CantonTimestamp] = result + .map(_.sequenced.value) + .maxOption + 
  /** Reinterprets this collection as a snapshot taken at the maximum effective time
    * found in `result`: any `validUntil` later than that point is cleared.
    * An empty collection is returned unchanged.
    */
  def asSnapshotAtMaxEffectiveTime: StoredTopologyTransactionsX[Op, M] = {
    result
      .map(_.validFrom.value)
      .maxOption
      .map { maxEffective =>
        // all transactions with a validUntil > the maxEffective should set validUntil to None, to reflect
        // the state of the transactions as of maxEffective
        StoredTopologyTransactionsX(result.map { storedTx =>
          if (storedTx.validUntil.exists(_.value > maxEffective)) {
            storedTx.copy(validUntil = None)
          } else storedTx
        })
      }
      .getOrElse(this) // this case is triggered by `result` being empty
  }
}

object StoredTopologyTransactionsX
    extends HasVersionedMessageCompanion[
      StoredTopologyTransactionsX[TopologyChangeOpX, TopologyMappingX],
    ] {

  type GenericStoredTopologyTransactionsX =
    StoredTopologyTransactionsX[TopologyChangeOpX, TopologyMappingX]
  type PositiveStoredTopologyTransactionsX =
    StoredTopologyTransactionsX[TopologyChangeOpX.Replace, TopologyMappingX]

  // Single supported wire format: proto v0, tied to protocol version v30.
  val supportedProtoVersions: SupportedProtoVersions = SupportedProtoVersions(
    ProtoVersion(0) -> ProtoCodec(
      ProtocolVersion.v30,
      supportedProtoVersion(v0.TopologyTransactions)(fromProtoV0),
      _.toProtoV0.toByteString,
    )
  )

  /** Parses the v0 wire format produced by `toProtoV0`.
    *
    * `sequenced` and `valid_from` are required fields; `valid_until` is optional.
    * Each embedded transaction is deserialized as a versioned signed topology transaction.
    */
  def fromProtoV0(
      value: v0.TopologyTransactions
  ): ParsingResult[GenericStoredTopologyTransactionsX] = {
    def parseItem(
        item: v0.TopologyTransactions.Item
    ): ParsingResult[GenericStoredTopologyTransactionX] = {
      for {
        sequenced <- ProtoConverter.parseRequired(
          SequencedTime.fromProtoPrimitive,
          "sequenced",
          item.sequenced,
        )
        validFrom <- ProtoConverter.parseRequired(
          EffectiveTime.fromProtoPrimitive,
          "valid_from",
          item.validFrom,
        )
        validUntil <- item.validUntil.traverse(EffectiveTime.fromProtoPrimitive)
        transaction <- SignedTopologyTransactionX.fromByteString(item.transaction)
      } yield StoredTopologyTransactionX(
        sequenced,
        validFrom,
        validUntil,
        transaction,
      )
    }
    value.items
      .traverse(parseItem)
      .map(StoredTopologyTransactionsX(_))
  }

  /** Result of `splitCertsAndRest`: certificate-type transactions vs. everything else. */
  final case class CertsAndRest(
      certs: Seq[GenericStoredTopologyTransactionX],
      rest: Seq[GenericStoredTopologyTransactionX],
  )

  def empty: GenericStoredTopologyTransactionsX =
    StoredTopologyTransactionsX[TopologyChangeOpX, TopologyMappingX](Seq())

  override def name: String = "topology transactions"
}

/** A collection of signed topology transactions (no storage metadata attached). */
final case class SignedTopologyTransactionsX[+Op <: TopologyChangeOpX, +M <: TopologyMappingX](
    result: Seq[SignedTopologyTransactionX[Op, M]]
) extends PrettyPrinting {

  override def pretty: Pretty[SignedTopologyTransactionsX.this.type] = prettyOfParam(
    _.result
  )

  /** Keeps only the transactions whose topology change operation is of type `T`. */
  def collectOfType[T <: TopologyChangeOpX: ClassTag]: SignedTopologyTransactionsX[T, M] =
    SignedTopologyTransactionsX(
      result.mapFilter(_.selectOp[T])
    )

  /** Keeps only the transactions whose topology mapping is of type `T`. */
  def collectOfMapping[T <: TopologyMappingX: ClassTag]: SignedTopologyTransactionsX[Op, T] =
    SignedTopologyTransactionsX(
      result.mapFilter(_.selectMapping[T])
    )
}

object SignedTopologyTransactionsX {
  type PositiveSignedTopologyTransactionsX =
    SignedTopologyTransactionsX[TopologyChangeOpX.Replace, TopologyMappingX]
}
diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/store/db/DbTopologyStore.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/store/db/DbTopologyStore.scala
new file mode 100644
index 0000000000..a6d1381024
--- /dev/null
+++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/store/db/DbTopologyStore.scala
@@ -0,0 +1,1022 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.topology.store.db

import cats.syntax.functorFilter.*
import cats.syntax.parallel.*
import com.daml.nameof.NameOf.functionFullName
import com.digitalasset.canton.concurrent.FutureSupervisor
import com.digitalasset.canton.config.CantonRequireTypes.LengthLimitedString.DisplayName
import com.digitalasset.canton.config.CantonRequireTypes.{
  LengthLimitedString,
  String185,
  String255,
  String300,
}
import com.digitalasset.canton.config.ProcessingTimeout
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.crypto.{Fingerprint, PublicKey}
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.metrics.TimedLoadGauge
import com.digitalasset.canton.protocol.DynamicDomainParameters
import com.digitalasset.canton.resource.DbStorage.{DbAction, SQLActionBuilderChain}
import com.digitalasset.canton.resource.{DbStorage, DbStore}
import com.digitalasset.canton.time.NonNegativeFiniteDuration
import com.digitalasset.canton.topology.*
import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime}
import com.digitalasset.canton.topology.store.TopologyStore.InsertTransaction
import com.digitalasset.canton.topology.store.*
import com.digitalasset.canton.topology.transaction.*
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.FutureInstances.*
import com.digitalasset.canton.version.ProtocolVersion
import slick.jdbc.GetResult
import slick.jdbc.canton.SQLActionBuilder

import scala.concurrent.{ExecutionContext, Future}

/** Database-backed implementation of [[PartyMetadataStore]], persisting party
  * display names, hosting participant, submission id and the "notified" flag
  * in the `party_metadata` table.
  */
class DbPartyMetadataStore(
    override protected val storage: DbStorage,
    override protected val timeouts: ProcessingTimeout,
    override protected val loggerFactory: NamedLoggerFactory,
)(implicit
    ec: ExecutionContext
) extends PartyMetadataStore
    with DbStore {

  import DbStorage.Implicits.BuilderChain.*
  import storage.api.*

  // Gauge used to time all reads/writes performed by this store.
  private val processingTime: TimedLoadGauge =
    storage.metrics.loadGaugeM("party-metadata-store")

  /** Looks up the metadata row for the given party, if present. */
  override def metadataForParty(
      partyId: PartyId
  )(implicit traceContext: TraceContext): Future[Option[PartyMetadata]] =
    processingTime.event {
      storage
        .query(
          metadataForPartyQuery(sql"party_id = $partyId #${storage.limit(1)}"),
          functionFullName,
        )
        .map(_.headOption)
    }

  /** Builds a read action selecting party metadata rows matching `where`.
    * Rows with unparseable participant ids or display names are mapped to `None`
    * for those fields rather than failing the query.
    */
  private def metadataForPartyQuery(
      where: SQLActionBuilderChain
  ): DbAction.ReadOnly[Seq[PartyMetadata]] = {

    val query =
      sql"select party_id, display_name, participant_id, submission_id, effective_at, notified from party_metadata where " ++ where

    for {
      data <- query
        .as[(PartyId, Option[String], Option[String], String255, CantonTimestamp, Boolean)]
    } yield {
      data.map {
        case (partyId, displayNameS, participantIdS, submissionId, effectiveAt, notified) =>
          val participantId =
            participantIdS
              .flatMap(x => UniqueIdentifier.fromProtoPrimitive_(x).toOption)
              .map(ParticipantId(_))
          val displayName = displayNameS.flatMap(String255.create(_).toOption)
          PartyMetadata(
            partyId,
            displayName,
            participantId = participantId,
          )(
            effectiveTimestamp = effectiveAt,
            submissionId = submissionId,
            notified = notified,
          )
      }
    }
  }

  /** Upserts the metadata row for `partyId`; on update the `notified` flag is reset to false. */
  override def insertOrUpdatePartyMetadata(
      partyId: PartyId,
      participantId: Option[ParticipantId],
      displayName: Option[DisplayName],
      effectiveTimestamp: CantonTimestamp,
      submissionId: String255,
  )(implicit traceContext: TraceContext): Future[Unit] =
    processingTime.event {
      val participantS = dbValue(participantId)
      // Profile-specific upsert: native ON CONFLICT for Postgres, MERGE for H2/Oracle.
      val query = storage.profile match {
        case _: DbStorage.Profile.Postgres =>
          sqlu"""insert into party_metadata (party_id, display_name, participant_id, submission_id, effective_at)
                    VALUES ($partyId, $displayName, $participantS, $submissionId, $effectiveTimestamp)
                 on conflict (party_id) do update
                  set
                    display_name = $displayName,
                    participant_id = $participantS,
                    submission_id = $submissionId,
                    effective_at = $effectiveTimestamp,
                    notified = false
                 """
        case _: DbStorage.Profile.H2 | _: DbStorage.Profile.Oracle =>
          sqlu"""merge into party_metadata
                  using dual
                  on (party_id = $partyId)
                  when matched then
                    update set
                      display_name = $displayName,
                      participant_id = $participantS,
                      submission_id = $submissionId,
                      effective_at = $effectiveTimestamp,
                      notified = ${false}
                  when not matched then
                    insert (party_id, display_name, participant_id, submission_id, effective_at)
                    values ($partyId, $displayName, $participantS, $submissionId, $effectiveTimestamp)
                 """
      }
      storage.update_(query, functionFullName)
    }

  // Stores the participant's unique identifier as a length-limited string column value.
  private def dbValue(participantId: Option[ParticipantId]): Option[String300] =
    participantId.map(_.uid.toLengthLimitedString.asString300)

  /** mark the given metadata as having been successfully forwarded to the domain */
  override def markNotified(
      metadata: PartyMetadata
  )(implicit traceContext: TraceContext): Future[Unit] = processingTime.event {
    val partyId = metadata.partyId
    val effectiveAt = metadata.effectiveTimestamp
    // effective_at is part of the predicate so that a newer (re-)insert is not
    // accidentally marked as notified.
    val query =
      sqlu"UPDATE party_metadata SET notified = ${true} WHERE party_id = $partyId and effective_at = $effectiveAt"
    storage.update_(query, functionFullName)
  }

  /** fetch the current set of party data which still needs to be notified */
  override def fetchNotNotified()(implicit traceContext: TraceContext): Future[Seq[PartyMetadata]] =
    processingTime.event {
      storage
        .query(
          metadataForPartyQuery(sql"notified = ${false}"),
          functionFullName,
        )
    }

}

/** SQL helpers shared by the DB topology store implementations:
  * dispatching-watermark persistence and common query-fragment builders.
  */
trait DbTopologyStoreCommon[+StoreId <: TopologyStoreId] extends NamedLogging {
  this: TopologyStoreCommon[StoreId, ?, ?, ?] & DbStore =>

  import DbStorage.Implicits.BuilderChain.*
  import storage.api.*

  // Upper bound on the number of items inlined into a single SQL IN/OR clause.
  protected def maxItemsInSqlQuery: PositiveInt
  // Store discriminator used in the store_id column.
  protected def transactionStoreIdName: LengthLimitedString
  // Gauges timing write and read operations, supplied by the concrete store.
  protected def updatingTime: TimedLoadGauge
  protected def readTime: TimedLoadGauge

  /** Reads the persisted dispatching watermark for this store, if any. */
  override def currentDispatchingWatermark(implicit
      traceContext: TraceContext
  ): Future[Option[CantonTimestamp]] = {
    val query =
      sql"SELECT watermark_ts FROM topology_dispatching WHERE store_id =$transactionStoreIdName"
        .as[CantonTimestamp]
        .headOption
    readTime.event {
      storage.query(query, functionFullName)
    }
  }

  /** Upserts the dispatching watermark for this store (profile-specific upsert). */
  override def updateDispatchingWatermark(timestamp: CantonTimestamp)(implicit
      traceContext: TraceContext
  ): Future[Unit] = {
    val query = storage.profile match {
      case _: DbStorage.Profile.Postgres =>
        sqlu"""insert into topology_dispatching (store_id, watermark_ts)
                    VALUES ($transactionStoreIdName, $timestamp)
                 on conflict (store_id) do update
                  set
                    watermark_ts = $timestamp
                 """
      case _: DbStorage.Profile.H2 | _: DbStorage.Profile.Oracle =>
        sqlu"""merge into topology_dispatching
                  using dual
                  on (store_id = $transactionStoreIdName)
                  when matched then
                    update set
                      watermark_ts = $timestamp
                  when not matched then
                    insert (store_id, watermark_ts)
                    values ($transactionStoreIdName, $timestamp)
                 """
    }
    updatingTime.event {
      storage.update_(query, functionFullName)
    }
  }

  /** Validity-window filter for a point in time: inclusive matches
    * valid_from <= asOf < valid_until, exclusive matches valid_from < asOf <= valid_until.
    */
  protected def asOfQuery(asOf: CantonTimestamp, asOfInclusive: Boolean): SQLActionBuilder =
    if (asOfInclusive)
      sql" AND valid_from <= $asOf AND (valid_until is NULL OR $asOf < valid_until)"
    else
      sql" AND valid_from < $asOf AND (valid_until is NULL OR $asOf <= valid_until)"

  /** Head-state filter: snapshot just before the given timestamp when one is
    * provided, otherwise only currently valid rows (valid_until is NULL).
    */
  protected def getHeadStateQuery(
      recentTimestampO: Option[CantonTimestamp]
  ): SQLActionBuilderChain = recentTimestampO match {
    case Some(value) => asOfQuery(value, asOfInclusive = false)
    case None => sql" AND valid_until is NULL"
  }
@SuppressWarnings(Array("com.digitalasset.canton.SlickString")) + protected def andIdFilter( + previousFilter: SQLActionBuilderChain, + idFilter: String, + namespaceOnly: Boolean, + ): SQLActionBuilderChain = if (idFilter.isEmpty) previousFilter + else if (namespaceOnly) { + previousFilter ++ sql" AND namespace LIKE ${idFilter + "%"}" + } else { + val (prefix, suffix) = UniqueIdentifier.splitFilter(idFilter, "%") + val tmp = previousFilter ++ sql" AND identifier like $prefix " + if (suffix.sizeCompare(1) > 0) { + tmp ++ sql" AND namespace like $suffix " + } else + tmp + } + +} + +class DbTopologyStore[StoreId <: TopologyStoreId]( + override protected val storage: DbStorage, + val storeId: StoreId, + override protected val timeouts: ProcessingTimeout, + override protected val loggerFactory: NamedLoggerFactory, + futureSupervisor: FutureSupervisor, + override protected val maxItemsInSqlQuery: PositiveInt = PositiveInt.tryCreate(100), +)(implicit val ec: ExecutionContext) + extends TopologyStore[StoreId] + with DbTopologyStoreCommon[StoreId] + with DbStore { + + import DbStorage.Implicits.BuilderChain.* + import storage.api.* + import storage.converters.* + + private implicit val getResultSignedTopologyTransaction + : GetResult[SignedTopologyTransaction[TopologyChangeOp]] = + SignedTopologyTransaction.createGetResultDomainTopologyTransaction + + protected val (transactionStoreIdName, stateStoreIdFilterName) = buildTransactionStoreNames( + storeId + ) + private val isDomainStore = storeId match { + case TopologyStoreId.DomainStore(_, _) => true + case _ => false + } + + protected val updatingTime: TimedLoadGauge = + storage.metrics.loadGaugeM("topology-store-update") + protected val readTime: TimedLoadGauge = + storage.metrics.loadGaugeM("topology-store-read") + + private def buildTransactionStoreNames( + storeId: TopologyStoreId + ): (LengthLimitedString, LengthLimitedString) = ( + storeId match { + case TopologyStoreId.DomainStore(_domainId, discriminator) if 
discriminator.isEmpty => + storeId.dbString + case TopologyStoreId.DomainStore(_domainId, _discriminator) => + storeId.dbStringWithDaml2xUniquifier("T") + case TopologyStoreId.AuthorizedStore => + storeId.dbString + }, + storeId.dbStringWithDaml2xUniquifier("S"), + ) + + private def pathQuery(uniquePath: UniquePath): SQLActionBuilder = { + + val dbType = uniquePath.dbType + val namespace = uniquePath.namespace + + sql"(transaction_type = $dbType AND namespace = $namespace" ++ + uniquePath.maybeUid + .map { uid => + val identifier = uid.id + sql" AND identifier = $identifier" + } + .getOrElse(sql"") ++ + uniquePath.maybeElementId + .map { elementId => + sql" AND element_id = $elementId" + } + .getOrElse(sql"") ++ sql")" + + } + + override def append( + sequenced: SequencedTime, + effective: EffectiveTime, + transactions: Seq[ValidatedTopologyTransaction], + )(implicit traceContext: TraceContext): Future[Unit] = { + val (updates, appends) = TopologyStore.appends(effective.value, transactions) + updateAndInsert(transactionStoreIdName, sequenced, effective, updates.toSeq, appends) + } + + private def updateAndInsert( + store: LengthLimitedString, + sequenced: SequencedTime, + effective: EffectiveTime, + deactivate: Seq[UniquePath], + add: Seq[InsertTransaction], + )(implicit traceContext: TraceContext): Future[Unit] = { + + val sequencedTs = sequenced.value + val effectiveTs = effective.value + val updateSeq = deactivate.toList.map(pathQuery) + val appendSeq = add.toList + .map { case InsertTransaction(transaction, validUntil, reasonT) => + val operation = transaction.operation + val transactionType = transaction.uniquePath.dbType + val namespace = transaction.uniquePath.namespace + val identifier = + transaction.uniquePath.maybeUid.map(_.id.toLengthLimitedString).getOrElse(String185.empty) + val elementId = + transaction.uniquePath.maybeElementId.fold(String255.empty)(_.toLengthLimitedString) + val reason = reasonT.map(_.asString1GB) + val secondary = + 
transaction.transaction.element.mapping.secondaryUid.map(x => (x.id, x.namespace)) + if ( + transaction.transaction.element.mapping.requiredAuth.uids.length > 1 && secondary.isEmpty + ) { + logger.warn("I would expect to see a secondary uid here, but there is none.") + } + val representativeProtocolVersion = transaction.transaction.representativeProtocolVersion + val (secondaryId, secondaryNs) = secondary.unzip + storage.profile match { + case _: DbStorage.Profile.Oracle => + sql"SELECT $store, $sequencedTs, $effectiveTs, $validUntil, $transactionType, $namespace, $identifier, $elementId, $secondaryNs, $secondaryId, $operation, $transaction, $reason, $representativeProtocolVersion FROM dual" + case _ => + sql"($store, $sequencedTs, $effectiveTs, $validUntil, $transactionType, $namespace, $identifier, $elementId, $secondaryNs, $secondaryId, $operation, $transaction, $reason, $representativeProtocolVersion)" + } + } + + lazy val updateAction = + (sql"UPDATE topology_transactions SET valid_until = $effectiveTs WHERE store_id = $store AND (" ++ + updateSeq + .intercalate( + sql" OR " + ) ++ sql") AND valid_until is null AND valid_from < $effectiveTs").asUpdate + + val insertAction = storage.profile match { + case _: DbStorage.Profile.Postgres | _: DbStorage.Profile.H2 => + (sql"""INSERT INTO topology_transactions (store_id, sequenced, valid_from, valid_until, transaction_type, namespace, + identifier, element_id, secondary_namespace, secondary_identifier, operation, instance, ignore_reason, representative_protocol_version) VALUES""" ++ + appendSeq.intercalate(sql", ") ++ sql" ON CONFLICT DO NOTHING").asUpdate + case _: DbStorage.Profile.Oracle => + (sql"""INSERT + /*+ IGNORE_ROW_ON_DUPKEY_INDEX ( TOPOLOGY_TRANSACTIONS (store_id, transaction_type, namespace, identifier, element_id, valid_from, operation, representative_protocol_version) ) */ + INTO topology_transactions (store_id, sequenced, valid_from, valid_until, transaction_type, namespace, identifier, element_id, + 
secondary_namespace, secondary_identifier, operation, instance, ignore_reason, representative_protocol_version) + WITH UPDATES AS (""" ++ + appendSeq.intercalate(sql" UNION ALL ") ++ + sql") SELECT * FROM UPDATES").asUpdate + } + + updatingTime.event { + storage.update_( + dbioSeq(Seq((updateSeq.nonEmpty, updateAction), (add.nonEmpty, insertAction))), + operationName = "append-topology-transactions", + ) + } + } + + private def dbioSeq[E <: Effect]( + actions: Seq[(Boolean, DBIOAction[_, NoStream, E])] + ): DBIOAction[Unit, NoStream, E] = DBIO.seq(actions.collect { + case (filter, action) if filter => action + }: _*) + + private def queryForTransactions( + store: LengthLimitedString, + subQuery: SQLActionBuilder, + limit: String = "", + orderBy: String = " ORDER BY id ", + includeRejected: Boolean = false, + )(implicit + traceContext: TraceContext + ): Future[StoredTopologyTransactions[TopologyChangeOp]] = { + val query = + sql"SELECT id, instance, sequenced, valid_from, valid_until FROM topology_transactions WHERE store_id = $store " ++ + subQuery ++ (if (!includeRejected) sql" AND ignore_reason IS NULL" + else sql"") ++ sql" #${orderBy} #${limit}" + readTime.event { + storage + .query( + query.as[ + ( + Long, + SignedTopologyTransaction[TopologyChangeOp], + Option[CantonTimestamp], + CantonTimestamp, + Option[CantonTimestamp], + ) + ], + functionFullName, + ) + .flatMap(_.toList.parTraverse { case (id, dt, sequencedTsO, validFrom, validUntil) => + getOrComputeSequencedTime(store, id, sequencedTsO, validFrom).map { sequencedTs => + StoredTopologyTransaction( + SequencedTime(sequencedTs), + EffectiveTime(validFrom), + validUntil.map(EffectiveTime(_)), + dt, + ) + } + }) + .map(StoredTopologyTransactions(_)) + } + } + + // TODO(#15208) remove once we move to 3.0 + /** Backwards compatible computation of sequencing time + * + * The algorithm works based on the assumption that the topology manager has not sent + * an epsilon change that would lead to reordering of 
topology transactions. + * + * Let's assume we have parameter changes at (t3,e3), (t2, e2), (t1, e1), default: e0 = 0 + * with ti being the effective time + * + * Then, for a t4, we know that if (t4 - t3) > e3, then t4 was sequenced at t4 - e3. Otherwise, we repeat + * with checking t4 against t2 and e2 etc. + */ + private def getOrComputeSequencedTime( + store: LengthLimitedString, + id: Long, + sequencedO: Option[CantonTimestamp], + validFrom: CantonTimestamp, + )(implicit + traceContext: TraceContext + ): Future[CantonTimestamp] = + if (!isDomainStore) + Future.successful(sequencedO.getOrElse(validFrom)) // only compute for domain stores + else { + def getParameterChangeBefore( + ts: CantonTimestamp + ): Future[Option[(CantonTimestamp, NonNegativeFiniteDuration)]] = { + val typ = DomainTopologyTransactionType.DomainParameters + // this is recursive, but terminates as we descend in time strictly. + // It is also stack safe as trampolined by a `Future.flatmap` inside queryForTransactions. 
+ queryForTransactions( + store, + sql" AND transaction_type = ${typ} and valid_from < $ts", + limit = storage.limit(1), + orderBy = " ORDER BY valid_from DESC", + ).map( + _.result.map(x => (x.validFrom, x.transaction.transaction.element.mapping)).collectFirst { + case (ts, change: DomainParametersChange) => + (ts.value, change.domainParameters.topologyChangeDelay) + } + ) + } + def go(before: CantonTimestamp): Future[CantonTimestamp] = { + getParameterChangeBefore(before).flatMap { + case None => + // there is no parameter change before, so we use the default (which is 0) + Future.successful(validFrom - DynamicDomainParameters.topologyChangeDelayIfAbsent) + case Some((ts, epsilon)) => + val delta = validFrom - ts + // check if (teffective - teffchange) > epsilon + if (delta.compareTo(epsilon.duration) > 0) { + Future.successful(validFrom - epsilon) + } else { + go(ts) + } + } + } + sequencedO.map(Future.successful).getOrElse { + go(validFrom).flatMap { sequenced => + logger.info( + s"Updating legacy topology transaction id=${id} with effective=${validFrom} to sequenced time=${sequenced}" + ) + val query = + sqlu"UPDATE topology_transactions SET sequenced = ${sequenced} WHERE id = $id AND store_id = $store" + storage.update_(query, functionFullName).map(_ => sequenced) + } + } + } + + override def timestamp( + useStateStore: Boolean + )(implicit traceContext: TraceContext): Future[Option[(SequencedTime, EffectiveTime)]] = { + val storeId = if (useStateStore) stateStoreIdFilterName else transactionStoreIdName + queryForTransactions(storeId, sql"", storage.limit(1), orderBy = " ORDER BY id DESC") + .map(_.result.headOption.map(tx => (tx.sequenced, tx.validFrom))) + } + + override def headTransactions(implicit + traceContext: TraceContext + ): Future[StoredTopologyTransactions[TopologyChangeOp.Positive]] = + queryForTransactions( + transactionStoreIdName, + sql" AND valid_until is NULL and (operation = ${TopologyChangeOp.Add} or operation = 
${TopologyChangeOp.Replace})", + ).map(_.collectOfType[TopologyChangeOp.Positive]) + + override def findRemovalTransactionForMappings( + mappings: Set[TopologyStateElement[TopologyMapping]] + )(implicit + traceContext: TraceContext + ): Future[Seq[SignedTopologyTransaction[TopologyChangeOp.Remove]]] = + if (mappings.isEmpty) Future.successful(Seq.empty) + else { + + val mappingsQuery = mappings + .collect { case x: TopologyStateUpdateElement => pathQuery(x.uniquePath) } + .toList + .intercalate(sql" OR ") + + queryForTransactions( + transactionStoreIdName, + sql" AND operation = ${TopologyChangeOp.Remove} AND (" ++ mappingsQuery ++ sql")", + ).map( + _.result + .map(_.transaction) + .mapFilter(TopologyChangeOp.select[TopologyChangeOp.Remove]) + ) + } + + override def findPositiveTransactionsForMapping( + mapping: TopologyMapping + )(implicit + traceContext: TraceContext + ): Future[Seq[SignedTopologyTransaction[TopologyChangeOp.Positive]]] = { + val tmp = TopologyElementId.tryCreate("1") + val ns = mapping.uniquePath(tmp).namespace + val query = mapping.uniquePath(tmp).maybeUid.map(_.id) match { + case None => sql" AND namespace = $ns" + case Some(identifier) => sql" AND namespace = $ns AND identifier = $identifier" + } + queryForTransactions( + transactionStoreIdName, + sql" AND valid_until is NULL AND transaction_type = ${mapping.dbType}" ++ query, + ) + .map { x => + x.positiveTransactions.combine.result.collect { + case storedTx if storedTx.transaction.transaction.element.mapping == mapping => + storedTx.transaction + } + } + } + + override def allTransactions(includeRejected: Boolean = false)(implicit + traceContext: TraceContext + ): Future[StoredTopologyTransactions[TopologyChangeOp]] = + queryForTransactions(transactionStoreIdName, sql"", includeRejected = includeRejected) + + @SuppressWarnings(Array("com.digitalasset.canton.SlickString")) + override def inspectKnownParties( + timestamp: CantonTimestamp, + filterParty: String, + filterParticipant: String, + 
limit: Int, + )(implicit + traceContext: TraceContext + ): Future[Set[PartyId]] = { + val p2pm = DomainTopologyTransactionType.PartyToParticipant + val pdsm = DomainTopologyTransactionType.ParticipantState + val (filterPartyIdentifier, filterPartyNamespace) = + UniqueIdentifier.splitFilter(filterParty, "%") + val (filterParticipantIdentifier, filterParticipantNamespace) = + UniqueIdentifier.splitFilter(filterParticipant, "%") + val limitS = storage.limit(limit) + val query = + sql""" + SELECT identifier, namespace FROM topology_transactions WHERE store_id = $stateStoreIdFilterName + AND valid_from < $timestamp AND (valid_until IS NULL OR $timestamp <= valid_until) + AND ( + (transaction_type = $p2pm AND identifier LIKE $filterPartyIdentifier AND namespace LIKE $filterPartyNamespace + AND secondary_identifier LIKE $filterParticipantIdentifier AND secondary_namespace LIKE $filterParticipantNamespace) + OR (transaction_type = $pdsm AND identifier LIKE $filterPartyIdentifier AND namespace LIKE $filterPartyNamespace + AND identifier LIKE $filterParticipantIdentifier AND namespace LIKE $filterParticipantNamespace) + ) AND ignore_reason IS NULL GROUP BY (identifier, namespace) #${limitS}""" + readTime.event { + storage + .query( + query.as[ + (String, String) + ], + functionFullName, + ) + .map(_.map { case (id, ns) => + PartyId(UniqueIdentifier(Identifier.tryCreate(id), Namespace(Fingerprint.tryCreate(ns)))) + }.toSet) + } + } + + /** query optimized for inspection */ + override def inspect( + stateStore: Boolean, + timeQuery: TimeQuery, + recentTimestampO: Option[CantonTimestamp], + ops: Option[TopologyChangeOp], + typ: Option[DomainTopologyTransactionType], + idFilter: String, + namespaceOnly: Boolean, + )(implicit + traceContext: TraceContext + ): Future[StoredTopologyTransactions[TopologyChangeOp]] = { + val storeId = { + if (stateStore) + stateStoreIdFilterName + else transactionStoreIdName + } + val query1: SQLActionBuilderChain = timeQuery match { + case 
TimeQuery.HeadState => + getHeadStateQuery(recentTimestampO) + case TimeQuery.Snapshot(asOf) => + asOfQuery(asOf = asOf, asOfInclusive = false) + case TimeQuery.Range(None, None) => + sql"" // The case below insert an additional `AND` that we don't want + case TimeQuery.Range(from, until) => + sql" AND " ++ ((from.toList.map(ts => sql"valid_from >= $ts") ++ until.toList.map(ts => + sql" valid_from <= $ts" + )) + .intercalate(sql" AND ")) + } + val query2 = ops match { + case Some(value) => + query1 ++ sql" AND operation = $value" + case None => query1 + } + + val query3 = andIdFilter(query2, idFilter, namespaceOnly) + + val query4 = typ match { + case Some(value) => query3 ++ sql" AND transaction_type = $value" + case None => query3 + } + queryForTransactions(storeId, query4) + } + + private def findStoredSql( + transaction: TopologyTransaction[TopologyChangeOp], + subQuery: SQLActionBuilder = sql"", + includeRejected: Boolean = false, + )(implicit + traceContext: TraceContext + ): Future[StoredTopologyTransactions[TopologyChangeOp]] = + queryForTransactions( + transactionStoreIdName, + sql" AND" ++ pathQuery( + transaction.element.uniquePath + ) ++ sql" AND operation = ${transaction.op}" ++ subQuery, + includeRejected = includeRejected, + ) + + override def findStored( + transaction: SignedTopologyTransaction[TopologyChangeOp], + includeRejected: Boolean = false, + )(implicit + traceContext: TraceContext + ): Future[Option[StoredTopologyTransaction[TopologyChangeOp]]] = + findStoredSql(transaction.transaction, includeRejected = includeRejected).map( + _.result.headOption + ) + + override def findStoredNoSignature( + transaction: TopologyTransaction[TopologyChangeOp] + )(implicit + traceContext: TraceContext + ): Future[Seq[StoredTopologyTransaction[TopologyChangeOp]]] = + findStoredSql(transaction).map(_.result) + + override def findStoredForVersion( + transaction: TopologyTransaction[TopologyChangeOp], + protocolVersion: ProtocolVersion, + )(implicit + 
traceContext: TraceContext + ): Future[Option[StoredTopologyTransaction[TopologyChangeOp]]] = { + val representativeProtocolVersion = + TopologyTransaction.protocolVersionRepresentativeFor(protocolVersion) + findStoredSql( + transaction, + sql" AND representative_protocol_version = $representativeProtocolVersion", + ).map(_.result.headOption) + } + + /** query interface used by [[com.digitalasset.canton.topology.client.StoreBasedTopologySnapshot]] */ + override def findPositiveTransactions( + asOf: CantonTimestamp, + asOfInclusive: Boolean, + includeSecondary: Boolean, + types: Seq[DomainTopologyTransactionType], + filterUid: Option[Seq[UniqueIdentifier]], + filterNamespace: Option[Seq[Namespace]], + )(implicit + traceContext: TraceContext + ): Future[PositiveStoredTopologyTransactions] = + findPositiveTransactionsInStore( + transactionStoreIdName, + asOf, + asOfInclusive, + includeSecondary, + types, + filterUid, + filterNamespace, + ) + + /** batching (by filterUid) version of finding transactions in store */ + private def findTransactionsInStore( + store: LengthLimitedString, + asOf: CantonTimestamp, + asOfInclusive: Boolean, + includeSecondary: Boolean, + types: Seq[DomainTopologyTransactionType], + filterUid: Option[Seq[UniqueIdentifier]], + filterNamespace: Option[Seq[Namespace]], + filterOps: Seq[TopologyChangeOp], + )(implicit + traceContext: TraceContext + ): Future[StoredTopologyTransactions[TopologyChangeOp]] = { + def forward(filterUidsNew: Option[Seq[UniqueIdentifier]]) = + findTransactionsInStoreRaw( + store, + asOf, + asOfInclusive, + includeSecondary, + types, + filterUidsNew, + filterNamespace, + filterOps, + ) + filterUid match { + case None => forward(None) + case Some(uids) if uids.sizeCompare(maxItemsInSqlQuery.value) < 0 => forward(filterUid) + case Some(uids) => + uids + .grouped(maxItemsInSqlQuery.value) + .toList + .parTraverse(lessUids => forward(Some(lessUids))) + .map(all => StoredTopologyTransactions(all.flatMap(_.result))) + } + } + + 
/** unbatching version of finding transactions in store */ + private def findTransactionsInStoreRaw( + store: LengthLimitedString, + asOf: CantonTimestamp, + asOfInclusive: Boolean, + includeSecondary: Boolean, + types: Seq[DomainTopologyTransactionType], + filterUid: Option[Seq[UniqueIdentifier]], + filterNamespace: Option[Seq[Namespace]], + filterOps: Seq[TopologyChangeOp], + )(implicit + traceContext: TraceContext + ): Future[StoredTopologyTransactions[TopologyChangeOp]] = { + { + { + val hasUidFilter = filterUid.nonEmpty || filterNamespace.nonEmpty + val count = + filterUid.map(_.length).getOrElse(0) + filterNamespace.map(_.length).getOrElse(0) + if (hasUidFilter && count == 0) { + Future.successful(StoredTopologyTransactions.empty[TopologyChangeOp.Add]) + } else { + val rangeQuery = asOfQuery(asOf, asOfInclusive) + val opFilter = filterOps.map(op => sql"operation = $op").intercalate(sql" or ") + val baseQuery = + sql" AND (" ++ opFilter ++ sql") AND transaction_type IN (" ++ types.toList + .map(s => sql"$s") + .intercalate(sql", ") ++ sql")" + + val pathQuery: SQLActionBuilderChain = + if (!hasUidFilter) sql"" + else { + def genFilters(identifier: String, namespace: String): SQLActionBuilderChain = { + val filterUidQ = + filterUid + .map(_.filterNot(uid => filterNamespace.exists(_.contains(uid.namespace)))) + .toList + .flatMap( + _.map(uid => + sql"(#$identifier = ${uid.id} AND #$namespace = ${uid.namespace})" + ) + ) + val filterNsQ = + filterNamespace.toList + .flatMap(_.map(ns => sql"(#$namespace = $ns)")) + SQLActionBuilderChain(filterUidQ) ++ SQLActionBuilderChain(filterNsQ) + } + val plainFilter = genFilters("identifier", "namespace") + val filters = if (includeSecondary) { + plainFilter ++ genFilters("secondary_identifier", "secondary_namespace") + } else plainFilter + sql" AND (" ++ filters.intercalate(sql" OR ") ++ sql")" + } + queryForTransactions(store, rangeQuery ++ baseQuery ++ pathQuery) + } + } + } + } + + private def 
findPositiveTransactionsInStore( + store: LengthLimitedString, + asOf: CantonTimestamp, + asOfInclusive: Boolean, + includeSecondary: Boolean, + types: Seq[DomainTopologyTransactionType], + filterUid: Option[Seq[UniqueIdentifier]], + filterNamespace: Option[Seq[Namespace]], + )(implicit traceContext: TraceContext): Future[PositiveStoredTopologyTransactions] = + findTransactionsInStore( + store = store, + asOf = asOf, + asOfInclusive = asOfInclusive, + includeSecondary = includeSecondary, + types = types, + filterUid = filterUid, + filterNamespace = filterNamespace, + filterOps = Seq(TopologyChangeOp.Add, TopologyChangeOp.Replace), + ).map(_.positiveTransactions) + + /** query interface used by DomainTopologyManager to find the set of initial keys */ + override def findInitialState( + id: DomainTopologyManagerId + )(implicit traceContext: TraceContext): Future[Map[Member, Seq[PublicKey]]] = { + val batchNum = 100 + def go( + offset: Long, + acc: Map[Member, Seq[PublicKey]], + ): Future[(Boolean, Map[Member, Seq[PublicKey]])] = { + val query = sql" AND operation = ${TopologyChangeOp.Add}" + val lm = storage.limit(batchNum, offset) + val start = (false, 0, acc) + queryForTransactions(transactionStoreIdName, query, lm) + .map(_.toDomainTopologyTransactions.foldLeft(start) { + case ((false, count, acc), transaction) => + val (bl, map) = TopologyStore.findInitialStateAccumulator(id.uid, acc, transaction) + (bl, count + 1, map) + case ((bl, count, map), _) => (bl, count + 1, map) + }) + .flatMap { + case (done, count, acc) if done || count < batchNum => Future.successful((done, acc)) + case (_, _, acc) => go(offset + batchNum, acc) + } + } + go(0, Map()).map(_._2) + } + + override def findStateTransactions( + asOf: CantonTimestamp, + asOfInclusive: Boolean, + includeSecondary: Boolean, + types: Seq[DomainTopologyTransactionType], + filterUid: Option[Seq[UniqueIdentifier]], + filterNamespace: Option[Seq[Namespace]], + )(implicit traceContext: TraceContext): 
Future[PositiveStoredTopologyTransactions] = + findPositiveTransactionsInStore( + stateStoreIdFilterName, + asOf, + asOfInclusive, + includeSecondary, + types, + filterUid, + filterNamespace, + ) + + override def updateState( + sequenced: SequencedTime, + effective: EffectiveTime, + deactivate: Seq[UniquePath], + positive: Seq[SignedTopologyTransaction[TopologyChangeOp.Positive]], + )(implicit traceContext: TraceContext): Future[Unit] = + updateAndInsert( + stateStoreIdFilterName, + sequenced, + effective, + deactivate, + positive.map { x => + InsertTransaction(x, None, None) + }, + ) + + override def findUpcomingEffectiveChanges(asOfInclusive: CantonTimestamp)(implicit + traceContext: TraceContext + ): Future[Seq[TopologyStore.Change]] = { + queryForTransactions( + transactionStoreIdName, + sql" AND valid_from >= $asOfInclusive ", + orderBy = " ORDER BY valid_from", + ).map(res => + TopologyStore.Change.accumulateUpcomingEffectiveChanges( + res.result + ) + ) + } + + override def currentDispatchingWatermark(implicit + traceContext: TraceContext + ): Future[Option[CantonTimestamp]] = { + val query = + sql"SELECT watermark_ts FROM topology_dispatching WHERE store_id =$transactionStoreIdName" + .as[CantonTimestamp] + .headOption + readTime.event { + storage.query(query, functionFullName) + } + } + + override def updateDispatchingWatermark(timestamp: CantonTimestamp)(implicit + traceContext: TraceContext + ): Future[Unit] = { + val query = storage.profile match { + case _: DbStorage.Profile.Postgres => + sqlu"""insert into topology_dispatching (store_id, watermark_ts) + VALUES ($transactionStoreIdName, $timestamp) + on conflict (store_id) do update + set + watermark_ts = $timestamp + """ + case _: DbStorage.Profile.H2 | _: DbStorage.Profile.Oracle => + sqlu"""merge into topology_dispatching + using dual + on (store_id = $transactionStoreIdName) + when matched then + update set + watermark_ts = $timestamp + when not matched then + insert (store_id, watermark_ts) + 
values ($transactionStoreIdName, $timestamp) + """ + } + updatingTime.event { + storage.update_(query, functionFullName) + } + } + + override def findDispatchingTransactionsAfter( + timestampExclusive: CantonTimestamp, + limitO: Option[Int], + )(implicit traceContext: TraceContext): Future[StoredTopologyTransactions[TopologyChangeOp]] = { + val subQuery = + sql" AND valid_from > $timestampExclusive AND (valid_until is NULL OR operation = ${TopologyChangeOp.Remove})" + val limitQ = limitO.fold("")(storage.limit(_)) + queryForTransactions(transactionStoreIdName, subQuery, limitQ) + } + + override def findParticipantOnboardingTransactions( + participantId: ParticipantId, + domainId: DomainId, + )(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Seq[SignedTopologyTransaction[TopologyChangeOp]]] = { + val ns = participantId.uid.namespace + val subQuery = + sql" AND valid_until is NULL AND namespace = $ns AND transaction_type IN (" ++ TopologyStore.initialParticipantDispatchingSet.toList + .map(s => sql"$s") + .intercalate(sql", ") ++ sql")" + performUnlessClosingF("query-for-transactions")( + queryForTransactions(transactionStoreIdName, subQuery) + ) + .flatMap( + TopologyStore.filterInitialParticipantDispatchingTransactions( + participantId, + domainId, + this, + loggerFactory, + _, + timeouts, + futureSupervisor, + ) + ) + } + + override def findTsOfParticipantStateChangesBefore( + beforeExclusive: CantonTimestamp, + participantId: ParticipantId, + limit: Int, + )(implicit traceContext: TraceContext): Future[Seq[CantonTimestamp]] = { + val ns = participantId.uid.namespace + val id = participantId.uid.id + val subQuery = sql" AND valid_from < $beforeExclusive " ++ + sql" AND transaction_type = ${DomainTopologyTransactionType.ParticipantState} " ++ + sql" AND namespace = $ns AND identifier = $id " + val limitQ = storage.limit(limit) + queryForTransactions( + transactionStoreIdName, + subQuery, + limit = limitQ, + orderBy = "ORDER BY valid_from DESC", + 
).map(_.result.map(_.validFrom.value)) + } + + override def findTransactionsInRange( + asOfExclusive: CantonTimestamp, + upToExclusive: CantonTimestamp, + )(implicit traceContext: TraceContext): Future[StoredTopologyTransactions[TopologyChangeOp]] = { + val subQuery = + sql""" AND valid_from > $asOfExclusive AND valid_from < $upToExclusive""" + queryForTransactions( + transactionStoreIdName, + subQuery, + ) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/store/db/DbTopologyStoreX.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/store/db/DbTopologyStoreX.scala new file mode 100644 index 0000000000..009c444eae --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/store/db/DbTopologyStoreX.scala @@ -0,0 +1,714 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology.store.db + +import cats.syntax.foldable.* +import cats.syntax.option.* +import com.daml.nameof.NameOf.functionFullName +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.config.CantonRequireTypes.{LengthLimitedString, String185} +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.metrics.TimedLoadGauge +import com.digitalasset.canton.resource.DbStorage.SQLActionBuilderChain +import com.digitalasset.canton.resource.{DbStorage, DbStore} +import com.digitalasset.canton.topology.* +import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime} +import com.digitalasset.canton.topology.store.StoredTopologyTransactionX.GenericStoredTopologyTransactionX +import 
com.digitalasset.canton.topology.store.StoredTopologyTransactionsX.{ + GenericStoredTopologyTransactionsX, + PositiveStoredTopologyTransactionsX, +} +import com.digitalasset.canton.topology.store.ValidatedTopologyTransactionX.GenericValidatedTopologyTransactionX +import com.digitalasset.canton.topology.store.* +import com.digitalasset.canton.topology.transaction.SignedTopologyTransactionX.GenericSignedTopologyTransactionX +import com.digitalasset.canton.topology.transaction.TopologyChangeOpX.Replace +import com.digitalasset.canton.topology.transaction.TopologyMappingX.MappingHash +import com.digitalasset.canton.topology.transaction.TopologyTransactionX.{ + GenericTopologyTransactionX, + TxHash, +} +import com.digitalasset.canton.topology.transaction.{ + DomainTrustCertificateX, + MediatorDomainStateX, + PartyToParticipantX, + SignedTopologyTransactionX, + TopologyChangeOpX, + TopologyMappingX, + TopologyTransactionX, +} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.FutureInstances.* +import com.digitalasset.canton.util.MonadUtil +import com.digitalasset.canton.version.ProtocolVersion +import slick.jdbc.GetResult +import slick.jdbc.canton.SQLActionBuilder +import slick.sql.SqlStreamingAction + +import scala.concurrent.{ExecutionContext, Future} + +class DbTopologyStoreX[StoreId <: TopologyStoreId]( + override protected val storage: DbStorage, + val storeId: StoreId, + override protected val timeouts: ProcessingTimeout, + override protected val loggerFactory: NamedLoggerFactory, + override protected val maxItemsInSqlQuery: PositiveInt = PositiveInt.tryCreate(100), +)(implicit ec: ExecutionContext) + extends TopologyStoreX[StoreId] + with DbTopologyStoreCommon[StoreId] + with DbStore { + + import DbStorage.Implicits.BuilderChain.* + import storage.api.* + import storage.converters.* + + private implicit val getResultSignedTopologyTransaction + : GetResult[GenericSignedTopologyTransactionX] = + 
SignedTopologyTransactionX.createGetResultDomainTopologyTransaction + + protected val transactionStoreIdName: LengthLimitedString = storeId.dbString + + protected val updatingTime: TimedLoadGauge = + storage.metrics.loadGaugeM("topology-store-x-update") + protected val readTime: TimedLoadGauge = + storage.metrics.loadGaugeM("topology-store-x-read") + + def findTransactionsByTxHash(asOfExclusive: EffectiveTime, hashes: NonEmpty[Set[TxHash]])(implicit + traceContext: TraceContext + ): Future[Seq[GenericSignedTopologyTransactionX]] = + findAsOfExclusive( + asOfExclusive, + sql" AND (" ++ hashes + .map(txHash => sql"tx_hash = ${txHash.hash.toLengthLimitedHexString}") + .forgetNE + .toList + .intercalate(sql" OR ") ++ sql")", + ) + + override def findProposalsByTxHash( + asOfExclusive: EffectiveTime, + hashes: NonEmpty[Set[TxHash]], + )(implicit traceContext: TraceContext): Future[Seq[GenericSignedTopologyTransactionX]] = + findAsOfExclusive( + asOfExclusive, + sql" AND is_proposal = true AND (" ++ hashes + .map(txHash => sql"tx_hash = ${txHash.hash.toLengthLimitedHexString}") + .forgetNE + .toList + .intercalate( + sql" OR " + ) ++ sql")", + ) + + override def findTransactionsForMapping( + asOfExclusive: EffectiveTime, + hashes: NonEmpty[Set[MappingHash]], + )(implicit + traceContext: TraceContext + ): Future[Seq[GenericSignedTopologyTransactionX]] = findAsOfExclusive( + asOfExclusive, + sql" AND is_proposal = false AND (" ++ hashes + .map(mappingHash => sql"mapping_key_hash = ${mappingHash.hash.toLengthLimitedHexString}") + .forgetNE + .toList + .intercalate(sql" OR ") ++ sql")", + ) + + /** @param elements Elements to be batched + * @param operationName Name of the operation + * @param f Create a DBIOAction from a batch + */ + private def performBatchedDbOperation[X]( + elements: Seq[X], + operationName: String, + processInParallel: Boolean, + )( + f: Seq[X] => DBIOAction[_, NoStream, Effect.Write with Effect.Transactional] + )(implicit traceContext: TraceContext) = 
if (elements.isEmpty) Future.successful(()) + else + MonadUtil.batchedSequentialTraverse_( + parallelism = + if (processInParallel) PositiveInt.two * storage.threadsAvailableForWriting + else PositiveInt.one, + chunkSize = maxItemsInSqlQuery, + )(elements) { elementsBatch => + storage.update_( + f(elementsBatch), + operationName = operationName, + ) + } + + /** add validated topology transaction as is to the topology transaction table + */ + override def update( + sequenced: SequencedTime, + effective: EffectiveTime, + removeMapping: Set[TopologyMappingX.MappingHash], + removeTxs: Set[TopologyTransactionX.TxHash], + additions: Seq[GenericValidatedTopologyTransactionX], + )(implicit traceContext: TraceContext): Future[Unit] = { + + val effectiveTs = effective.value + + val transactionRemovals = removeMapping.toList.map(mappingHash => + sql"mapping_key_hash=${mappingHash.hash.toLengthLimitedHexString}" + ) ++ removeTxs.map(txHash => sql"tx_hash=${txHash.hash.toLengthLimitedHexString}") + + lazy val updateRemovals = + (sql"UPDATE topology_transactions_x SET valid_until = ${Some(effectiveTs)} WHERE store_id=$transactionStoreIdName AND (" ++ + transactionRemovals + .intercalate( + sql" OR " + ) ++ sql") AND valid_from < $effectiveTs AND valid_until is null").asUpdate + + lazy val insertAdditions = + insertSignedTransaction[GenericValidatedTopologyTransactionX](vtx => + TransactionEntry( + sequenced, + effective, + Option.when( + vtx.rejectionReason.nonEmpty || vtx.expireImmediately + )(effective), + vtx.transaction, + vtx.rejectionReason, + ) + )(additions) + + updatingTime.event { + storage.update_( + DBIO.seq( + if (transactionRemovals.nonEmpty) updateRemovals else DBIO.successful(0), + if (additions.nonEmpty) insertAdditions else DBIO.successful(0), + ), + operationName = "update-topology-transactions", + ) + } + } + + // TODO(#14048) only a temporary crutch to inspect the topology state + override def dumpStoreContent()(implicit traceContext: TraceContext): Unit = { 
+ // Helper case class to produce comparable output to the InMemoryStore + case class TopologyStoreEntry( + transaction: GenericSignedTopologyTransactionX, + sequenced: SequencedTime, + from: EffectiveTime, + until: Option[EffectiveTime], + rejected: Option[String], + ) + + val query = + sql"SELECT instance, sequenced, valid_from, valid_until, rejection_reason FROM topology_transactions_x WHERE store_id = $transactionStoreIdName ORDER BY id" + val entries = timeouts.io.await("dumpStoreContent")(readTime.event { + storage + .query( + query.as[ + ( + GenericSignedTopologyTransactionX, + CantonTimestamp, + CantonTimestamp, + Option[CantonTimestamp], + Option[String], + ) + ], + functionFullName, + ) + .map(_.map { case (tx, sequencedTs, validFrom, validUntil, rejectionReason) => + TopologyStoreEntry( + tx, + SequencedTime(sequencedTs), + EffectiveTime(validFrom), + validUntil.map(EffectiveTime(_)), + rejectionReason, + ) + }) + }) + + logger.debug( + entries + .map(_.toString) + .mkString("Topology Store Content[", ", ", "]") + ) + } + + override def inspect( + proposals: Boolean, + timeQuery: TimeQueryX, + recentTimestampO: Option[CantonTimestamp], + op: Option[TopologyChangeOpX], + typ: Option[TopologyMappingX.Code], + idFilter: String, + namespaceOnly: Boolean, + )(implicit + traceContext: TraceContext + ): Future[StoredTopologyTransactionsX[TopologyChangeOpX, TopologyMappingX]] = { + val timeFilter: SQLActionBuilderChain = timeQuery match { + case TimeQueryX.HeadState => + getHeadStateQuery(recentTimestampO) + case TimeQueryX.Snapshot(asOf) => + asOfQuery(asOf = asOf, asOfInclusive = false) + case TimeQueryX.Range(None, None) => + sql"" // The case below inserts an additional `AND` that we don't want + case TimeQueryX.Range(from, until) => + sql" AND " ++ ((from.toList.map(ts => sql"valid_from >= $ts") ++ until.toList.map(ts => + sql"valid_from <= $ts" + )) + .intercalate(sql" AND ")) + } + + val operationAndPreviousFilter = op match { + case Some(value) => + 
timeFilter ++ sql" AND operation = $value" + case None => timeFilter + } + + val idAndPreviousFilter = andIdFilter(operationAndPreviousFilter, idFilter, namespaceOnly) + + val mappingTypeAndPreviousFilter = typ match { + case Some(value) => idAndPreviousFilter ++ sql" AND transaction_type = $value" + case None => idAndPreviousFilter + } + + val mappingProposalsAndPreviousFilter = + mappingTypeAndPreviousFilter ++ sql" AND is_proposal = $proposals" + + queryForTransactions(mappingProposalsAndPreviousFilter) + } + + @SuppressWarnings(Array("com.digitalasset.canton.SlickString")) + override def inspectKnownParties( + timestamp: CantonTimestamp, + filterParty: String, + filterParticipant: String, + limit: Int, + )(implicit traceContext: TraceContext): Future[Set[PartyId]] = { + def splitFilterPrefixAndSql(uidFilter: String): (String, String, String, String) = + UniqueIdentifier.splitFilter(uidFilter) match { + case (id, ns) => (id, ns, id + "%", ns + "%") + } + + val (prefixPartyIdentifier, prefixPartyNS, sqlPartyIdentifier, sqlPartyNS) = + splitFilterPrefixAndSql(filterParty) + val ( + prefixParticipantIdentifier, + prefixParticipantNS, + sqlParticipantIdentifier, + sqlParticipantNS, + ) = + splitFilterPrefixAndSql(filterParticipant) + + // conditional append avoids "like '%'" filters on empty filters + def conditionalAppend(filter: String, sqlIdentifier: String, sqlNamespace: String) = + if (filter.nonEmpty) + sql" AND identifier LIKE ${sqlIdentifier} AND namespace LIKE ${sqlNamespace}" + else sql"" + + queryForTransactions( + asOfQuery(timestamp, asOfInclusive = false) ++ + sql" AND NOT is_proposal AND operation = ${TopologyChangeOpX.Replace} AND (" + // PartyToParticipantX filtering + ++ Seq( + sql"(transaction_type = ${PartyToParticipantX.code}" + ++ conditionalAppend(filterParty, sqlPartyIdentifier, sqlPartyNS) + ++ sql")" + ) + ++ sql" OR " + // DomainTrustCertificateX filtering + ++ Seq( + sql"(transaction_type = ${DomainTrustCertificateX.code}" + // In 
DomainTrustCertificateX part of the filter, compare not only to participant, but also to party identifier + // to enable searching for the admin party + ++ conditionalAppend(filterParty, sqlPartyIdentifier, sqlPartyNS) + ++ conditionalAppend(filterParticipant, sqlParticipantIdentifier, sqlParticipantNS) + ++ sql")" + ) + ++ sql")", + storage.limit(limit), + ) + .map( + _.result.toSet + .flatMap[PartyId](_.transaction.transaction.mapping match { + // TODO(#14061): post-filtering for participantId non-columns results in fewer than limit results being returned + // - add indexed secondary uid and/or namespace columns for participant-ids - also to support efficient lookup + // of "what parties a particular participant hosts" (ParticipantId => Set[PartyId]) + case ptp: PartyToParticipantX + if filterParticipant.isEmpty || ptp.participants + .exists( + _.participantId.uid + .matchesPrefixes(prefixParticipantIdentifier, prefixParticipantNS) + ) => + Set(ptp.partyId) + case cert: DomainTrustCertificateX + if filterParty.isEmpty || cert.participantId.adminParty.uid + .matchesPrefixes(prefixPartyIdentifier, prefixPartyNS) => + Set(cert.participantId.adminParty) + case _ => Set.empty + }) + ) + } + + override def findPositiveTransactions( + asOf: CantonTimestamp, + asOfInclusive: Boolean, + isProposal: Boolean, + types: Seq[TopologyMappingX.Code], + filterUid: Option[Seq[UniqueIdentifier]], + filterNamespace: Option[Seq[Namespace]], + )(implicit traceContext: TraceContext): Future[PositiveStoredTopologyTransactionsX] = + findTransactionsBatchingUidFilter( + asOf, + asOfInclusive, + isProposal, + types.toSet, + filterUid, + filterNamespace, + TopologyChangeOpX.Replace.some, + ).map(_.collectOfType[TopologyChangeOpX.Replace]) + + override def findFirstMediatorStateForMediator(mediatorId: MediatorId)(implicit + traceContext: TraceContext + ): Future[Option[StoredTopologyTransactionX[Replace, MediatorDomainStateX]]] = + queryForTransactions( + // We don't expect too many 
MediatorDomainStateX mappings in a single domain, so fetching them all from the db + // is acceptable and also because we don't expect to run this query frequently. We can only evaluate the + // `mediatorId` field locally as the mediator-id is not exposed in a separate column. + sql" AND is_proposal = false" ++ + sql" AND operation = ${TopologyChangeOpX.Replace}" ++ + sql" AND transaction_type = ${MediatorDomainStateX.code}" + ).map( + _.collectOfMapping[MediatorDomainStateX] + .collectOfType[Replace] + .result + .collect { + case tx + if tx.transaction.transaction.mapping.observers.contains(mediatorId) || + tx.transaction.transaction.mapping.active.contains(mediatorId) => + tx + } + .sortBy(_.transaction.transaction.serial) + .headOption + ) + + override def findFirstTrustCertificateForParticipant(participant: ParticipantId)(implicit + traceContext: TraceContext + ): Future[Option[StoredTopologyTransactionX[Replace, DomainTrustCertificateX]]] = + queryForTransactions( + sql" AND is_proposal = false" ++ + sql" AND operation = ${TopologyChangeOpX.Replace}" ++ + sql" AND transaction_type = ${DomainTrustCertificateX.code}" ++ + sql" AND identifier = ${participant.uid.id} AND namespace = ${participant.uid.namespace}", + limit = storage.limit(1), + orderBy = " ORDER BY serial_counter ", + ).map( + _.collectOfMapping[DomainTrustCertificateX] + .collectOfType[Replace] + .result + .headOption + ) + + override def findEssentialStateForMember(member: Member, asOfInclusive: CantonTimestamp)(implicit + traceContext: TraceContext + ): Future[GenericStoredTopologyTransactionsX] = { + val timeFilter = sql" AND sequenced <= $asOfInclusive" + queryForTransactions(timeFilter).map(_.asSnapshotAtMaxEffectiveTime) + } + + override def bootstrap(snapshot: GenericStoredTopologyTransactionsX)(implicit + traceContext: TraceContext + ): Future[Unit] = updatingTime.event { + // inserts must not be processed in parallel to keep the insertion order (as indicated by the `id` column) + // in 
sync with the monotonicity of sequenced + performBatchedDbOperation(snapshot.result, "bootstrap", processInParallel = false) { txs => + insertSignedTransaction[GenericStoredTopologyTransactionX](TransactionEntry.fromStoredTx)(txs) + } + } + + override def findUpcomingEffectiveChanges(asOfInclusive: CantonTimestamp)(implicit + traceContext: TraceContext + ): Future[Seq[TopologyStore.Change]] = queryForTransactions( + sql" AND valid_from >= $asOfInclusive ", + orderBy = " ORDER BY valid_from", + ).map(res => TopologyStoreX.accumulateUpcomingEffectiveChanges(res.result)) + + override def maxTimestamp()(implicit + traceContext: TraceContext + ): Future[Option[(SequencedTime, EffectiveTime)]] = + queryForTransactions(sql"", storage.limit(1), orderBy = " ORDER BY id DESC") + .map(_.result.headOption.map(tx => (tx.sequenced, tx.validFrom))) + + override def findDispatchingTransactionsAfter( + timestampExclusive: CantonTimestamp, + limitO: Option[Int], + )(implicit + traceContext: TraceContext + ): Future[GenericStoredTopologyTransactionsX] = { + val subQuery = + sql" AND valid_from > $timestampExclusive AND (not is_proposal OR valid_until is NULL)" + val limitQ = limitO.fold("")(storage.limit(_)) + queryForTransactions(subQuery, limitQ) + } + + override def findStored( + transaction: GenericSignedTopologyTransactionX, + includeRejected: Boolean = false, + )(implicit + traceContext: TraceContext + ): Future[Option[GenericStoredTopologyTransactionX]] = + findStoredSql(transaction.transaction, includeRejected = includeRejected).map( + _.result.lastOption + ) + + override def findStoredForVersion( + transaction: GenericTopologyTransactionX, + protocolVersion: ProtocolVersion, + )(implicit + traceContext: TraceContext + ): Future[Option[GenericStoredTopologyTransactionX]] = { + val rpv = TopologyTransactionX.protocolVersionRepresentativeFor(protocolVersion) + + findStoredSql( + transaction, + subQuery = sql" AND representative_protocol_version = ${rpv.representative}", + 
).map(_.result.lastOption) + } + + override def findParticipantOnboardingTransactions( + participantId: ParticipantId, + domainId: DomainId, + )(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Seq[GenericSignedTopologyTransactionX]] = for { + transactions <- FutureUnlessShutdown + .outcomeF( + queryForTransactions( + sql" AND not is_proposal " ++ + sql" AND transaction_type IN (" ++ TopologyStoreX.initialParticipantDispatchingSet.toList + .map(s => sql"$s") + .intercalate(sql", ") ++ sql") " + ) + ) + filteredTransactions = TopologyStoreX.filterInitialParticipantDispatchingTransactions( + participantId, + domainId, + transactions.result, + ) + } yield filteredTransactions + + // Insert helper shared by bootstrap and update. + private def insertSignedTransaction[T](toTxEntry: T => TransactionEntry)( + transactions: Seq[T] + ): SqlStreamingAction[Vector[Int], Int, slick.dbio.Effect.Write]#ResultAction[ + Int, + NoStream, + Effect.Write, + ] = { + def sqlTransactionParameters(transaction: T) = { + val txEntry = toTxEntry(transaction) + val signedTx = txEntry.signedTx + val validFrom = txEntry.validFrom.value + val validUntil = txEntry.validUntil.map(_.value) + val sequencedTs = txEntry.sequenced.value + val operation = signedTx.operation + val mapping = signedTx.transaction.mapping + val transactionType = mapping.code + val namespace = mapping.namespace + val identifier = mapping.maybeUid.map(_.id.toLengthLimitedString).getOrElse(String185.empty) + val serial = signedTx.transaction.serial + val mappingHash = mapping.uniqueKey.hash.toLengthLimitedHexString + val reason = txEntry.rejectionReason.map(_.asString1GB) + val txHash = signedTx.transaction.hash.hash.toLengthLimitedHexString + val isProposal = signedTx.isProposal + val representativeProtocolVersion = signedTx.transaction.representativeProtocolVersion + val hashOfSignatures = signedTx.hashOfSignatures.toLengthLimitedHexString + + storage.profile match { + case _: DbStorage.Profile.Postgres | _: 
DbStorage.Profile.H2 => + sql"""($transactionStoreIdName, $sequencedTs, $validFrom, $validUntil, $transactionType, $namespace, + $identifier, $mappingHash, $serial, $operation, $signedTx, $txHash, $isProposal, $reason, $representativeProtocolVersion, $hashOfSignatures)""" + case _: DbStorage.Profile.Oracle => + throw new IllegalStateException("Oracle not supported by daml 3.0/X yet") + } + } + + storage.profile match { + case _: DbStorage.Profile.Postgres | _: DbStorage.Profile.H2 => + (sql"""INSERT INTO topology_transactions_x (store_id, sequenced, valid_from, valid_until, transaction_type, namespace, + identifier, mapping_key_hash, serial_counter, operation, instance, tx_hash, is_proposal, rejection_reason, representative_protocol_version, hash_of_signatures) VALUES""" ++ + transactions + .map(sqlTransactionParameters) + .toList + .intercalate(sql", ") + ++ sql" ON CONFLICT DO NOTHING" // idempotency-"conflict" based on topology_transactions_x unique constraint + ).asUpdate + case _: DbStorage.Profile.Oracle => + throw new IllegalStateException("Oracle not supported by daml 3.0/X yet") + } + } + + // Helper to break up large uid-filters into batches to limit the size of sql "in-clauses". 
+ // Fashioned to reuse lessons learned in 2.x-based DbTopologyStore + private def findTransactionsBatchingUidFilter( + asOf: CantonTimestamp, + asOfInclusive: Boolean, + isProposal: Boolean, + types: Set[TopologyMappingX.Code], + filterUid: Option[Seq[UniqueIdentifier]], + filterNamespace: Option[Seq[Namespace]], + filterOp: Option[TopologyChangeOpX], + )(implicit + traceContext: TraceContext + ): Future[GenericStoredTopologyTransactionsX] = { + def forwardBatch(filterUidsNew: Option[Seq[UniqueIdentifier]]) = + findTransactionsSingleBatch( + asOf, + asOfInclusive, + isProposal, + types, + filterUidsNew, + filterNamespace, + filterOp, + ) + + filterUid.map( + // Optimization: remove uid-filters made redundant by namespace filters + _.filterNot(uid => filterNamespace.exists(_.contains(uid.namespace))) + ) match { + case None => forwardBatch(None) + case Some(uids) => + MonadUtil + .batchedSequentialTraverse( + parallelism = storage.threadsAvailableForWriting, + chunkSize = maxItemsInSqlQuery, + )(uids) { batchedUidFilters => forwardBatch(Some(batchedUidFilters)).map(_.result) } + .map(StoredTopologyTransactionsX(_)) + } + } + + private def findTransactionsSingleBatch( + asOf: CantonTimestamp, + asOfInclusive: Boolean, + isProposal: Boolean, + types: Set[TopologyMappingX.Code], + filterUid: Option[Seq[UniqueIdentifier]], + filterNamespace: Option[Seq[Namespace]], + filterOp: Option[TopologyChangeOpX], + )(implicit + traceContext: TraceContext + ): Future[GenericStoredTopologyTransactionsX] = { + val hasUidFilter = filterUid.nonEmpty || filterNamespace.nonEmpty + // exit early if the caller produced an empty uid/namespace filter batch: + if (hasUidFilter && filterUid.forall(_.isEmpty) && filterNamespace.forall(_.isEmpty)) { + Future.successful(StoredTopologyTransactionsX.empty) + } else { + val timeRangeFilter = asOfQuery(asOf, asOfInclusive) + val isProposalFilter = sql" AND is_proposal = $isProposal" + val changeOpFilter = filterOp.fold(sql"")(op => sql" AND 
operation = $op") + val mappingTypeFilter = + sql" AND transaction_type IN (" ++ types.toSeq + .map(t => sql"$t") + .intercalate(sql", ") ++ sql")" + val uidNamespaceFilter = + if (hasUidFilter) { + val namespaceFilter = filterNamespace.toList.flatMap(_.map(ns => sql"namespace = $ns")) + val uidFilter = + filterUid.toList.flatten.map(uid => + sql"(identifier = ${uid.id} AND namespace = ${uid.namespace})" + ) + sql" AND (" ++ (namespaceFilter ++ uidFilter).intercalate(sql" OR ") ++ sql")" + } else SQLActionBuilderChain(sql"") + + queryForTransactions( + timeRangeFilter ++ isProposalFilter ++ changeOpFilter ++ mappingTypeFilter ++ uidNamespaceFilter + ) + } + } + + private def findAsOfExclusive( + effective: EffectiveTime, + subQuery: SQLActionBuilder, + )(implicit traceContext: TraceContext): Future[Seq[GenericSignedTopologyTransactionX]] = { + queryForTransactions(asOfQuery(effective.value, asOfInclusive = false) ++ subQuery) + .map(_.result.map(_.transaction)) + } + + private def findStoredSql( + transaction: GenericTopologyTransactionX, + subQuery: SQLActionBuilder = sql"", + includeRejected: Boolean = false, + )(implicit + traceContext: TraceContext + ): Future[GenericStoredTopologyTransactionsX] = { + val mapping = transaction.mapping + queryForTransactions( + // Query for leading fields of `topology_transactions_x_idx` to enable use of this index + sql" AND transaction_type = ${mapping.code} AND namespace = ${mapping.namespace} AND identifier = ${mapping.maybeUid + .fold(String185.empty)(_.id.toLengthLimitedString)}" + ++ sql" AND mapping_key_hash = ${mapping.uniqueKey.hash.toLengthLimitedHexString}" + ++ sql" AND serial_counter = ${transaction.serial}" + ++ sql" AND tx_hash = ${transaction.hash.hash.toLengthLimitedHexString}" + ++ sql" AND operation = ${transaction.op}" + ++ subQuery, + includeRejected = includeRejected, + ) + } + + private def queryForTransactions( + subQuery: SQLActionBuilder, + limit: String = "", + orderBy: String = " ORDER BY id ", + 
includeRejected: Boolean = false, + )(implicit + traceContext: TraceContext + ): Future[GenericStoredTopologyTransactionsX] = { + val query = + sql"SELECT instance, sequenced, valid_from, valid_until FROM topology_transactions_x WHERE store_id = $transactionStoreIdName" ++ + subQuery ++ (if (!includeRejected) sql" AND rejection_reason IS NULL" + else sql"") ++ sql" #${orderBy} #${limit}" + readTime.event { + storage + .query( + query.as[ + ( + GenericSignedTopologyTransactionX, + CantonTimestamp, + CantonTimestamp, + Option[CantonTimestamp], + ) + ], + functionFullName, + ) + .map(_.map { case (tx, sequencedTs, validFrom, validUntil) => + StoredTopologyTransactionX( + SequencedTime(sequencedTs), + EffectiveTime(validFrom), + validUntil.map(EffectiveTime(_)), + tx, + ) + }) + .map(StoredTopologyTransactionsX(_)) + } + } +} + +// Helper case class to hold StoredTopologyTransactionX-fields in update() providing umbrella +// values for all transactions. +private[db] final case class TransactionEntry( + sequenced: SequencedTime, + validFrom: EffectiveTime, + validUntil: Option[EffectiveTime], + signedTx: GenericSignedTopologyTransactionX, + rejectionReason: Option[TopologyTransactionRejection] = None, +) + +private[db] object TransactionEntry { + def fromStoredTx(stx: GenericStoredTopologyTransactionX): TransactionEntry = TransactionEntry( + stx.sequenced, + stx.validFrom, + stx.validUntil, + stx.transaction, + rejectionReason = None, + ) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/store/memory/InMemoryTopologyStore.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/store/memory/InMemoryTopologyStore.scala new file mode 100644 index 0000000000..da49c6baec --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/store/memory/InMemoryTopologyStore.scala @@ -0,0 +1,621 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. 
All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology.store.memory + +import cats.syntax.functorFilter.* +import com.digitalasset.canton.concurrent.FutureSupervisor +import com.digitalasset.canton.config.CantonRequireTypes.LengthLimitedString.DisplayName +import com.digitalasset.canton.config.CantonRequireTypes.String255 +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.crypto.PublicKey +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.topology.* +import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime} +import com.digitalasset.canton.topology.store.TopologyStore.InsertTransaction +import com.digitalasset.canton.topology.store.* +import com.digitalasset.canton.topology.transaction.TopologyChangeOp.{Add, Positive, Remove} +import com.digitalasset.canton.topology.transaction.* +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.version.ProtocolVersion + +import java.util.concurrent.atomic.AtomicReference +import scala.annotation.nowarn +import scala.collection.concurrent.TrieMap +import scala.collection.mutable.ArrayBuffer +import scala.concurrent.{ExecutionContext, Future, blocking} + +class InMemoryPartyMetadataStore extends PartyMetadataStore { + + private val store = TrieMap[PartyId, PartyMetadata]() + + override def insertOrUpdatePartyMetadata( + partyId: PartyId, + participantId: Option[ParticipantId], + displayName: Option[DisplayName], + effectiveTimestamp: CantonTimestamp, + submissionId: String255, + )(implicit traceContext: TraceContext): Future[Unit] = { + store + .put( + partyId, + PartyMetadata(partyId, displayName, participantId)( + effectiveTimestamp = effectiveTimestamp, + submissionId = submissionId, + ), + ) + .discard + Future.unit 
+ + } + + override def metadataForParty(partyId: PartyId)(implicit + traceContext: TraceContext + ): Future[Option[PartyMetadata]] = + Future.successful(store.get(partyId)) + + override def markNotified( + metadata: PartyMetadata + )(implicit traceContext: TraceContext): Future[Unit] = { + store.get(metadata.partyId) match { + case Some(cur) if cur.effectiveTimestamp == metadata.effectiveTimestamp => + store + .put( + metadata.partyId, + metadata.copy()( + effectiveTimestamp = metadata.effectiveTimestamp, + submissionId = metadata.submissionId, + notified = true, + ), + ) + .discard + case _ => () + } + Future.unit + } + + override def fetchNotNotified()(implicit traceContext: TraceContext): Future[Seq[PartyMetadata]] = + Future.successful(store.values.filterNot(_.notified).toSeq) + + override def close(): Unit = () +} + +trait InMemoryTopologyStoreCommon[+StoreId <: TopologyStoreId] extends NamedLogging { + this: TopologyStoreCommon[StoreId, ?, ?, ?] => + + private val watermark = new AtomicReference[Option[CantonTimestamp]](None) + + @nowarn("cat=unused") + override def currentDispatchingWatermark(implicit + traceContext: TraceContext + ): Future[Option[CantonTimestamp]] = + Future.successful(watermark.get()) + + override def updateDispatchingWatermark( + timestamp: CantonTimestamp + )(implicit traceContext: TraceContext): Future[Unit] = { + watermark.getAndSet(Some(timestamp)) match { + case Some(old) if old > timestamp => + logger.error( + s"Topology dispatching watermark is running backwards! 
new=$timestamp, old=${old}" + ) + case _ => () + } + Future.unit + } + +} + +class InMemoryTopologyStore[+StoreId <: TopologyStoreId]( + val storeId: StoreId, + val loggerFactory: NamedLoggerFactory, + override val timeouts: ProcessingTimeout, + futureSupervisor: FutureSupervisor, +)(implicit val ec: ExecutionContext) + extends TopologyStore[StoreId] + with InMemoryTopologyStoreCommon[StoreId] + with NamedLogging { + + private case class TopologyStoreEntry[+Op <: TopologyChangeOp]( + operation: Op, + transaction: SignedTopologyTransaction[Op], + sequenced: SequencedTime, + from: EffectiveTime, + until: Option[EffectiveTime], + rejected: Option[String], + ) { + + def toStoredTransaction: StoredTopologyTransaction[Op] = + StoredTopologyTransaction(sequenced, from, until, transaction) + + def secondaryUid: Option[UniqueIdentifier] = + transaction.transaction.element.mapping.secondaryUid + + } + + // contains Add, Remove and Replace + private val topologyTransactionStore = ArrayBuffer[TopologyStoreEntry[TopologyChangeOp]]() + // contains only (Add, Replace) transactions that are authorized + private val topologyStateStore = ArrayBuffer[TopologyStoreEntry[Positive]]() + + override def append( + sequenced: SequencedTime, + effective: EffectiveTime, + transactions: Seq[ValidatedTopologyTransaction], + )(implicit traceContext: TraceContext): Future[Unit] = blocking(synchronized { + + val (updates, appends) = TopologyStore.appends(effective.value, transactions) + + // UPDATE topology_transactions SET valid_until = ts WHERE store_id = ... 
AND valid_until is NULL AND valid_from < ts AND path_id IN (updates) + updates.foreach { upd => + val idx = + topologyTransactionStore.indexWhere(x => + x.transaction.uniquePath == upd && x.until.isEmpty && x.from.value < effective.value + ) + if (idx > -1) { + val item = topologyTransactionStore(idx) + topologyTransactionStore.update(idx, item.copy(until = Some(effective))) + } + } + // INSERT INTO topology_transactions (path_id, store_id, valid_from, transaction_type, operation, instance) VALUES inserts ON CONFLICT DO NOTHING + appends.foreach { case InsertTransaction(trans, validUntil, rejectionReason) => + val operation = trans.operation + + // be idempotent + if ( + !topologyTransactionStore.exists(x => + x.transaction.uniquePath == trans.uniquePath && x.from == effective && x.operation == operation + ) + ) { + topologyTransactionStore.append( + TopologyStoreEntry( + operation, + trans, + sequenced, + effective, + validUntil.map(EffectiveTime(_)), + rejectionReason.map(_.asString), + ) + ) + } + } + Future.unit + }) + + private def asOfFilter( + asOf: CantonTimestamp, + asOfInclusive: Boolean, + ): (CantonTimestamp, Option[CantonTimestamp]) => Boolean = + if (asOfInclusive) { case (validFrom, validUntil) => + validFrom <= asOf && validUntil.forall(until => asOf < until) + } + else { case (validFrom, validUntil) => + validFrom < asOf && validUntil.forall(until => asOf <= until) + } + + override def timestamp( + useStateStore: Boolean + )(implicit traceContext: TraceContext): Future[Option[(SequencedTime, EffectiveTime)]] = + Future.successful( + (if (useStateStore) topologyStateStore else topologyTransactionStore).lastOption.map(x => + (x.sequenced, x.from) + ) + ) + + private def filteredState( + table: Seq[TopologyStoreEntry[TopologyChangeOp]], + filter: TopologyStoreEntry[TopologyChangeOp] => Boolean, + includeRejected: Boolean = false, + ): Future[StoredTopologyTransactions[TopologyChangeOp]] = + Future.successful( + StoredTopologyTransactions( + 
table.collect { + case entry if filter(entry) && (entry.rejected.isEmpty || includeRejected) => + entry.toStoredTransaction + } + ) + ) + + override def headTransactions(implicit + traceContext: TraceContext + ): Future[StoredTopologyTransactions[Positive]] = + filteredState( + blocking(synchronized(topologyTransactionStore.toSeq)), + x => x.until.isEmpty, + ).map(_.collectOfType[Positive]) + + /** finds transactions in the local store that would remove the topology state elements + */ + override def findRemovalTransactionForMappings( + mappings: Set[TopologyStateElement[TopologyMapping]] + )(implicit + traceContext: TraceContext + ): Future[Seq[SignedTopologyTransaction[Remove]]] = + Future.successful( + blocking(synchronized(topologyTransactionStore.toSeq)) + .map(_.transaction) + .mapFilter(TopologyChangeOp.select[Remove]) + .collect { + case sit @ SignedTopologyTransaction(TopologyStateUpdate(_, element), _, _) + if mappings.contains(element) => + sit + } + ) + + override def findPositiveTransactionsForMapping( + mapping: TopologyMapping + )(implicit + traceContext: TraceContext + ): Future[Seq[SignedTopologyTransaction[Positive]]] = + Future.successful( + blocking(synchronized(topologyTransactionStore.toSeq)) + .collect { case entry if entry.until.isEmpty => entry.transaction } + .mapFilter(TopologyChangeOp.select[Positive]) + .collect { + case sit if sit.transaction.element.mapping == mapping => sit + } + ) + + override def allTransactions(includeRejected: Boolean = false)(implicit + traceContext: TraceContext + ): Future[StoredTopologyTransactions[TopologyChangeOp]] = + filteredState( + blocking(synchronized(topologyTransactionStore.toSeq)), + _ => true, + includeRejected, + ) + + override def findStored( + transaction: SignedTopologyTransaction[TopologyChangeOp], + includeRejected: Boolean = false, + )(implicit + traceContext: TraceContext + ): Future[Option[StoredTopologyTransaction[TopologyChangeOp]]] = + 
allTransactions(includeRejected).map(_.result.find(_.transaction == transaction)) + + override def findStoredNoSignature(transaction: TopologyTransaction[TopologyChangeOp])(implicit + traceContext: TraceContext + ): Future[Seq[StoredTopologyTransaction[TopologyChangeOp]]] = + allTransactions().map( + _.result.filter(_.transaction.transaction.element.mapping == transaction.element.mapping) + ) + + override def findStoredForVersion( + transaction: TopologyTransaction[TopologyChangeOp], + protocolVersion: ProtocolVersion, + )(implicit + traceContext: TraceContext + ): Future[Option[StoredTopologyTransaction[TopologyChangeOp]]] = + allTransactions().map( + _.result.find(tx => + tx.transaction.transaction == transaction && tx.transaction.representativeProtocolVersion == TopologyTransaction + .protocolVersionRepresentativeFor(protocolVersion) + ) + ) + + override def findPositiveTransactions( + asOf: CantonTimestamp, + asOfInclusive: Boolean, + includeSecondary: Boolean, + types: Seq[DomainTopologyTransactionType], + filterUid: Option[Seq[UniqueIdentifier]], + filterNamespace: Option[Seq[Namespace]], + )(implicit traceContext: TraceContext): Future[PositiveStoredTopologyTransactions] = + findPositiveTransactionsInStore( + topologyTransactionStore, + asOf, + asOfInclusive, + includeSecondary, + types, + filterUid, + filterNamespace, + ) + + /** query interface used by [[com.digitalasset.canton.topology.client.StoreBasedTopologySnapshot]] */ + override def findStateTransactions( + asOf: CantonTimestamp, + asOfInclusive: Boolean, + includeSecondary: Boolean, + types: Seq[DomainTopologyTransactionType], + filterUid: Option[Seq[UniqueIdentifier]], + filterNamespace: Option[Seq[Namespace]], + )(implicit + traceContext: TraceContext + ): Future[PositiveStoredTopologyTransactions] = + findPositiveTransactionsInStore( + topologyStateStore, + asOf, + asOfInclusive, + includeSecondary, + types, + filterUid, + filterNamespace, + ) + + private def findTransactionsInStore[Op <: 
TopologyChangeOp]( + store: ArrayBuffer[TopologyStoreEntry[Op]], + asOf: CantonTimestamp, + asOfInclusive: Boolean, + includeSecondary: Boolean, + types: Seq[DomainTopologyTransactionType], + filterUid: Option[Seq[UniqueIdentifier]], + filterNamespace: Option[Seq[Namespace]], + ): Future[StoredTopologyTransactions[TopologyChangeOp]] = { + val timeFilter = asOfFilter(asOf, asOfInclusive) + def pathFilter(path: UniquePath): Boolean = { + if (filterUid.isEmpty && filterNamespace.isEmpty) + true + else { + path.maybeUid.exists(uid => filterUid.exists(_.contains(uid))) || + filterNamespace.exists(_.contains(path.namespace)) + } + } + // filter for secondary uids (required for cascading updates) + def secondaryFilter(entry: TopologyStoreEntry[TopologyChangeOp]): Boolean = + includeSecondary && + entry.secondaryUid.exists(uid => + filterNamespace.exists(_.contains(uid.namespace)) || + filterUid.exists(_.contains(uid)) + ) + + filteredState( + blocking(synchronized { store.toSeq }), + entry => { + timeFilter(entry.from.value, entry.until.map(_.value)) && + types.contains(entry.transaction.uniquePath.dbType) && + (pathFilter(entry.transaction.uniquePath) || secondaryFilter(entry)) + }, + ) + } + + private def findPositiveTransactionsInStore[Op <: TopologyChangeOp]( + store: ArrayBuffer[TopologyStoreEntry[Op]], + asOf: CantonTimestamp, + asOfInclusive: Boolean, + includeSecondary: Boolean, + types: Seq[DomainTopologyTransactionType], + filterUid: Option[Seq[UniqueIdentifier]], + filterNamespace: Option[Seq[Namespace]], + ): Future[PositiveStoredTopologyTransactions] = + findTransactionsInStore( + store = store, + asOf = asOf, + asOfInclusive = asOfInclusive, + includeSecondary = includeSecondary, + types = types, + filterUid = filterUid, + filterNamespace = filterNamespace, + ).map(_.positiveTransactions) + + /** query interface used by DomainTopologyManager to find the set of initial keys */ + override def findInitialState( + id: DomainTopologyManagerId + )(implicit 
traceContext: TraceContext): Future[Map[Member, Seq[PublicKey]]] = { + val res = topologyTransactionStore.foldLeft((false, Map.empty[Member, Seq[PublicKey]])) { + case ((false, acc), TopologyStoreEntry(Add, transaction, _, _, _, None)) => + TopologyStore.findInitialStateAccumulator(id.uid, acc, transaction) + case (acc, _) => acc + } + Future.successful(res._2) + } + + /** update active topology transaction to the active topology transaction table + * + * active means that for the key authorizing the transaction, there is a connected path to reach the root certificate + */ + override def updateState( + sequenced: SequencedTime, + effective: EffectiveTime, + deactivate: Seq[UniquePath], + positive: Seq[SignedTopologyTransaction[Positive]], + )(implicit traceContext: TraceContext): Future[Unit] = { + + blocking(synchronized { + val deactivateS = deactivate.toSet + // UPDATE topology_state SET valid_until = ts WHERE store_id = ... AND valid_from < ts AND valid_until is NULL and path_id in Deactivate) + deactivate.foreach { _up => + val idx = topologyStateStore.indexWhere(entry => + entry.from.value < effective.value && entry.until.isEmpty && + deactivateS.contains(entry.transaction.uniquePath) + ) + if (idx != -1) { + val item = topologyStateStore(idx) + topologyStateStore.update(idx, item.copy(until = Some(effective))) + } + } + + // INSERT IGNORE (sit) + positive.foreach { sit => + if ( + !topologyStateStore.exists(x => + x.transaction.uniquePath == sit.uniquePath && x.from.value == effective.value && x.operation == sit.operation + ) + ) { + topologyStateStore.append( + TopologyStoreEntry( + sit.operation, + sit, + sequenced, + effective, + None, + None, + ) + ) + } + } + }) + Future.unit + } + + override def inspectKnownParties( + timestamp: CantonTimestamp, + filterParty: String, + filterParticipant: String, + limit: Int, + )(implicit traceContext: TraceContext): Future[Set[PartyId]] = { + def filter(entry: TopologyStoreEntry[Positive]): Boolean = { + // active + 
entry.from.value < timestamp && entry.until.forall(until => timestamp <= until.value) && + // not rejected + entry.rejected.isEmpty && + // matches either a party to participant mapping (with appropriate filters) + ((entry.transaction.uniquePath.dbType == DomainTopologyTransactionType.PartyToParticipant && + entry.transaction.uniquePath.maybeUid.exists(_.toProtoPrimitive.startsWith(filterParty)) && + entry.secondaryUid.exists(_.toProtoPrimitive.startsWith(filterParticipant))) || + // or matches a participant with appropriate filters + (entry.transaction.uniquePath.dbType == DomainTopologyTransactionType.ParticipantState && + entry.transaction.uniquePath.maybeUid + .exists(_.toProtoPrimitive.startsWith(filterParty)) && + entry.transaction.uniquePath.maybeUid + .exists(_.toProtoPrimitive.startsWith(filterParticipant)))) + } + val topologyStateStoreSeq = blocking(synchronized(topologyStateStore.toSeq)) + Future.successful( + topologyStateStoreSeq + .foldLeft(Set.empty[PartyId]) { + case (acc, elem) if acc.size >= limit || !filter(elem) => acc + case (acc, elem) => elem.transaction.uniquePath.maybeUid.fold(acc)(x => acc + PartyId(x)) + } + ) + } + + /** query optimized for inspection */ + override def inspect( + stateStore: Boolean, + timeQuery: TimeQuery, + recentTimestampO: Option[CantonTimestamp], + ops: Option[TopologyChangeOp], + typ: Option[DomainTopologyTransactionType], + idFilter: String, + namespaceOnly: Boolean, + )(implicit + traceContext: TraceContext + ): Future[StoredTopologyTransactions[TopologyChangeOp]] = { + val store = if (stateStore) topologyStateStore else topologyTransactionStore + def mkAsOfFlt(asOf: CantonTimestamp): TopologyStoreEntry[TopologyChangeOp] => Boolean = entry => + asOfFilter(asOf, asOfInclusive = false)(entry.from.value, entry.until.map(_.value)) + val filter1: TopologyStoreEntry[TopologyChangeOp] => Boolean = timeQuery match { + case TimeQuery.HeadState => + // use recent timestamp to avoid race conditions (as we are looking + // 
directly into the store, while the recent time still needs to propagate) + recentTimestampO.map(mkAsOfFlt).getOrElse(entry => entry.until.isEmpty) + case TimeQuery.Snapshot(asOf) => mkAsOfFlt(asOf) + case TimeQuery.Range(from, until) => + entry => + from.forall(ts => entry.from.value >= ts) && until.forall(ts => entry.from.value <= ts) + } + + val filter2: TopologyStoreEntry[TopologyChangeOp] => Boolean = entry => + ops.forall(_ == entry.operation) + + val filter3: TopologyStoreEntry[TopologyChangeOp] => Boolean = { + if (idFilter.isEmpty) _ => true + else if (namespaceOnly) { entry => + entry.transaction.uniquePath.namespace.fingerprint.unwrap.startsWith(idFilter) + } else { + val splitted = idFilter.split(SafeSimpleString.delimiter) + val prefix = splitted(0) + if (splitted.lengthCompare(1) > 0) { + val suffix = splitted(1) + (entry: TopologyStoreEntry[TopologyChangeOp]) => + entry.transaction.uniquePath.maybeUid.forall(_.id.unwrap.startsWith(prefix)) && + entry.transaction.uniquePath.namespace.fingerprint.unwrap.startsWith(suffix) + } else { entry => + entry.transaction.uniquePath.maybeUid.forall(_.id.unwrap.startsWith(prefix)) + } + } + } + filteredState( + blocking(synchronized(store.toSeq)), + entry => + typ.forall(_ == entry.transaction.uniquePath.dbType) && filter1(entry) && filter2( + entry + ) && filter3(entry), + ) + } + + override def findUpcomingEffectiveChanges(asOfInclusive: CantonTimestamp)(implicit + traceContext: TraceContext + ): Future[Seq[TopologyStore.Change]] = + Future.successful( + TopologyStore.Change.accumulateUpcomingEffectiveChanges( + blocking(synchronized(topologyTransactionStore.toSeq)) + .filter(_.from.value >= asOfInclusive) + .map(_.toStoredTransaction) + ) + ) + + override def findDispatchingTransactionsAfter( + timestampExclusive: CantonTimestamp, + limit: Option[Int], + )(implicit traceContext: TraceContext): Future[StoredTopologyTransactions[TopologyChangeOp]] = + blocking(synchronized { + val selected = 
topologyTransactionStore + .filter(x => + x.from.value > timestampExclusive && (x.until.isEmpty || x.operation == TopologyChangeOp.Remove) && x.rejected.isEmpty + ) + .map(_.toStoredTransaction) + .toSeq + Future.successful(StoredTopologyTransactions(limit.fold(selected)(selected.take))) + }) + + override def findParticipantOnboardingTransactions( + participantId: ParticipantId, + domainId: DomainId, + )(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Seq[SignedTopologyTransaction[TopologyChangeOp]]] = { + val res = blocking(synchronized { + topologyTransactionStore.filter(x => + x.until.isEmpty && TopologyStore.initialParticipantDispatchingSet.contains( + x.transaction.uniquePath.dbType + ) + ) + }) + + TopologyStore.filterInitialParticipantDispatchingTransactions( + participantId, + domainId, + this, + loggerFactory, + StoredTopologyTransactions(res.map(_.toStoredTransaction).toSeq), + timeouts, + futureSupervisor, + ) + } + + override def findTsOfParticipantStateChangesBefore( + beforeExclusive: CantonTimestamp, + participantId: ParticipantId, + limit: Int, + )(implicit traceContext: TraceContext): Future[Seq[CantonTimestamp]] = blocking(synchronized { + val ret = topologyTransactionStore + .filter(x => + x.from.value < beforeExclusive && + x.transaction.transaction.element.mapping.dbType == DomainTopologyTransactionType.ParticipantState && + x.transaction.uniquePath.maybeUid.contains(participantId.uid) + ) + .map(_.from.value) + .sorted(CantonTimestamp.orderCantonTimestamp.toOrdering.reverse) + .take(limit) + Future.successful(ret.toSeq) + }) + + override def findTransactionsInRange( + asOfExclusive: CantonTimestamp, + upToExclusive: CantonTimestamp, + )(implicit traceContext: TraceContext): Future[StoredTopologyTransactions[TopologyChangeOp]] = + blocking(synchronized { + val ret = topologyTransactionStore + .filter(x => + x.from.value > asOfExclusive && x.from.value < upToExclusive && x.rejected.isEmpty + ) + .map(_.toStoredTransaction) + 
Future.successful(StoredTopologyTransactions(ret.toSeq)) + }) + + override def onClosed(): Unit = () + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/store/memory/InMemoryTopologyStoreX.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/store/memory/InMemoryTopologyStoreX.scala new file mode 100644 index 0000000000..ac0f18dbaf --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/store/memory/InMemoryTopologyStoreX.scala @@ -0,0 +1,516 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology.store.memory + +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.topology.* +import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime} +import com.digitalasset.canton.topology.store.StoredTopologyTransactionX.GenericStoredTopologyTransactionX +import com.digitalasset.canton.topology.store.StoredTopologyTransactionsX.{ + GenericStoredTopologyTransactionsX, + PositiveStoredTopologyTransactionsX, +} +import com.digitalasset.canton.topology.store.ValidatedTopologyTransactionX.GenericValidatedTopologyTransactionX +import com.digitalasset.canton.topology.store.* +import com.digitalasset.canton.topology.transaction.SignedTopologyTransactionX.GenericSignedTopologyTransactionX +import com.digitalasset.canton.topology.transaction.TopologyMappingX.MappingHash +import com.digitalasset.canton.topology.transaction.TopologyTransactionX.{ + GenericTopologyTransactionX, + TxHash, +} +import com.digitalasset.canton.topology.transaction.* +import 
com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.version.ProtocolVersion + +import scala.collection.mutable.ArrayBuffer +import scala.concurrent.{ExecutionContext, Future, blocking} + +class InMemoryTopologyStoreX[+StoreId <: TopologyStoreId]( + val storeId: StoreId, + val loggerFactory: NamedLoggerFactory, + override val timeouts: ProcessingTimeout, +)(implicit ec: ExecutionContext) + extends TopologyStoreX[StoreId] + with InMemoryTopologyStoreCommon[StoreId] + with NamedLogging { + + override def onClosed(): Unit = () + + private case class TopologyStoreEntry( + transaction: GenericSignedTopologyTransactionX, + sequenced: SequencedTime, + from: EffectiveTime, + rejected: Option[String], + until: Option[EffectiveTime], + ) { + def toStoredTransaction: StoredTopologyTransactionX[TopologyChangeOpX, TopologyMappingX] = + StoredTopologyTransactionX(sequenced, from, until, transaction) + } + + private val topologyTransactionStore = ArrayBuffer[TopologyStoreEntry]() + + def findTransactionsByTxHash(asOfExclusive: EffectiveTime, hashes: NonEmpty[Set[TxHash]])(implicit + traceContext: TraceContext + ): Future[Seq[GenericSignedTopologyTransactionX]] = findFilter( + asOfExclusive, + entry => hashes.contains(entry.transaction.transaction.hash), + ) + + override def findProposalsByTxHash( + asOfExclusive: EffectiveTime, + hashes: NonEmpty[Set[TxHash]], + )(implicit traceContext: TraceContext): Future[Seq[GenericSignedTopologyTransactionX]] = { + findFilter( + asOfExclusive, + entry => hashes.contains(entry.transaction.transaction.hash) && entry.transaction.isProposal, + ) + } + + private def findFilter( + asOfExclusive: EffectiveTime, + filter: TopologyStoreEntry => Boolean, + ): Future[Seq[GenericSignedTopologyTransactionX]] = { + blocking { + synchronized { + val res = topologyTransactionStore + .filter(x => + x.from.value < asOfExclusive.value + && x.rejected.isEmpty + && (x.until.forall(_.value >= asOfExclusive.value)) + && filter(x) + ) + 
.map(_.transaction) + .toSeq + Future.successful(res) + } + } + } + + override def findTransactionsForMapping( + asOfExclusive: EffectiveTime, + hashes: NonEmpty[Set[MappingHash]], + )(implicit + traceContext: TraceContext + ): Future[Seq[GenericSignedTopologyTransactionX]] = { + findFilter( + asOfExclusive, + entry => + !entry.transaction.isProposal && hashes.contains( + entry.transaction.transaction.mapping.uniqueKey + ), + ) + } + + override def update( + sequenced: SequencedTime, + effective: EffectiveTime, + removeMapping: Set[TopologyMappingX.MappingHash], + removeTxs: Set[TopologyTransactionX.TxHash], + additions: Seq[GenericValidatedTopologyTransactionX], + )(implicit traceContext: TraceContext): Future[Unit] = + blocking { + synchronized { + // transactionally + // UPDATE txs SET valid_until = effective WHERE effective < $effective AND valid_from is NULL + // AND ((mapping_key_hash IN $removeMapping) OR (tx_hash IN $removeTxs)) + // INSERT IGNORE DUPLICATES (...) + topologyTransactionStore.zipWithIndex.foreach { case (tx, idx) => + if ( + tx.from.value < effective.value && tx.until.isEmpty && (removeMapping.contains( + tx.transaction.transaction.mapping.uniqueKey + ) || removeTxs.contains(tx.transaction.transaction.hash)) + ) { + topologyTransactionStore.update(idx, tx.copy(until = Some(effective))) + } + } + topologyTransactionStore.appendAll( + additions.map(tx => + TopologyStoreEntry( + tx.transaction, + sequenced, + from = effective, + rejected = tx.rejectionReason.map(_.toString), + until = Option.when( + tx.rejectionReason.nonEmpty || tx.expireImmediately + )(effective), + ) + ) + ) + Future.unit + } + } + + // TODO(#14048) only a temporary crutch to inspect the topology state + override def dumpStoreContent()(implicit traceContext: TraceContext): Unit = { + blocking { + synchronized { + logger.debug( + topologyTransactionStore + .map(_.toString) + .mkString("Topology Store Content[", ", ", "]") + ) + + } + } + } + + private def asOfFilter( + asOf: 
CantonTimestamp, + asOfInclusive: Boolean, + ): (CantonTimestamp, Option[CantonTimestamp]) => Boolean = + if (asOfInclusive) { case (validFrom, validUntil) => + validFrom <= asOf && validUntil.forall(until => asOf < until) + } + else { case (validFrom, validUntil) => + validFrom < asOf && validUntil.forall(until => asOf <= until) + } + + private def filteredState( + table: Seq[TopologyStoreEntry], + filter: TopologyStoreEntry => Boolean, + includeRejected: Boolean = false, + ): Future[StoredTopologyTransactionsX[TopologyChangeOpX, TopologyMappingX]] = + Future.successful( + StoredTopologyTransactionsX( + table.collect { + case entry if filter(entry) && (entry.rejected.isEmpty || includeRejected) => + entry.toStoredTransaction + } + ) + ) + + override def inspectKnownParties( + timestamp: CantonTimestamp, + filterParty: String, + filterParticipant: String, + limit: Int, + )(implicit traceContext: TraceContext): Future[Set[PartyId]] = { + val (prefixPartyIdentifier, prefixPartyNS) = UniqueIdentifier.splitFilter(filterParty) + val (prefixParticipantIdentifier, prefixParticipantNS) = + UniqueIdentifier.splitFilter(filterParticipant) + + def filter(entry: TopologyStoreEntry): Boolean = { + // active + entry.from.value < timestamp && entry.until.forall(until => timestamp <= until.value) && + // not rejected + entry.rejected.isEmpty && + // is not a proposal + !entry.transaction.isProposal && + // is of type Replace + entry.transaction.operation == TopologyChangeOpX.Replace && + // matches a party to participant mapping (with appropriate filters) + (entry.transaction.transaction.mapping match { + case ptp: PartyToParticipantX => + ptp.partyId.uid.matchesPrefixes(prefixPartyIdentifier, prefixPartyNS) && + (filterParticipant.isEmpty || + ptp.participants.exists( + _.participantId.uid + .matchesPrefixes(prefixParticipantIdentifier, prefixParticipantNS) + )) + case cert: DomainTrustCertificateX => + cert.participantId.adminParty.uid + .matchesPrefixes(prefixPartyIdentifier, 
prefixPartyNS) && + cert.participantId.uid + .matchesPrefixes(prefixParticipantIdentifier, prefixParticipantNS) + case _ => false + }) + } + + val topologyStateStoreSeq = blocking(synchronized(topologyTransactionStore.toSeq)) + Future.successful( + topologyStateStoreSeq + .foldLeft(Set.empty[PartyId]) { + case (acc, elem) if acc.size >= limit || !filter(elem) => acc + case (acc, elem) => + elem.transaction.transaction.mapping.maybeUid.fold(acc)(x => acc + PartyId(x)) + } + ) + } + + override def inspect( + proposals: Boolean, + timeQuery: TimeQueryX, + recentTimestampO: Option[CantonTimestamp], + op: Option[TopologyChangeOpX], + typ: Option[TopologyMappingX.Code], + idFilter: String, + namespaceOnly: Boolean, + )(implicit + traceContext: TraceContext + ): Future[StoredTopologyTransactionsX[TopologyChangeOpX, TopologyMappingX]] = { + def mkAsOfFilter(asOf: CantonTimestamp): TopologyStoreEntry => Boolean = entry => + asOfFilter(asOf, asOfInclusive = false)(entry.from.value, entry.until.map(_.value)) + + val filter1: TopologyStoreEntry => Boolean = timeQuery match { + case TimeQueryX.HeadState => + // use recent timestamp to avoid race conditions (as we are looking + // directly into the store, while the recent time still needs to propagate) + recentTimestampO.map(mkAsOfFilter).getOrElse(entry => entry.until.isEmpty) + case TimeQueryX.Snapshot(asOf) => mkAsOfFilter(asOf) + case TimeQueryX.Range(from, until) => + entry => + from.forall(ts => entry.from.value >= ts) && until.forall(ts => entry.from.value <= ts) + } + + val filter2: TopologyStoreEntry => Boolean = entry => + op.forall(_ == entry.transaction.operation) + + val filter3: TopologyStoreEntry => Boolean = { + if (idFilter.isEmpty) _ => true + else if (namespaceOnly) { entry => + entry.transaction.transaction.mapping.namespace.fingerprint.unwrap.startsWith(idFilter) + } else { + val split = idFilter.split(SafeSimpleString.delimiter) + val prefix = split(0) + if (split.lengthCompare(1) > 0) { + val suffix = 
split(1) + (entry: TopologyStoreEntry) => + entry.transaction.transaction.mapping.maybeUid.exists(_.id.unwrap.startsWith(prefix)) && + entry.transaction.transaction.mapping.namespace.fingerprint.unwrap.startsWith(suffix) + } else { entry => + entry.transaction.transaction.mapping.maybeUid.exists(_.id.unwrap.startsWith(prefix)) + } + } + } + filteredState( + blocking(synchronized(topologyTransactionStore.toSeq)), + entry => + typ.forall( + _ == entry.transaction.transaction.mapping.code + ) && (entry.transaction.isProposal == proposals) && filter1(entry) && filter2( + entry + ) && filter3(entry), + ) + } + + override def findPositiveTransactions( + asOf: CantonTimestamp, + asOfInclusive: Boolean, + isProposal: Boolean, + types: Seq[TopologyMappingX.Code], + filterUid: Option[Seq[UniqueIdentifier]], + filterNamespace: Option[Seq[Namespace]], + )(implicit traceContext: TraceContext): Future[PositiveStoredTopologyTransactionsX] = + findTransactionsInStore(asOf, asOfInclusive, isProposal, types, filterUid, filterNamespace).map( + _.collectOfType[TopologyChangeOpX.Replace] + ) + + private def findTransactionsInStore( + asOf: CantonTimestamp, + asOfInclusive: Boolean, + isProposal: Boolean, + types: Seq[TopologyMappingX.Code], + filterUid: Option[Seq[UniqueIdentifier]], + filterNamespace: Option[Seq[Namespace]], + ): Future[GenericStoredTopologyTransactionsX] = { + val timeFilter = asOfFilter(asOf, asOfInclusive) + def pathFilter(mapping: TopologyMappingX): Boolean = { + if (filterUid.isEmpty && filterNamespace.isEmpty) + true + else { + mapping.maybeUid.exists(uid => filterUid.exists(_.contains(uid))) || + filterNamespace.exists(_.contains(mapping.namespace)) + } + } + filteredState( + blocking(synchronized { topologyTransactionStore.toSeq }), + entry => { + timeFilter(entry.from.value, entry.until.map(_.value)) && + types.contains(entry.transaction.transaction.mapping.code) && + (pathFilter(entry.transaction.transaction.mapping)) && + entry.transaction.isProposal == 
isProposal + }, + ) + } + + override def findFirstMediatorStateForMediator( + mediatorId: MediatorId + )(implicit + traceContext: TraceContext + ): Future[Option[StoredTopologyTransactionX[TopologyChangeOpX.Replace, MediatorDomainStateX]]] = { + filteredState( + blocking(synchronized(topologyTransactionStore.toSeq)), + entry => + !entry.transaction.isProposal && + entry.transaction.transaction.op == TopologyChangeOpX.Replace && + entry.transaction.transaction.mapping + .select[MediatorDomainStateX] + .exists(m => m.observers.contains(mediatorId) || m.active.contains(mediatorId)), + ).map( + _.collectOfType[TopologyChangeOpX.Replace] + .collectOfMapping[MediatorDomainStateX] + .result + .sortBy(_.transaction.transaction.serial) + .headOption + ) + } + + def findFirstTrustCertificateForParticipant( + participant: ParticipantId + )(implicit + traceContext: TraceContext + ): Future[ + Option[StoredTopologyTransactionX[TopologyChangeOpX.Replace, DomainTrustCertificateX]] + ] = { + filteredState( + blocking(synchronized(topologyTransactionStore.toSeq)), + entry => + !entry.transaction.isProposal && + entry.transaction.transaction.op == TopologyChangeOpX.Replace && + entry.transaction.transaction.mapping + .select[DomainTrustCertificateX] + .exists(_.participantId == participant), + ).map( + _.collectOfType[TopologyChangeOpX.Replace] + .collectOfMapping[DomainTrustCertificateX] + .result + .sortBy(_.transaction.transaction.serial) + .headOption + ) + + } + + override def findEssentialStateForMember(member: Member, asOfInclusive: CantonTimestamp)(implicit + traceContext: TraceContext + ): Future[GenericStoredTopologyTransactionsX] = { + // asOfInclusive is the effective time of the transaction that onboarded the member. + // 1. load all transactions with a sequenced time <= asOfInclusive, including proposals + filteredState( + blocking(synchronized { + topologyTransactionStore.toSeq + }), + entry => entry.sequenced.value <= asOfInclusive, + ).map( + // 2. 
transform the result such that the validUntil fields are set as they were at maxEffective time of the snapshot + _.asSnapshotAtMaxEffectiveTime + ) + } + + /** store an initial set of topology transactions as given into the store */ + override def bootstrap( + snapshot: GenericStoredTopologyTransactionsX + )(implicit traceContext: TraceContext): Future[Unit] = Future { + blocking { + synchronized { + topologyTransactionStore + .appendAll( + snapshot.result.map { tx => + TopologyStoreEntry( + tx.transaction, + tx.sequenced, + tx.validFrom, + rejected = None, + until = tx.validUntil, + ) + } + ) + .discard + } + } + } + + override def findUpcomingEffectiveChanges(asOfInclusive: CantonTimestamp)(implicit + traceContext: TraceContext + ): Future[Seq[TopologyStore.Change]] = + Future { + blocking { + synchronized { + TopologyStoreX.accumulateUpcomingEffectiveChanges( + topologyTransactionStore + .filter(_.from.value >= asOfInclusive) + .map(_.toStoredTransaction) + .toSeq + ) + } + } + } + + override def maxTimestamp()(implicit + traceContext: TraceContext + ): Future[Option[(SequencedTime, EffectiveTime)]] = Future { + blocking { + synchronized { + topologyTransactionStore.lastOption.map(x => (x.sequenced, x.from)) + } + } + } + + override def findDispatchingTransactionsAfter( + timestampExclusive: CantonTimestamp, + limit: Option[Int], + )(implicit + traceContext: TraceContext + ): Future[GenericStoredTopologyTransactionsX] = + blocking(synchronized { + val selected = topologyTransactionStore + .filter(x => + x.from.value > timestampExclusive && (!x.transaction.isProposal || x.until.isEmpty) && x.rejected.isEmpty + ) + .map(_.toStoredTransaction) + .toSeq + Future.successful(StoredTopologyTransactionsX(limit.fold(selected)(selected.take))) + }) + + private def allTransactions( + includeRejected: Boolean = false + ): Future[GenericStoredTopologyTransactionsX] = + filteredState( + blocking(synchronized(topologyTransactionStore.toSeq)), + _ => true, + includeRejected, + 
) + + override def findStored( + transaction: GenericSignedTopologyTransactionX, + includeRejected: Boolean = false, + )(implicit + traceContext: TraceContext + ): Future[Option[GenericStoredTopologyTransactionX]] = + allTransactions(includeRejected).map( + _.result.findLast(_.transaction.transaction.hash == transaction.transaction.hash) + ) + + override def findStoredForVersion( + transaction: GenericTopologyTransactionX, + protocolVersion: ProtocolVersion, + )(implicit + traceContext: TraceContext + ): Future[Option[GenericStoredTopologyTransactionX]] = { + val rpv = TopologyTransactionX.protocolVersionRepresentativeFor(protocolVersion) + + allTransactions().map( + _.result.findLast(tx => + tx.transaction.transaction == transaction && tx.transaction.representativeProtocolVersion == rpv + ) + ) + } + + override def findParticipantOnboardingTransactions( + participantId: ParticipantId, + domainId: DomainId, + )(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Seq[GenericSignedTopologyTransactionX]] = { + val res = blocking(synchronized { + topologyTransactionStore.filter(x => + !x.transaction.isProposal && TopologyStoreX.initialParticipantDispatchingSet.contains( + x.transaction.transaction.mapping.code + ) + ) + }) + + FutureUnlessShutdown.pure( + TopologyStoreX.filterInitialParticipantDispatchingTransactions( + participantId, + domainId, + res.map(_.toStoredTransaction).toSeq, + ) + ) + } + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/ParticipantAttributes.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/ParticipantAttributes.scala new file mode 100644 index 0000000000..030d1527bc --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/ParticipantAttributes.scala @@ -0,0 +1,144 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.topology.transaction
+
+import com.digitalasset.canton.ProtoDeserializationError.*
+import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
+import com.digitalasset.canton.protocol.v0
+import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
+
+/** If [[trustLevel]] is [[TrustLevel.Vip]],
+  * then [[permission]]`.`[[ParticipantPermission.canConfirm canConfirm]] must hold.
+  */
+final case class ParticipantAttributes(permission: ParticipantPermission, trustLevel: TrustLevel) {
+  // Make sure that VIPs can always confirm so that
+  // downstream code does not have to handle VIPs that cannot confirm.
+  require(
+    trustLevel != TrustLevel.Vip || permission.canConfirm,
+    "Found a Vip that cannot confirm. This is not supported.",
+  )
+
+  /** Combines two attribute sets by keeping the weaker permission and the lower trust level. */
+  def merge(elem: ParticipantAttributes): ParticipantAttributes =
+    ParticipantAttributes(
+      permission = ParticipantPermission.lowerOf(permission, elem.permission),
+      trustLevel = TrustLevel.lowerOf(trustLevel, elem.trustLevel),
+    )
+
+}
+
+/** Permissions of a participant, i.e., things a participant can do on behalf of a party
+  *
+  * Permissions are hierarchical. A participant who can submit can confirm. A participant who can confirm can observe.
+  */
+sealed trait ParticipantPermission extends Product with Serializable {
+  def canConfirm: Boolean = false // can confirm transactions
+  def isActive: Boolean = true // can receive messages
+  val level: Byte // used for serialization and ordering; NOTE: lower level = stronger permission
+  def toProtoEnum: v0.ParticipantPermission
+
+  /** Maps this 2.x permission to its 3.x ([[ParticipantPermissionX]]) counterpart.
+    * Throws for [[ParticipantPermission.Disabled]], which has no X equivalent.
+    */
+  def tryToX: ParticipantPermissionX = this match {
+    case ParticipantPermission.Submission => ParticipantPermissionX.Submission
+    case ParticipantPermission.Confirmation => ParticipantPermissionX.Confirmation
+    case ParticipantPermission.Observation => ParticipantPermissionX.Observation
+    case ParticipantPermission.Disabled =>
+      // Message fixed: the case object is named `Disabled`, not `Disable`.
+      throw new RuntimeException(
+        "ParticipantPermission.Disabled does not exist in ParticipantPermissionX"
+      )
+  }
+}
+
+object ParticipantPermission {
+  case object Submission extends ParticipantPermission {
+    override val canConfirm = true
+    val level = 1
+    val toProtoEnum: v0.ParticipantPermission = v0.ParticipantPermission.Submission
+  }
+  case object Confirmation extends ParticipantPermission {
+    override val canConfirm = true
+    val level = 2
+    val toProtoEnum: v0.ParticipantPermission = v0.ParticipantPermission.Confirmation
+  }
+  case object Observation extends ParticipantPermission {
+    val level = 3
+    val toProtoEnum: v0.ParticipantPermission = v0.ParticipantPermission.Observation
+  }
+  // in 3.0, participants can't be disabled anymore. they can be purged for good
+  @Deprecated(since = "3.0.0")
+  case object Disabled extends ParticipantPermission {
+    override def isActive = false
+    val level = 4
+    val toProtoEnum = v0.ParticipantPermission.Disabled
+  }
+  // TODO(i2213): add purging of participants
+
+  def fromProtoEnum(
+      permission: v0.ParticipantPermission
+  ): ParsingResult[ParticipantPermission] = {
+    permission match {
+      case v0.ParticipantPermission.Observation => Right(ParticipantPermission.Observation)
+      case v0.ParticipantPermission.Confirmation => Right(ParticipantPermission.Confirmation)
+      case v0.ParticipantPermission.Submission => Right(ParticipantPermission.Submission)
+      case v0.ParticipantPermission.Disabled => Right(ParticipantPermission.Disabled)
+      case v0.ParticipantPermission.MissingParticipantPermission =>
+        Left(FieldNotSet(permission.name))
+      case v0.ParticipantPermission.Unrecognized(x) => Left(UnrecognizedEnum(permission.name, x))
+    }
+  }
+
+  // `level` is inverted w.r.t. strength (Submission = 1 is strongest), hence the
+  // `.reverse` so that stronger permissions compare as greater under this ordering.
+  implicit val orderingParticipantPermission: Ordering[ParticipantPermission] =
+    Ordering.by[ParticipantPermission, Byte](_.level).reverse
+
+  /** The weaker of the two permissions (the one with the higher level number). */
+  def lowerOf(fst: ParticipantPermission, snd: ParticipantPermission): ParticipantPermission =
+    if (fst.level > snd.level) fst else snd
+
+  /** The stronger of the two permissions (the one with the lower level number). */
+  def higherOf(fst: ParticipantPermission, snd: ParticipantPermission): ParticipantPermission =
+    if (fst.level < snd.level) fst else snd
+
+}
+
+/** The trust level of the participant. Can be either Ordinary or Vip
+  */
+sealed trait TrustLevel extends Product with Serializable with PrettyPrinting {
+  def toProtoEnum: v0.TrustLevel
+  def rank: Byte
+
+  override def pretty: Pretty[TrustLevel] = prettyOfObject[TrustLevel]
+
+  /** Maps this 2.x trust level to its 3.x ([[TrustLevelX]]) counterpart; total, unlike tryToX above. */
+  def toX: TrustLevelX = this match {
+    case TrustLevel.Ordinary => TrustLevelX.Ordinary
+    case TrustLevel.Vip => TrustLevelX.Vip
+  }
+}
+
+object TrustLevel {
+
+  def lowerOf(fst: TrustLevel, snd: TrustLevel): TrustLevel = if (fst.rank < snd.rank) fst else snd
+
+  def higherOf(fst: TrustLevel, snd: TrustLevel): TrustLevel = if (fst.rank > snd.rank) fst else snd
+
+  case object Ordinary extends TrustLevel {
+    override def toProtoEnum: v0.TrustLevel = v0.TrustLevel.Ordinary
+    override def rank: Byte = 0
+  }
+  case object Vip extends TrustLevel {
+    override def toProtoEnum: v0.TrustLevel = v0.TrustLevel.Vip
+    override def rank: Byte = 1
+  }
+
+  def fromProtoEnum(value: v0.TrustLevel): ParsingResult[TrustLevel] =
+    value match {
+      case v0.TrustLevel.Vip => Right(Vip)
+      case v0.TrustLevel.Ordinary => Right(Ordinary)
+      case v0.TrustLevel.MissingTrustLevel => Left(FieldNotSet("trustLevel"))
+      case v0.TrustLevel.Unrecognized(x) => Left(UnrecognizedEnum("trustLevel", x))
+    }
+
+  implicit val orderingTrustLevel: Ordering[TrustLevel] = Ordering.by[TrustLevel, Byte](_.rank)
+}
diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/SignedTopologyTransaction.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/SignedTopologyTransaction.scala
new file mode 100644
index 0000000000..eb217c7ad5
--- /dev/null
+++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/SignedTopologyTransaction.scala
@@ -0,0 +1,203 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.topology.transaction
+
+import cats.data.EitherT
+import cats.syntax.either.*
+import com.digitalasset.canton.crypto.*
+import com.digitalasset.canton.logging.pretty.PrettyInstances.*
+import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
+import com.digitalasset.canton.protocol.v0
+import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
+import com.digitalasset.canton.serialization.{ProtoConverter, ProtocolVersionedMemoizedEvidence}
+import com.digitalasset.canton.store.db.DbSerializationException
+import com.digitalasset.canton.topology.DomainId
+import com.digitalasset.canton.tracing.TraceContext
+import com.digitalasset.canton.version.{
+  HasMemoizedProtocolVersionedWrapperCompanion,
+  HasProtocolVersionedWrapper,
+  ProtoVersion,
+  ProtocolVersion,
+  RepresentativeProtocolVersion,
+}
+import com.google.common.annotations.VisibleForTesting
+import com.google.protobuf.ByteString
+import slick.jdbc.{GetResult, PositionedParameters, SetParameter}
+
+import scala.concurrent.{ExecutionContext, Future}
+
+/** A signed topology transaction
+  *
+  * Every topology transaction needs to be authorized by an appropriate key. This object represents such
+  * an authorization, where there is a signature of a given key of the given topology transaction.
+  *
+  * Whether the key is eligible to authorize the topology transaction depends on the topology state
+  *
+  * @param transaction the transaction that is signed
+  * @param key         the public signing key whose signature is attached
+  * @param signature   the signature over the transaction's hash
+  */
+@SuppressWarnings(Array("org.wartremover.warts.FinalCaseClass")) // This class is mocked in tests
+case class SignedTopologyTransaction[+Op <: TopologyChangeOp] private (
+    transaction: TopologyTransaction[Op],
+    key: SigningPublicKey,
+    signature: Signature,
+)(
+    override val representativeProtocolVersion: RepresentativeProtocolVersion[
+      SignedTopologyTransaction.type
+    ],
+    // Original serialized bytes, kept when deserialized so re-serialization is byte-identical
+    // (required for signature / evidence stability).
+    override val deserializedFrom: Option[ByteString] = None,
+) extends HasProtocolVersionedWrapper[SignedTopologyTransaction[TopologyChangeOp]]
+    with ProtocolVersionedMemoizedEvidence
+    with Product
+    with Serializable
+    with PrettyPrinting {
+
+  // Fallback serialization used only when no memoized bytes are available.
+  override protected def toByteStringUnmemoized: ByteString =
+    super[HasProtocolVersionedWrapper].toByteString
+
+  @transient override protected lazy val companionObj: SignedTopologyTransaction.type =
+    SignedTopologyTransaction
+
+  private def toProtoV0: v0.SignedTopologyTransaction =
+    v0.SignedTopologyTransaction(
+      // The embedded transaction is serialized via its cryptographic evidence bytes,
+      // not re-encoded, so the signed payload stays stable.
+      transaction = transaction.getCryptographicEvidence,
+      key = Some(key.toProtoV0),
+      signature = Some(signature.toProtoV0),
+    )
+
+  /** Checks that [[signature]] is a valid signature of [[key]] over the transaction's hash. */
+  def verifySignature(pureApi: CryptoPureApi): Either[SignatureCheckError, Unit] = {
+    val hash = transaction.hashToSign(pureApi)
+    pureApi.verifySignature(hash, key, signature)
+  }
+
+  // Test-only copy helper; drops memoized bytes (deserializedFrom = None) since fields may change.
+  @VisibleForTesting
+  def update[NewOp >: Op <: TopologyChangeOp](
+      transaction: TopologyTransaction[NewOp] = transaction,
+      key: SigningPublicKey = key,
+      signature: Signature = signature,
+  ): SignedTopologyTransaction[NewOp] =
+    this.copy(transaction = transaction, key = key, signature = signature)(
+      representativeProtocolVersion,
+      None,
+    )
+
+  override def pretty: Pretty[SignedTopologyTransaction.this.type] =
+    prettyOfClass(unnamedParam(_.transaction), param("key", _.key))
+
+  def uniquePath: UniquePath = transaction.element.uniquePath
+
+  def operation: Op = transaction.op
+
+  def restrictedToDomain: Option[DomainId] = transaction.element.mapping.restrictedToDomain
+}
+
+object SignedTopologyTransaction
+    extends HasMemoizedProtocolVersionedWrapperCompanion[SignedTopologyTransaction[
+      TopologyChangeOp
+    ]] {
+  override val name: String = "SignedTopologyTransaction"
+
+  type GenericSignedTopologyTransaction = SignedTopologyTransaction[TopologyChangeOp]
+
+  val supportedProtoVersions = SupportedProtoVersions(
+    ProtoVersion(0) -> VersionedProtoConverter(ProtocolVersion.v30)(v0.SignedTopologyTransaction)(
+      supportedProtoVersionMemoized(_)(fromProtoV0),
+      _.toProtoV0.toByteString,
+    )
+  )
+
+  import com.digitalasset.canton.resource.DbStorage.Implicits.*
+
+  // Public construction entry point; the case-class constructor itself is private.
+  def apply[Op <: TopologyChangeOp](
+      transaction: TopologyTransaction[Op],
+      key: SigningPublicKey,
+      signature: Signature,
+      rpv: RepresentativeProtocolVersion[SignedTopologyTransaction.type],
+  ): SignedTopologyTransaction[Op] =
+    SignedTopologyTransaction(transaction, key, signature)(rpv, None)
+
+  /** Sign the given topology transaction. */
+  def create[Op <: TopologyChangeOp](
+      transaction: TopologyTransaction[Op],
+      signingKey: SigningPublicKey,
+      hashOps: HashOps,
+      crypto: CryptoPrivateApi,
+      protocolVersion: ProtocolVersion,
+  )(implicit
+      ec: ExecutionContext,
+      tc: TraceContext,
+  ): EitherT[Future, SigningError, SignedTopologyTransaction[Op]] =
+    for {
+      signature <- crypto.sign(transaction.hashToSign(hashOps), signingKey.id)
+      representativeProtocolVersion = supportedProtoVersions.protocolVersionRepresentativeFor(
+        protocolVersion
+      )
+    } yield SignedTopologyTransaction(transaction, signingKey, signature)(
+      representativeProtocolVersion,
+      None,
+    )
+
+  /** Returns `signedTx` re-encoded (and re-signed) for `protocolVersion`;
+    * returns it unchanged when the versions are already equivalent.
+    */
+  def asVersion[Op <: TopologyChangeOp](
+      signedTx: SignedTopologyTransaction[Op],
+      protocolVersion: ProtocolVersion,
+  )(
+      crypto: Crypto
+  )(implicit
+      ec: ExecutionContext,
+      tc: TraceContext,
+  ): EitherT[Future, String, SignedTopologyTransaction[Op]] = {
+    val originTx = signedTx.transaction
+
+    // Convert and resign the transaction if the topology transaction version does not match the expected version
+    if (!originTx.hasEquivalentVersion(protocolVersion)) {
+      val convertedTx = originTx.asVersion(protocolVersion)
+      SignedTopologyTransaction
+        .create(
+          convertedTx,
+          signedTx.key,
+          crypto.pureCrypto,
+          crypto.privateCrypto,
+          protocolVersion,
+        )
+        .leftMap { err =>
+          s"Failed to resign topology transaction $originTx (${originTx.representativeProtocolVersion}) for domain version $protocolVersion: $err"
+        }
+    } else
+      EitherT.rightT(signedTx)
+  }
+
+  // `bytes` are the original wire bytes, memoized via `deserializedFrom` (see the class field).
+  private def fromProtoV0(transactionP: v0.SignedTopologyTransaction)(
+      bytes: ByteString
+  ): ParsingResult[SignedTopologyTransaction[TopologyChangeOp]] =
+    for {
+      transaction <- TopologyTransaction.fromByteString(transactionP.transaction)
+      publicKey <- ProtoConverter.parseRequired(
+        SigningPublicKey.fromProtoV0,
+        "key",
+        transactionP.key,
+      )
+      signature <- ProtoConverter.parseRequired(
+        Signature.fromProtoV0,
+        "signature",
+        transactionP.signature,
+      )
+      protocolVersion = supportedProtoVersions.protocolVersionRepresentativeFor(ProtoVersion(0))
+    } yield SignedTopologyTransaction(transaction, publicKey, signature)(
+      protocolVersion,
+      Some(bytes),
+    )
+
+  // Slick DB read: deserializes the stored bytes; a parse failure is a DB corruption error.
+  def createGetResultDomainTopologyTransaction
+      : GetResult[SignedTopologyTransaction[TopologyChangeOp]] =
+    GetResult { r =>
+      fromByteString(r.<<[ByteString])
+        .valueOr(err =>
+          throw new DbSerializationException(s"Failed to deserialize TopologyTransaction: $err")
+        )
+    }
+
+  // Slick DB write: stores the (memoized) serialized form.
+  implicit def setParameterTopologyTransaction(implicit
+      setParameterByteArray: SetParameter[Array[Byte]]
+  ): SetParameter[SignedTopologyTransaction[TopologyChangeOp]] = {
+    (d: SignedTopologyTransaction[TopologyChangeOp], pp: PositionedParameters) =>
+      pp >> d.toByteArray
+  }
+}
diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/SignedTopologyTransactionX.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/SignedTopologyTransactionX.scala
new file mode 100644
index 0000000000..5b81b048bb
--- /dev/null
+++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/SignedTopologyTransactionX.scala
@@ -0,0 +1,237 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.topology.transaction
+
+import cats.data.EitherT
+import cats.instances.seq.*
+import cats.syntax.either.*
+import cats.syntax.parallel.*
+import com.daml.nonempty.NonEmpty
+import com.daml.nonempty.catsinstances.*
+import com.digitalasset.canton.crypto.*
+import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
+import com.digitalasset.canton.protocol.v2
+import com.digitalasset.canton.serialization.ProtoConverter
+import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
+import com.digitalasset.canton.store.db.DbSerializationException
+import com.digitalasset.canton.topology.DomainId
+import com.digitalasset.canton.tracing.TraceContext
+import com.digitalasset.canton.util.FutureInstances.*
+import com.digitalasset.canton.version.*
+import com.google.common.annotations.VisibleForTesting
+import com.google.protobuf.ByteString
+import slick.jdbc.{GetResult, PositionedParameters, SetParameter}
+
+import scala.concurrent.{ExecutionContext, Future}
+import scala.reflect.ClassTag
+
+/** A signed topology transaction
+  *
+  * Every topology transaction needs to be authorized by an appropriate key. This object represents such
+  * an authorization, where there is a signature of a given key of the given topology transaction.
+  *
+  * Whether the key is eligible to authorize the topology transaction depends on the topology state
+  *
+  * @param transaction the transaction being authorized
+  * @param signatures  one or more signatures over the transaction's hash (multi-sig, unlike the 2.x variant)
+  * @param isProposal  true while the transaction has not yet gathered enough signatures to be fully authorized
+  */
+final case class SignedTopologyTransactionX[+Op <: TopologyChangeOpX, +M <: TopologyMappingX](
+    transaction: TopologyTransactionX[Op, M],
+    signatures: NonEmpty[Set[Signature]],
+    isProposal: Boolean,
+)(
+    override val representativeProtocolVersion: RepresentativeProtocolVersion[
+      SignedTopologyTransactionX.type
+    ]
+) extends HasProtocolVersionedWrapper[
+      SignedTopologyTransactionX[TopologyChangeOpX, TopologyMappingX]
+    ]
+    with Product
+    with Serializable
+    with PrettyPrinting {
+
+  // Digest over all signatures; sorted by signing key before concatenation so the
+  // result does not depend on set iteration order.
+  lazy val hashOfSignatures: Hash = Hash.digest(
+    HashPurpose.TopologyTransactionSignature,
+    signatures.toList
+      .sortBy(_.signedBy.toProtoPrimitive)
+      .map(_.toProtoV0.toByteString)
+      .reduceLeft(_.concat(_)),
+    HashAlgorithm.Sha256,
+  )
+
+  /** Returns a copy with the given signatures merged in (set union). */
+  def addSignatures(add: Seq[Signature]): SignedTopologyTransactionX[Op, M] =
+    SignedTopologyTransactionX(
+      transaction,
+      signatures ++ add,
+      isProposal,
+    )(representativeProtocolVersion)
+
+  def operation: Op = transaction.op
+
+  def mapping: M = transaction.mapping
+
+  @transient override protected lazy val companionObj: SignedTopologyTransactionX.type =
+    SignedTopologyTransactionX
+
+  // The casts below are safe: select* only narrows the phantom type parameters after
+  // the underlying transaction has confirmed the runtime type via ClassTag.
+  @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf"))
+  def selectMapping[TargetMapping <: TopologyMappingX: ClassTag]
+      : Option[SignedTopologyTransactionX[Op, TargetMapping]] = {
+    transaction
+      .selectMapping[TargetMapping]
+      .map(_ => this.asInstanceOf[SignedTopologyTransactionX[Op, TargetMapping]])
+  }
+
+  @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf"))
+  def selectOp[TargetOp <: TopologyChangeOpX: ClassTag]
+      : Option[SignedTopologyTransactionX[TargetOp, M]] =
+    transaction
+      .selectOp[TargetOp]
+      .map(_ => this.asInstanceOf[SignedTopologyTransactionX[TargetOp, M]])
+
+  /** Narrows both the operation and the mapping type at once. */
+  def select[TargetOp <: TopologyChangeOpX: ClassTag, TargetMapping <: TopologyMappingX: ClassTag]
+      : Option[SignedTopologyTransactionX[TargetOp, TargetMapping]] = {
+    selectMapping[TargetMapping].flatMap(_.selectOp[TargetOp])
+  }
+
+  def toProtoV2: v2.SignedTopologyTransactionX =
+    v2.SignedTopologyTransactionX(
+      // Embedded transaction serialized via its cryptographic evidence bytes, not re-encoded.
+      transaction = transaction.getCryptographicEvidence,
+      signatures = signatures.toSeq.map(_.toProtoV0),
+      proposal = isProposal,
+    )
+
+  override def pretty: Pretty[SignedTopologyTransactionX.this.type] =
+    prettyOfClass(
+      unnamedParam(_.transaction),
+      param("signatures", _.signatures.map(_.signedBy)),
+      paramIfTrue("proposal", _.isProposal),
+    )
+
+  def restrictedToDomain: Option[DomainId] = transaction.mapping.restrictedToDomain
+
+  // Test-only: bypasses the companion apply and keeps the current protocol version.
+  @VisibleForTesting
+  def copy[Op2 <: TopologyChangeOpX, M2 <: TopologyMappingX](
+      transaction: TopologyTransactionX[Op2, M2] = this.transaction,
+      signatures: NonEmpty[Set[Signature]] = this.signatures,
+      isProposal: Boolean = this.isProposal,
+  ): SignedTopologyTransactionX[Op2, M2] =
+    new SignedTopologyTransactionX[Op2, M2](transaction, signatures, isProposal)(
+      representativeProtocolVersion
+    )
+}
+
+object SignedTopologyTransactionX
+    extends HasProtocolVersionedCompanion[
+      SignedTopologyTransactionX[TopologyChangeOpX, TopologyMappingX]
+    ] {
+  override val name: String = "SignedTopologyTransactionX"
+
+  type GenericSignedTopologyTransactionX =
+    SignedTopologyTransactionX[TopologyChangeOpX, TopologyMappingX]
+
+  type PositiveSignedTopologyTransactionX =
+    SignedTopologyTransactionX[TopologyChangeOpX.Replace, TopologyMappingX]
+
+  val supportedProtoVersions = SupportedProtoVersions(
+    ProtoVersion(2) -> VersionedProtoConverter(ProtocolVersion.v30)(
+      v2.SignedTopologyTransactionX
+    )(
+      supportedProtoVersion(_)(fromProtoV2),
+      _.toProtoV2.toByteString,
+    )
+  )
+
+  import com.digitalasset.canton.resource.DbStorage.Implicits.*
+
+  /** Sign the given topology transaction. */
+  def create[Op <: TopologyChangeOpX, M <: TopologyMappingX](
+      transaction: TopologyTransactionX[Op, M],
+      signingKeys: NonEmpty[Set[Fingerprint]],
+      isProposal: Boolean,
+      crypto: CryptoPrivateApi,
+      protocolVersion: ProtocolVersion,
+  )(implicit
+      ec: ExecutionContext,
+      tc: TraceContext,
+  ): EitherT[Future, SigningError, SignedTopologyTransactionX[Op, M]] =
+    for {
+      // Signs with all keys in parallel; fails if any single signing operation fails.
+      signaturesNE <- signingKeys.toSeq.toNEF.parTraverse(
+        crypto.sign(transaction.hash.hash, _)
+      )
+      representativeProtocolVersion = supportedProtoVersions.protocolVersionRepresentativeFor(
+        protocolVersion
+      )
+    } yield SignedTopologyTransactionX(transaction, signaturesNE.toSet, isProposal)(
+      representativeProtocolVersion
+    )
+
+  /** Returns `signedTx` re-encoded (and re-signed) for `protocolVersion`;
+    * unchanged when the versions are already equivalent. Fails if the transaction
+    * carries more than one signature, since re-signing supports only a single key.
+    */
+  def asVersion[Op <: TopologyChangeOpX, M <: TopologyMappingX](
+      signedTx: SignedTopologyTransactionX[Op, M],
+      protocolVersion: ProtocolVersion,
+  )(
+      crypto: Crypto
+  )(implicit
+      ec: ExecutionContext,
+      tc: TraceContext,
+  ): EitherT[Future, String, SignedTopologyTransactionX[Op, M]] = {
+    val originTx = signedTx.transaction
+
+    // Convert and resign the transaction if the topology transaction version does not match the expected version
+    if (!originTx.isEquivalentTo(protocolVersion)) {
+      if (signedTx.signatures.size > 1) {
+        EitherT.leftT(
+          s"Failed to resign topology transaction $originTx with multiple signatures, as only one signature is supported"
+        )
+      } else {
+        val convertedTx = originTx.asVersion(protocolVersion)
+        SignedTopologyTransactionX
+          .create(
+            convertedTx,
+            signedTx.signatures.map(_.signedBy),
+            signedTx.isProposal,
+            crypto.privateCrypto,
+            protocolVersion,
+          )
+          .leftMap { err =>
+            s"Failed to resign topology transaction $originTx (${originTx.representativeProtocolVersion}) for domain version $protocolVersion: $err"
+          }
+      }
+    } else
+      EitherT.rightT(signedTx)
+  }
+
+  def fromProtoV2(
+      transactionP: v2.SignedTopologyTransactionX
+  ): ParsingResult[GenericSignedTopologyTransactionX] = {
+    val v2.SignedTopologyTransactionX(txBytes, signaturesP, isProposal) = transactionP
+    for {
+
+      transaction <- TopologyTransactionX.fromByteString(txBytes)
+      signatures <- ProtoConverter.parseRequiredNonEmpty(
+        Signature.fromProtoV0,
+        "SignedTopologyTransactionX.signatures",
+        signaturesP,
+      )
+      protocolVersion = supportedProtoVersions.protocolVersionRepresentativeFor(ProtoVersion(2))
+
+    } yield SignedTopologyTransactionX(transaction, signatures.toSet, isProposal)(
+      protocolVersion
+    )
+
+  }
+
+  // Slick DB read: deserializes the stored bytes; a parse failure is a DB corruption error.
+  def createGetResultDomainTopologyTransaction: GetResult[GenericSignedTopologyTransactionX] =
+    GetResult { r =>
+      fromByteString(r.<<[ByteString])
+        .valueOr(err =>
+          throw new DbSerializationException(
+            s"Failed to deserialize SignedTopologyTransactionX: $err"
+          )
+        )
+    }
+
+  // Slick DB write: stores the serialized form.
+  implicit def setParameterTopologyTransaction(implicit
+      setParameterByteArray: SetParameter[Array[Byte]]
+  ): SetParameter[GenericSignedTopologyTransactionX] = {
+    (d: GenericSignedTopologyTransactionX, pp: PositionedParameters) =>
+      pp >> d.toByteArray
+  }
+}
diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMapping.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMapping.scala
new file mode 100644
index 0000000000..0bba252f6d
--- /dev/null
+++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMapping.scala
@@ -0,0 +1,644 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology.transaction + +import cats.syntax.either.* +import cats.syntax.option.* +import cats.syntax.traverse.* +import com.digitalasset.canton.ProtoDeserializationError.{FieldNotSet, UnrecognizedEnum} +import com.digitalasset.canton.crypto.* +import com.digitalasset.canton.logging.pretty.PrettyInstances.* +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.protocol.{DynamicDomainParameters, v0, v1} +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.serialization.{ProtoConverter, ProtocolVersionedMemoizedEvidence} +import com.digitalasset.canton.topology.* +import com.digitalasset.canton.version.* +import com.digitalasset.canton.{LfPackageId, ProtoDeserializationError} +import com.google.common.annotations.VisibleForTesting +import com.google.protobuf.ByteString + +/** X -> Y */ +sealed trait TopologyMapping extends Product with Serializable with PrettyPrinting { + + override def pretty: Pretty[this.type] = adHocPrettyInstance + def uniquePath(elementId: TopologyElementId): UniquePath + def dbType: DomainTopologyTransactionType + def requiredAuth: RequiredAuth + + /** Secondary uid for cascading namespace updates + * + * During topology processing (in 2.x), we need in case of cascading updates to fetch + * all transactions that might be affected due to a new namespace or identifier mapping. + * + * Now, the topology transactions have a primary uid (i.e. the party id) within the unique path + * which is used to index the data, but can have a secondary uid (the participant). We store them by + * the primary uid but during cascading updates, we actually have to fetch them using the secondary uid. + * + * Only txs with RequestSide can have a secondary uid. + * + * In 3.x this is simpler, as we removed cascading additions. So all this logic can be deleted soon again ... 
+ */ + def secondaryUid: Option[UniqueIdentifier] = None + + /** Returns true if the new mapping would be a replacement for the given mapping */ + def isReplacedBy(mapping: TopologyMapping): Boolean = false + + def restrictedToDomain: Option[DomainId] = None + +} + +sealed trait TopologyStateUpdateMapping extends TopologyMapping +sealed trait DomainGovernanceMapping extends TopologyMapping { + def domainId: DomainId + + override def uniquePath( + _elementId: TopologyElementId + ): UniquePathSignedDomainGovernanceTransaction = + UniquePathSignedDomainGovernanceTransaction(domainId.unwrap, dbType) +} + +/** A namespace delegation transaction (intermediate CA) + * + * Entrusts a public-key to perform changes on the namespace + * {(*,I) => p_k} + * + * If the delegation is a root delegation, then the target key + * inherits the right to authorize other NamespaceDelegations. + */ +// architecture-handbook-entry-begin: NamespaceDelegation +final case class NamespaceDelegation( + namespace: Namespace, + target: SigningPublicKey, + isRootDelegation: Boolean, +) extends TopologyStateUpdateMapping { + // architecture-handbook-entry-end: NamespaceDelegation + // TODO(i12892): Add a private constructor, private apply, and factory method to check constraint + require( + isRootDelegation || namespace.fingerprint != target.fingerprint, + s"Root certificate for $namespace needs to be set as isRootDelegation = true", + ) + + def toProtoV0: v0.NamespaceDelegation = + v0.NamespaceDelegation( + namespace = namespace.fingerprint.unwrap, + targetKey = Some(target.toProtoV0), + isRootDelegation = isRootDelegation, + ) + + // TODO(i4933) include hash over content + override def uniquePath(id: TopologyElementId): UniquePath = + UniquePathNamespaceDelegation(namespace, id) + + override def dbType: DomainTopologyTransactionType = NamespaceDelegation.dbType + + override def requiredAuth: RequiredAuth = + RequiredAuth.Ns(namespace, true) + +} + +object NamespaceDelegation { + + def dbType: 
DomainTopologyTransactionType = DomainTopologyTransactionType.NamespaceDelegation + + /** Returns true if the given transaction is a self-signed root certificate */ + def isRootCertificate(sit: SignedTopologyTransaction[TopologyChangeOp]): Boolean = + sit.transaction.element.mapping match { + case nd: NamespaceDelegation => + nd.namespace.fingerprint == sit.key.fingerprint && nd.isRootDelegation && nd.target.fingerprint == nd.namespace.fingerprint && + sit.operation == TopologyChangeOp.Add + case _ => false + } + + def fromProtoV0( + value: v0.NamespaceDelegation + ): ParsingResult[NamespaceDelegation] = + for { + namespace <- Fingerprint.fromProtoPrimitive(value.namespace).map(Namespace(_)) + target <- ProtoConverter.parseRequired( + SigningPublicKey.fromProtoV0, + "target_key", + value.targetKey, + ) + } yield NamespaceDelegation(namespace, target, value.isRootDelegation) +} + +/** An identifier delegation + * + * entrusts a public-key to do any change with respect to the identifier + * {(X,I) => p_k} + */ +// architecture-handbook-entry-begin: IdentifierDelegation +final case class IdentifierDelegation(identifier: UniqueIdentifier, target: SigningPublicKey) + extends TopologyStateUpdateMapping { + // architecture-handbook-entry-end: IdentifierDelegation + def toProtoV0: v0.IdentifierDelegation = + v0.IdentifierDelegation( + uniqueIdentifier = identifier.toProtoPrimitive, + targetKey = Some(target.toProtoV0), + ) + + // TODO(i4933) include hash over content + override def uniquePath(id: TopologyElementId): UniquePath = + UniquePathSignedTopologyTransaction(identifier, dbType, id) + + override def dbType: DomainTopologyTransactionType = IdentifierDelegation.dbType + + override def requiredAuth: RequiredAuth = RequiredAuth.Ns(identifier.namespace, false) + +} + +object IdentifierDelegation { + + def dbType: DomainTopologyTransactionType = DomainTopologyTransactionType.IdentifierDelegation + + def fromProtoV0( + value: v0.IdentifierDelegation + ): 
ParsingResult[IdentifierDelegation] = + for { + identifier <- UniqueIdentifier.fromProtoPrimitive(value.uniqueIdentifier, "uniqueIdentifier") + target <- ProtoConverter.parseRequired( + SigningPublicKey.fromProtoV0, + "target_key", + value.targetKey, + ) + } yield IdentifierDelegation(identifier, target) +} + +/** A key owner (participant, mediator, sequencer, manager) to key mapping + * + * In Canton, we need to know keys for all participating entities. The entities are + * all the protocol members (participant, mediator, topology manager) plus the + * sequencer (which provides the communication infrastructure for the members). + */ +// architecture-handbook-entry-begin: OwnerToKeyMapping +final case class OwnerToKeyMapping(owner: Member, key: PublicKey) + extends TopologyStateUpdateMapping { + // architecture-handbook-entry-end: OwnerToKeyMapping + def toProtoV0: v0.OwnerToKeyMapping = + v0.OwnerToKeyMapping( + keyOwner = owner.toProtoPrimitive, + publicKey = Some(key.toProtoPublicKeyV0), + ) + + override def uniquePath(id: TopologyElementId): UniquePath = + // TODO(i4933) include hash over content + UniquePathSignedTopologyTransaction(owner.uid, dbType, id) + + override def dbType: DomainTopologyTransactionType = OwnerToKeyMapping.dbType + override def requiredAuth: RequiredAuth = RequiredAuth.Uid(Seq(owner.uid)) + +} + +object OwnerToKeyMapping { + + def dbType: DomainTopologyTransactionType = DomainTopologyTransactionType.OwnerToKeyMapping + + def fromProtoV0( + value: v0.OwnerToKeyMapping + ): ParsingResult[OwnerToKeyMapping] = + for { + owner <- Member.fromProtoPrimitive(value.keyOwner, "keyOwner") + key <- ProtoConverter + .parseRequired(PublicKey.fromProtoPublicKeyV0, "public_key", value.publicKey) + } yield OwnerToKeyMapping(owner, key) + +} + +// Using private because the `claim` needs to be a `LegalIdentityClaim` +final case class SignedLegalIdentityClaim private ( + uid: UniqueIdentifier, + claim: ByteString, + signature: Signature, +) extends 
TopologyStateUpdateMapping + with PrettyPrinting { + def toProtoV0: v0.SignedLegalIdentityClaim = + v0.SignedLegalIdentityClaim( + claim = claim, + signature = signature.toProtoV0.some, + ) + + override def pretty: Pretty[SignedLegalIdentityClaim] = + prettyOfClass(param("signature", _.signature), paramWithoutValue("claim")) + + override def uniquePath(id: TopologyElementId): UniquePath = + // TODO(i4933) include hash over content + UniquePathSignedTopologyTransaction(uid, dbType, id) + + override def dbType: DomainTopologyTransactionType = SignedLegalIdentityClaim.dbType + + override def requiredAuth: RequiredAuth = RequiredAuth.Uid(Seq(uid)) + +} + +object SignedLegalIdentityClaim { + + def dbType: DomainTopologyTransactionType = DomainTopologyTransactionType.SignedLegalIdentityClaim + + @VisibleForTesting + def create(claim: LegalIdentityClaim, signature: Signature): SignedLegalIdentityClaim = + SignedLegalIdentityClaim(claim.uid, claim.toByteString, signature) + + def fromProtoV0( + value: v0.SignedLegalIdentityClaim + ): ParsingResult[SignedLegalIdentityClaim] = + for { + signature <- ProtoConverter.parseRequired(Signature.fromProtoV0, "signature", value.signature) + claim <- LegalIdentityClaim.fromByteString(value.claim) + } yield SignedLegalIdentityClaim(claim.uid, value.claim, signature) +} + +final case class LegalIdentityClaim private ( + uid: UniqueIdentifier, + evidence: LegalIdentityClaimEvidence, +)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + LegalIdentityClaim.type + ], + override val deserializedFrom: Option[ByteString], +) extends ProtocolVersionedMemoizedEvidence + with HasProtocolVersionedWrapper[LegalIdentityClaim] { + @transient override protected lazy val companionObj: LegalIdentityClaim.type = LegalIdentityClaim + + protected def toProtoV0: v0.LegalIdentityClaim = + v0.LegalIdentityClaim( + uniqueIdentifier = uid.toProtoPrimitive, + evidence = evidence.toProtoOneOf, + ) + + def hash(hashOps: HashOps): Hash 
= + hashOps.digest(HashPurpose.LegalIdentityClaim, getCryptographicEvidence) + + override protected def toByteStringUnmemoized: ByteString = + super[HasProtocolVersionedWrapper].toByteString +} + +object LegalIdentityClaim extends HasMemoizedProtocolVersionedWrapperCompanion[LegalIdentityClaim] { + override val name: String = "LegalIdentityClaim" + + val supportedProtoVersions = SupportedProtoVersions( + ProtoVersion(0) -> VersionedProtoConverter(ProtocolVersion.v30)(v0.LegalIdentityClaim)( + supportedProtoVersionMemoized(_)(fromProtoV0), + _.toProtoV0.toByteString, + ) + ) + + def create( + uid: UniqueIdentifier, + evidence: LegalIdentityClaimEvidence, + protocolVersion: ProtocolVersion, + ): LegalIdentityClaim = + LegalIdentityClaim(uid, evidence)( + protocolVersionRepresentativeFor(protocolVersion), + None, + ) + + private def fromProtoV0( + claimP: v0.LegalIdentityClaim + )(bytes: ByteString): ParsingResult[LegalIdentityClaim] = + for { + uid <- UniqueIdentifier.fromProtoPrimitive(claimP.uniqueIdentifier, "uniqueIdentifier") + evidence <- LegalIdentityClaimEvidence.fromProtoOneOf(claimP.evidence) + } yield LegalIdentityClaim(uid, evidence)( + protocolVersionRepresentativeFor(ProtoVersion(0)), + Some(bytes), + ) +} + +sealed trait LegalIdentityClaimEvidence { + def toProtoOneOf: v0.LegalIdentityClaim.Evidence +} + +object LegalIdentityClaimEvidence { + final case class X509Cert(pem: X509CertificatePem) extends LegalIdentityClaimEvidence { + override def toProtoOneOf: v0.LegalIdentityClaim.Evidence.X509Cert = + v0.LegalIdentityClaim.Evidence.X509Cert(pem.unwrap) + } + + def fromProtoOneOf( + evidenceP: v0.LegalIdentityClaim.Evidence + ): ParsingResult[LegalIdentityClaimEvidence] = { + evidenceP match { + case v0.LegalIdentityClaim.Evidence.X509Cert(pem) => + X509CertificatePem + .fromBytes(pem) + .map(X509Cert) + .leftMap(err => ProtoDeserializationError.OtherError(s"Failed to parse PEM: $err")) + case v0.LegalIdentityClaim.Evidence.Empty => 
FieldNotSet("LegalIdentityClaim.evidence").asLeft + } + } + +} + +/** Side of the party to participant mapping request + * + * Party to participant mapping request need to be approved by both namespaces if the namespaces are different. + * We support this by allowing to split the signatures into two transactions (so both namespace controller sign the + * same transaction, just with different "RequestSide" + * + * {Both, +, (P,I) -> (N,J)}^[s_I, s_J] = {From,+, (P,I) -> (N,J)}^[s_I] + {To,+, (P,I) -> (N,J)}^[s_J] + */ +sealed trait RequestSide { + + def toProtoEnum: v0.RequestSide + + def requiredAuth(left: UniqueIdentifier, right: UniqueIdentifier): RequiredAuth + +} + +object RequestSide { + + case object From extends RequestSide { + val toProtoEnum = v0.RequestSide.From + override def requiredAuth(left: UniqueIdentifier, right: UniqueIdentifier): RequiredAuth = + RequiredAuth.Uid(Seq(left)) + } + case object To extends RequestSide { + val toProtoEnum = v0.RequestSide.To + override def requiredAuth(left: UniqueIdentifier, right: UniqueIdentifier): RequiredAuth = + RequiredAuth.Uid(Seq(right)) + } + case object Both extends RequestSide { + val toProtoEnum = v0.RequestSide.Both + override def requiredAuth(left: UniqueIdentifier, right: UniqueIdentifier): RequiredAuth = + RequiredAuth.Uid(Seq(left, right)) + } + + /* flips the request side (From becomes To and To becomes From). If Both is passed, an exception is thrown. 
   */
  def flip(side: RequestSide): RequestSide = side match {
    case From => To
    case To => From
    case Both =>
      throw new IllegalArgumentException("should never flip request side of type " + Both.toString)
  }

  /** Parses the proto enum, failing when the field is unset or carries an unknown value. */
  def fromProtoEnum(side: v0.RequestSide): ParsingResult[RequestSide] =
    side match {
      case v0.RequestSide.Both => Right(RequestSide.Both)
      case v0.RequestSide.From => Right(RequestSide.From)
      case v0.RequestSide.To => Right(RequestSide.To)
      case v0.RequestSide.MissingRequestSide => Left(FieldNotSet(side.name))
      case v0.RequestSide.Unrecognized(x) => Left(UnrecognizedEnum(side.name, x))
    }

  /** sides accumulator, used in folds in order to figure out if we've seen both sides
    * (the tuple tracks (seenFrom, seenTo); Both sets both flags at once)
    */
  def accumulateSide(cur: (Boolean, Boolean), side: RequestSide): (Boolean, Boolean) =
    (cur, side) match {
      case (_, RequestSide.Both) => (true, true)
      case ((_, rght), RequestSide.From) => (true, rght)
      case ((lft, _), RequestSide.To) => (lft, true)
    }

}

/** Topology mapping recording a participant's state on a domain
  * (permission and trust level), authorized per [[RequestSide]].
  */
// architecture-handbook-entry-begin: ParticipantState
final case class ParticipantState(
    side: RequestSide,
    domain: DomainId,
    participant: ParticipantId,
    permission: ParticipantPermission,
    trustLevel: TrustLevel,
) extends TopologyStateUpdateMapping {

  // Invariant: non-ordinary trust levels are only meaningful for confirming participants.
  require(
    permission.canConfirm || trustLevel == TrustLevel.Ordinary,
    "participant trust level must either be ordinary or permission must be confirming",
  )
  // architecture-handbook-entry-end: ParticipantState

  /** Projects this mapping onto the (permission, trustLevel) attribute pair. */
  def toParticipantAttributes: ParticipantAttributes = ParticipantAttributes(permission, trustLevel)

  /** Serializes this state to its v0 protobuf representation. */
  def toProtoV0: v0.ParticipantState = {
    v0.ParticipantState(
      side = side.toProtoEnum,
      domain = domain.toProtoPrimitive,
      participant = participant.uid.toProtoPrimitive,
      permission = permission.toProtoEnum,
      trustLevel = trustLevel.toProtoEnum,
    )
  }

  override def uniquePath(id: TopologyElementId): UniquePath = {
    // TODO(i4933) include hash over content and include domain-id in the path
UniquePathSignedTopologyTransaction(participant.uid, dbType, id) + } + + override def dbType: DomainTopologyTransactionType = ParticipantState.dbType + override def requiredAuth: RequiredAuth = side.requiredAuth(domain.unwrap, participant.uid) + + override def secondaryUid: Option[UniqueIdentifier] = + if (side != RequestSide.To) domain.uid.some else None + + override def isReplacedBy(mapping: TopologyMapping): Boolean = mapping match { + case other: ParticipantState => + def subset(mp: ParticipantState) = (mp.side, mp.domain, mp.participant) + subset(other) == subset(this) + case _ => false + } + + override def restrictedToDomain: Option[DomainId] = Some(domain) + +} + +object ParticipantState { + + def dbType: DomainTopologyTransactionType = DomainTopologyTransactionType.ParticipantState + + def fromProtoV0( + parsed: v0.ParticipantState + ): ParsingResult[ParticipantState] = + for { + side <- RequestSide.fromProtoEnum(parsed.side) + domain <- DomainId.fromProtoPrimitive(parsed.domain, "domain") + permission <- ParticipantPermission.fromProtoEnum(parsed.permission) + trustLevel <- TrustLevel.fromProtoEnum(parsed.trustLevel) + uid <- UniqueIdentifier.fromProtoPrimitive(parsed.participant, "participant") + } yield ParticipantState(side, domain, ParticipantId(uid), permission, trustLevel) + +} + +// architecture-handbook-entry-begin: MediatorDomainState +final case class MediatorDomainState( + side: RequestSide, + domain: DomainId, + mediator: MediatorId, +) extends TopologyStateUpdateMapping { + + // architecture-handbook-entry-end: MediatorDomainState + + def toProtoV0: v0.MediatorDomainState = { + v0.MediatorDomainState( + side = side.toProtoEnum, + domain = domain.toProtoPrimitive, + mediator = mediator.uid.toProtoPrimitive, + ) + } + + override def uniquePath(id: TopologyElementId): UniquePath = { + // TODO(i4933) include hash over content and include domain-id in the path + UniquePathSignedTopologyTransaction(mediator.uid, dbType, id) + } + + override def 
secondaryUid: Option[UniqueIdentifier] = + if (side != RequestSide.From) mediator.uid.some else None + + override def dbType: DomainTopologyTransactionType = MediatorDomainState.dbType + override def requiredAuth: RequiredAuth = side.requiredAuth(domain.unwrap, mediator.uid) + + override def isReplacedBy(mapping: TopologyMapping): Boolean = mapping match { + case other: MediatorDomainState => + def subset(mp: MediatorDomainState) = (mp.side, mp.domain, mp.mediator) + subset(other) == subset(this) + case _ => false + } + + override def restrictedToDomain: Option[DomainId] = Some(domain) + +} + +object MediatorDomainState { + + def dbType: DomainTopologyTransactionType = DomainTopologyTransactionType.MediatorDomainState + + def fromProtoV0( + parsed: v0.MediatorDomainState + ): ParsingResult[MediatorDomainState] = + for { + side <- RequestSide.fromProtoEnum(parsed.side) + domain <- DomainId.fromProtoPrimitive(parsed.domain, "domain") + uid <- UniqueIdentifier.fromProtoPrimitive(parsed.mediator, "mediator") + } yield MediatorDomainState(side, domain, MediatorId(uid)) + +} + +/** party to participant mapping + * + * We can map a party to several participants at the same time. 
We represent such a + * mapping in the topology state using the party to participant + */ +// architecture-handbook-entry-begin: PartyToParticipant +final case class PartyToParticipant( + side: RequestSide, + party: PartyId, + participant: ParticipantId, + permission: ParticipantPermission, +) extends TopologyStateUpdateMapping { + // architecture-handbook-entry-end: PartyToParticipant + + require( + party.uid != participant.uid, + s"Unable to allocate party ${party.uid}, as it has the same name as the participant's admin party.", + ) + + def toProtoV0: v0.PartyToParticipant = + v0.PartyToParticipant( + side = side.toProtoEnum, + party = party.toProtoPrimitive, + participant = participant.toProtoPrimitive, + permission = permission.toProtoEnum, + ) + + override def uniquePath(id: TopologyElementId): UniquePath = + // TODO(i4933) include hash over content + UniquePathSignedTopologyTransaction(party.uid, dbType, id) + + override def dbType: DomainTopologyTransactionType = PartyToParticipant.dbType + + override def requiredAuth: RequiredAuth = side.requiredAuth(party.uid, participant.uid) + + override def secondaryUid: Option[UniqueIdentifier] = + if (side != RequestSide.From) participant.uid.some else None + + override def isReplacedBy(mapping: TopologyMapping): Boolean = mapping match { + case other: PartyToParticipant => + def subset(mp: PartyToParticipant) = (mp.side, mp.party, mp.participant) + subset(other) == subset(this) + case _ => false + } + +} + +object PartyToParticipant { + + def dbType: DomainTopologyTransactionType = DomainTopologyTransactionType.PartyToParticipant + + def fromProtoV0( + value: v0.PartyToParticipant + ): ParsingResult[PartyToParticipant] = { + val v0.PartyToParticipant(sideP, partyP, participantP, permissionP) = value + for { + partyUid <- UniqueIdentifier.fromProtoPrimitive(partyP, "party") + participant <- ParticipantId.fromProtoPrimitive(participantP, "participant") + side <- RequestSide.fromProtoEnum(sideP) + permission <- 
ParticipantPermission.fromProtoEnum(permissionP) + } yield PartyToParticipant(side, PartyId(partyUid), participant, permission) + } + +} + +final case class VettedPackages(participant: ParticipantId, packageIds: Seq[LfPackageId]) + extends TopologyStateUpdateMapping + with PrettyPrinting { + def toProtoV0: v0.VettedPackages = + v0.VettedPackages( + participant = + participant.uid.toProtoPrimitive, // use UID proto, not participant proto (as this would be Member.toProtoPrimitive) which includes the unnecessary code + packageIds = packageIds, + ) + + override def pretty: Pretty[VettedPackages] = + prettyOfClass(param("participant", _.participant.uid), param("packages", _.packageIds)) + + override def uniquePath(id: TopologyElementId): UniquePath = + // TODO(i4933) include hash over content + UniquePathSignedTopologyTransaction(participant.uid, dbType, id) + + override def dbType: DomainTopologyTransactionType = VettedPackages.dbType + + override def requiredAuth: RequiredAuth = RequiredAuth.Uid(Seq(participant.uid)) + +} + +object VettedPackages { + val dbType: DomainTopologyTransactionType = DomainTopologyTransactionType.PackageUse + + def fromProtoV0(value: v0.VettedPackages): ParsingResult[VettedPackages] = { + val v0.VettedPackages(participantP, packagesP) = value + for { + uid <- UniqueIdentifier.fromProtoPrimitive(participantP, "participant") + packageIds <- packagesP + .traverse(LfPackageId.fromString) + .leftMap(ProtoDeserializationError.ValueConversionError("package_ids", _)) + } yield VettedPackages(ParticipantId(uid), packageIds) + } + +} + +final case class DomainParametersChange( + domainId: DomainId, + domainParameters: DynamicDomainParameters, +) extends DomainGovernanceMapping { + private[transaction] def toProtoV1: v1.DomainParametersChange = v1.DomainParametersChange( + domain = domainId.toProtoPrimitive, + Option(domainParameters.toProtoV2), + ) + + override def dbType: DomainTopologyTransactionType = DomainParametersChange.dbType + + override def 
requiredAuth: RequiredAuth = RequiredAuth.Uid(Seq(domainId.unwrap)) +} + +object DomainParametersChange { + val dbType: DomainTopologyTransactionType = DomainTopologyTransactionType.DomainParameters + + private[transaction] def fromProtoV1( + value: v1.DomainParametersChange + ): ParsingResult[DomainParametersChange] = { + for { + uid <- UniqueIdentifier.fromProtoPrimitive(value.domain, "domain") + domainParametersXP <- value.domainParameters.toRight(FieldNotSet("domainParameters")) + domainParameters <- DynamicDomainParameters.fromProtoV2(domainParametersXP) + } yield DomainParametersChange(DomainId(uid), domainParameters) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMappingX.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMappingX.scala new file mode 100644 index 0000000000..cb257bfdba --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMappingX.scala @@ -0,0 +1,1552 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology.transaction + +import cats.Monoid +import cats.syntax.either.* +import cats.syntax.option.* +import cats.syntax.traverse.* +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.ProtoDeserializationError.{ + FieldNotSet, + InvariantViolation, + UnrecognizedEnum, +} +import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt, PositiveLong} +import com.digitalasset.canton.crypto.* +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.protocol.v2.TopologyMappingX.Mapping +import com.digitalasset.canton.protocol.{DynamicDomainParameters, v2} +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.topology.* +import com.digitalasset.canton.topology.transaction.SignedTopologyTransactionX.GenericSignedTopologyTransactionX +import com.digitalasset.canton.topology.transaction.TopologyMappingX.RequiredAuthX.* +import com.digitalasset.canton.topology.transaction.TopologyMappingX.{ + Code, + MappingHash, + RequiredAuthX, +} +import com.digitalasset.canton.util.OptionUtil +import com.digitalasset.canton.{LfPackageId, ProtoDeserializationError} +import com.google.common.annotations.VisibleForTesting +import slick.jdbc.SetParameter + +import scala.reflect.ClassTag + +sealed trait TopologyMappingX extends Product with Serializable with PrettyPrinting { + + override def pretty: Pretty[this.type] = adHocPrettyInstance + + /** Returns the code used to store & index this mapping */ + def code: Code + + /** The "primary" namespace authorizing the topology mapping. + * Used for filtering query results. 
+ */ + def namespace: Namespace + + /** The "primary" identity authorizing the topology mapping, optional as some mappings (namespace delegations and + * unionspace definitions) only have a namespace + * Used for filtering query results. + */ + def maybeUid: Option[UniqueIdentifier] + + /** Returns authorization information + * + * Each topology transaction must be authorized directly or indirectly by + * all necessary controllers of the given namespace. + * + * @param previous the previously validly authorized state (some state changes only need subsets of the authorizers) + */ + def requiredAuth( + previous: Option[TopologyTransactionX[TopologyChangeOpX, TopologyMappingX]] + ): RequiredAuthX + + def restrictedToDomain: Option[DomainId] + + def toProtoV2: v2.TopologyMappingX + + lazy val uniqueKey: MappingHash = { + // TODO(#14048) use different hash purpose (this one isn't used anymore) + MappingHash( + addUniqueKeyToBuilder( + Hash.build(HashPurpose.DomainTopologyTransactionMessageSignature, HashAlgorithm.Sha256) + ).add(code.dbInt) + .finish() + ) + } + + final def select[TargetMapping <: TopologyMappingX](implicit + M: ClassTag[TargetMapping] + ): Option[TargetMapping] = M.unapply(this) + + /** Returns a hash builder based on the values of the topology mapping that needs to be unique */ + protected def addUniqueKeyToBuilder(builder: HashBuilder): HashBuilder + +} + +object TopologyMappingX { + + final case class MappingHash(hash: Hash) extends AnyVal + + sealed case class Code private (dbInt: Int, code: String) + object Code { + + object NamespaceDelegationX extends Code(1, "nsd") + object IdentifierDelegationX extends Code(2, "idd") + object UnionspaceDefinitionX extends Code(3, "usd") + + object OwnerToKeyMappingX extends Code(4, "otk") + + object DomainTrustCertificateX extends Code(5, "dtc") + object ParticipantDomainPermissionX extends Code(6, "pdp") + object PartyHostingLimitsX extends Code(7, "phl") + object VettedPackagesX extends Code(8, "vtp") + + 
object PartyToParticipantX extends Code(9, "ptp") + object AuthorityOfX extends Code(10, "auo") + + object DomainParametersStateX extends Code(11, "dop") + object MediatorDomainStateX extends Code(12, "mds") + object SequencerDomainStateX extends Code(13, "sds") + object OffboardParticipantX extends Code(14, "ofp") + + object PurgeTopologyTransactionX extends Code(15, "ptt") + object TrafficControlStateX extends Code(16, "tcs") + + lazy val all = Seq( + NamespaceDelegationX, + IdentifierDelegationX, + UnionspaceDefinitionX, + OwnerToKeyMappingX, + DomainTrustCertificateX, + ParticipantDomainPermissionX, + VettedPackagesX, + PartyToParticipantX, + AuthorityOfX, + DomainParametersStateX, + MediatorDomainStateX, + SequencerDomainStateX, + OffboardParticipantX, + PurgeTopologyTransactionX, + TrafficControlStateX, + ) + + implicit val setParameterTopologyMappingCode: SetParameter[Code] = + (v, pp) => pp.setInt(v.dbInt) + + } + + // Small wrapper to not have to work with (Set[Namespace], Set[Namespace], Set[Uid]) + final case class RequiredAuthXAuthorizations( + namespacesWithRoot: Set[Namespace] = Set.empty, + namespaces: Set[Namespace] = Set.empty, + uids: Set[UniqueIdentifier] = Set.empty, + ) { + def isEmpty: Boolean = namespacesWithRoot.isEmpty && namespaces.isEmpty && uids.isEmpty + } + + object RequiredAuthXAuthorizations { + + val empty: RequiredAuthXAuthorizations = RequiredAuthXAuthorizations() + + implicit val monoid: Monoid[RequiredAuthXAuthorizations] = + new Monoid[RequiredAuthXAuthorizations] { + override def empty: RequiredAuthXAuthorizations = RequiredAuthXAuthorizations.empty + + override def combine( + x: RequiredAuthXAuthorizations, + y: RequiredAuthXAuthorizations, + ): RequiredAuthXAuthorizations = + RequiredAuthXAuthorizations( + namespacesWithRoot = x.namespacesWithRoot ++ y.namespacesWithRoot, + namespaces = x.namespaces ++ y.namespaces, + uids = x.uids ++ y.uids, + ) + } + } + + sealed trait RequiredAuthX { + def requireRootDelegation: Boolean = 
false + def satisfiedByActualAuthorizers( + namespacesWithRoot: Set[Namespace], + namespaces: Set[Namespace], + uids: Set[UniqueIdentifier], + ): Either[RequiredAuthXAuthorizations, Unit] + + final def and(next: RequiredAuthX): RequiredAuthX = + RequiredAuthX.And(this, next) + final def or(next: RequiredAuthX): RequiredAuthX = + RequiredAuthX.Or(this, next) + + final def foldMap[T]( + namespaceCheck: RequiredNamespaces => T, + uidCheck: RequiredUids => T, + )(implicit T: Monoid[T]): T = { + def loop(x: RequiredAuthX): T = x match { + case ns @ RequiredNamespaces(_, _) => namespaceCheck(ns) + case uids @ RequiredUids(_) => uidCheck(uids) + case EmptyAuthorization => T.empty + case And(first, second) => T.combine(loop(first), loop(second)) + case Or(first, second) => + val firstRes = loop(first) + if (firstRes == T.empty) loop(second) + else firstRes + } + loop(this) + } + + def authorizations: RequiredAuthXAuthorizations + } + + object RequiredAuthX { + + private[transaction] case object EmptyAuthorization extends RequiredAuthX { + override def satisfiedByActualAuthorizers( + namespacesWithRoot: Set[Namespace], + namespaces: Set[Namespace], + uids: Set[UniqueIdentifier], + ): Either[RequiredAuthXAuthorizations, Unit] = Either.unit + + override def authorizations: RequiredAuthXAuthorizations = RequiredAuthXAuthorizations() + } + + final case class RequiredNamespaces( + namespaces: Set[Namespace], + override val requireRootDelegation: Boolean = false, + ) extends RequiredAuthX { + override def satisfiedByActualAuthorizers( + providedNamespacesWithRoot: Set[Namespace], + providedNamespaces: Set[Namespace], + uids: Set[UniqueIdentifier], + ): Either[RequiredAuthXAuthorizations, Unit] = { + val filter = if (requireRootDelegation) providedNamespacesWithRoot else providedNamespaces + val missing = namespaces.filter(ns => !filter(ns)) + Either.cond( + missing.isEmpty, + (), + RequiredAuthXAuthorizations( + namespacesWithRoot = if (requireRootDelegation) missing else 
Set.empty, + namespaces = if (requireRootDelegation) Set.empty else missing, + ), + ) + } + + override def authorizations: RequiredAuthXAuthorizations = RequiredAuthXAuthorizations( + namespacesWithRoot = if (requireRootDelegation) namespaces else Set.empty, + namespaces = if (requireRootDelegation) Set.empty else namespaces, + ) + } + + final case class RequiredUids(uids: Set[UniqueIdentifier]) extends RequiredAuthX { + override def satisfiedByActualAuthorizers( + namespacesWithRoot: Set[Namespace], + namespaces: Set[Namespace], + providedUids: Set[UniqueIdentifier], + ): Either[RequiredAuthXAuthorizations, Unit] = { + val missing = uids.filter(uid => !providedUids(uid) && !namespaces(uid.namespace)) + Either.cond(missing.isEmpty, (), RequiredAuthXAuthorizations(uids = missing)) + } + + override def authorizations: RequiredAuthXAuthorizations = RequiredAuthXAuthorizations( + namespaces = uids.map(_.namespace), + uids = uids, + ) + } + + private[transaction] final case class And( + first: RequiredAuthX, + second: RequiredAuthX, + ) extends RequiredAuthX { + override def satisfiedByActualAuthorizers( + namespacesWithRoot: Set[Namespace], + namespaces: Set[Namespace], + uids: Set[UniqueIdentifier], + ): Either[RequiredAuthXAuthorizations, Unit] = + first + .satisfiedByActualAuthorizers(namespacesWithRoot, namespaces, uids) + .flatMap(_ => + second + .satisfiedByActualAuthorizers(namespacesWithRoot, namespaces, uids) + ) + + override def authorizations: RequiredAuthXAuthorizations = + RequiredAuthXAuthorizations.monoid.combine(first.authorizations, second.authorizations) + } + + private[transaction] final case class Or( + first: RequiredAuthX, + second: RequiredAuthX, + ) extends RequiredAuthX { + override def satisfiedByActualAuthorizers( + namespacesWithRoot: Set[Namespace], + namespaces: Set[Namespace], + uids: Set[UniqueIdentifier], + ): Either[RequiredAuthXAuthorizations, Unit] = + first + .satisfiedByActualAuthorizers(namespacesWithRoot, namespaces, uids) + 
.orElse( + second + .satisfiedByActualAuthorizers(namespacesWithRoot, namespaces, uids) + ) + + override def authorizations: RequiredAuthXAuthorizations = + RequiredAuthXAuthorizations.monoid.combine(first.authorizations, second.authorizations) + } + } + + def fromProtoV2(proto: v2.TopologyMappingX): ParsingResult[TopologyMappingX] = + proto.mapping match { + case Mapping.Empty => + Left(ProtoDeserializationError.TransactionDeserialization("No mapping set")) + case Mapping.NamespaceDelegation(value) => NamespaceDelegationX.fromProtoV2(value) + case Mapping.IdentifierDelegation(value) => IdentifierDelegationX.fromProtoV2(value) + case Mapping.UnionspaceDefinition(value) => UnionspaceDefinitionX.fromProtoV2(value) + case Mapping.OwnerToKeyMapping(value) => OwnerToKeyMappingX.fromProtoV2(value) + case Mapping.DomainTrustCertificate(value) => DomainTrustCertificateX.fromProtoV2(value) + case Mapping.PartyHostingLimits(value) => PartyHostingLimitsX.fromProtoV2(value) + case Mapping.ParticipantPermission(value) => ParticipantDomainPermissionX.fromProtoV2(value) + case Mapping.VettedPackages(value) => VettedPackagesX.fromProtoV2(value) + case Mapping.PartyToParticipant(value) => PartyToParticipantX.fromProtoV2(value) + case Mapping.AuthorityOf(value) => AuthorityOfX.fromProtoV2(value) + case Mapping.DomainParametersState(value) => DomainParametersStateX.fromProtoV2(value) + case Mapping.MediatorDomainState(value) => MediatorDomainStateX.fromProtoV2(value) + case Mapping.SequencerDomainState(value) => SequencerDomainStateX.fromProtoV2(value) + case Mapping.PurgeTopologyTxs(value) => PurgeTopologyTransactionX.fromProtoV2(value) + case Mapping.TrafficControlState(value) => TrafficControlStateX.fromProtoV2(value) + } + + private[transaction] def addDomainId( + builder: HashBuilder, + domainId: Option[DomainId], + ): HashBuilder = + builder.add(domainId.map(_.uid.toProtoPrimitive).getOrElse("none")) + +} + +/** A namespace delegation transaction (intermediate CA) + * + * 
Entrusts a public-key to perform changes on the namespace + * {(*,I) => p_k} + * + * If the delegation is a root delegation, then the target key + * inherits the right to authorize other NamespaceDelegations. + */ +final case class NamespaceDelegationX private ( + namespace: Namespace, + target: SigningPublicKey, + isRootDelegation: Boolean, +) extends TopologyMappingX { + + def toProto: v2.NamespaceDelegationX = + v2.NamespaceDelegationX( + namespace = namespace.fingerprint.unwrap, + targetKey = Some(target.toProtoV0), + isRootDelegation = isRootDelegation, + ) + + override def toProtoV2: v2.TopologyMappingX = + v2.TopologyMappingX( + v2.TopologyMappingX.Mapping.NamespaceDelegation( + toProto + ) + ) + + override def code: Code = Code.NamespaceDelegationX + + override def maybeUid: Option[UniqueIdentifier] = None + + override def restrictedToDomain: Option[DomainId] = None + + override def requiredAuth( + previous: Option[TopologyTransactionX[TopologyChangeOpX, TopologyMappingX]] + ): RequiredAuthX = { + // All namespace delegation creations require the root delegation privilege. 
+ RequiredNamespaces(Set(namespace), requireRootDelegation = true) + } + + override protected def addUniqueKeyToBuilder(builder: HashBuilder): HashBuilder = + builder + .add(namespace.fingerprint.unwrap) + .add(target.fingerprint.unwrap) +} + +object NamespaceDelegationX { + + def create( + namespace: Namespace, + target: SigningPublicKey, + isRootDelegation: Boolean, + ): Either[String, NamespaceDelegationX] = + Either.cond( + isRootDelegation || namespace.fingerprint != target.fingerprint, + NamespaceDelegationX(namespace, target, isRootDelegation), + s"Root certificate for $namespace needs to be set as isRootDelegation = true", + ) + + @VisibleForTesting + protected[canton] def tryCreate( + namespace: Namespace, + target: SigningPublicKey, + isRootDelegation: Boolean, + ): NamespaceDelegationX = + create(namespace, target, isRootDelegation).fold(err => sys.error(err), identity) + + def code: TopologyMappingX.Code = Code.NamespaceDelegationX + + /** Returns true if the given transaction is a self-signed root certificate */ + def isRootCertificate(sit: GenericSignedTopologyTransactionX): Boolean = { + ((sit.transaction.op == TopologyChangeOpX.Replace && sit.transaction.serial == PositiveInt.one) || + (sit.transaction.op == TopologyChangeOpX.Remove && sit.transaction.serial != PositiveInt.one)) && + sit.transaction.mapping + .select[transaction.NamespaceDelegationX] + .exists(ns => + sit.signatures.size == 1 && + sit.signatures.head1.signedBy == ns.namespace.fingerprint && + ns.isRootDelegation && + ns.target.fingerprint == ns.namespace.fingerprint + ) + } + + /** Returns true if the given transaction is a root delegation */ + def isRootDelegation(sit: GenericSignedTopologyTransactionX): Boolean = { + isRootCertificate(sit) || ( + sit.transaction.op == TopologyChangeOpX.Replace && + sit.transaction.mapping + .select[transaction.NamespaceDelegationX] + .exists(ns => ns.isRootDelegation) + ) + } + + def fromProtoV2( + value: v2.NamespaceDelegationX + ): 
ParsingResult[NamespaceDelegationX] = + for { + namespace <- Fingerprint.fromProtoPrimitive(value.namespace).map(Namespace(_)) + target <- ProtoConverter.parseRequired( + SigningPublicKey.fromProtoV0, + "target_key", + value.targetKey, + ) + } yield NamespaceDelegationX(namespace, target, value.isRootDelegation) + +} + +/** A namespace owned jointly by a set of owner namespaces + * + * authorization: whoever controls the unionspace and all the owners that + * were not already present in the tx with serial = n - 1 + * exception: an owner can leave the unionspace unilaterally as long as there are enough members + * to reach the threshold + */ +final case class UnionspaceDefinitionX private ( + unionspace: Namespace, + threshold: PositiveInt, + owners: NonEmpty[Set[Namespace]], +) extends TopologyMappingX { + + def toProto: v2.UnionspaceDefinitionX = + v2.UnionspaceDefinitionX( + unionspace = unionspace.fingerprint.unwrap, + threshold = threshold.unwrap, + owners = owners.toSeq.map(_.toProtoPrimitive), + ) + + override def toProtoV2: v2.TopologyMappingX = + v2.TopologyMappingX( + v2.TopologyMappingX.Mapping.UnionspaceDefinition( + toProto + ) + ) + + override def code: Code = Code.UnionspaceDefinitionX + + override def namespace: Namespace = unionspace + override def maybeUid: Option[UniqueIdentifier] = None + + override def restrictedToDomain: Option[DomainId] = None + + override def requiredAuth( + previous: Option[TopologyTransactionX[TopologyChangeOpX, TopologyMappingX]] + ): RequiredAuthX = { + previous match { + case None => + RequiredNamespaces(owners.forgetNE) + case Some( + TopologyTransactionX( + _op, + _serial, + UnionspaceDefinitionX(`unionspace`, previousThreshold, previousOwners), + ) + ) => + val added = owners.diff(previousOwners) + // all added owners MUST sign + RequiredNamespaces(added) + // and the quorum of existing owners + .and( + RequiredNamespaces( + Set(unionspace) + ) + ) + case Some(topoTx) => + //
TODO(#14048): proper error or ignore + sys.error(s"unexpected transaction data: $previous") + } + } + + override protected def addUniqueKeyToBuilder(builder: HashBuilder): HashBuilder = + builder.add(unionspace.fingerprint.unwrap) +} + +object UnionspaceDefinitionX { + + def code: TopologyMappingX.Code = Code.UnionspaceDefinitionX + + def create( + unionspace: Namespace, + threshold: PositiveInt, + owners: NonEmpty[Set[Namespace]], + ): Either[String, UnionspaceDefinitionX] = + for { + _ <- Either.cond( + owners.size >= threshold.value, + (), + s"Invalid threshold (${threshold}) for ${unionspace} with ${owners.size} owners", + ) + } yield UnionspaceDefinitionX(unionspace, threshold, owners) + + def fromProtoV2( + value: v2.UnionspaceDefinitionX + ): ParsingResult[UnionspaceDefinitionX] = { + val v2.UnionspaceDefinitionX(unionspaceP, thresholdP, ownersP) = value + for { + unionspace <- Fingerprint.fromProtoPrimitive(unionspaceP).map(Namespace(_)) + threshold <- ProtoConverter.parsePositiveInt(thresholdP) + owners <- ownersP.traverse(Fingerprint.fromProtoPrimitive) + ownersNE <- NonEmpty + .from(owners.toSet) + .toRight( + ProtoDeserializationError.InvariantViolation( + "owners cannot be empty" + ) + ) + item <- create(unionspace, threshold, ownersNE.map(Namespace(_))) + .leftMap(ProtoDeserializationError.OtherError) + } yield item + } + + def computeNamespace( + owners: Set[Namespace] + ): Namespace = { + val builder = Hash.build(HashPurpose.UnionspaceNamespace, HashAlgorithm.Sha256) + owners.toSeq + .sorted(Namespace.namespaceOrder.toOrdering) + .foreach(ns => builder.add(ns.fingerprint.unwrap)) + Namespace(Fingerprint(builder.finish().toLengthLimitedHexString)) + } +} + +/** An identifier delegation + * + * entrusts a public-key to do any change with respect to the identifier + * {(X,I) => p_k} + */ +final case class IdentifierDelegationX(identifier: UniqueIdentifier, target: SigningPublicKey) + extends TopologyMappingX { + + def toProto: v2.IdentifierDelegationX 
= + v2.IdentifierDelegationX( + uniqueIdentifier = identifier.toProtoPrimitive, + targetKey = Some(target.toProtoV0), + ) + + override def toProtoV2: v2.TopologyMappingX = + v2.TopologyMappingX( + v2.TopologyMappingX.Mapping.IdentifierDelegation( + toProto + ) + ) + + override def code: Code = Code.IdentifierDelegationX + + override def namespace: Namespace = identifier.namespace + override def maybeUid: Option[UniqueIdentifier] = Some(identifier) + + override def restrictedToDomain: Option[DomainId] = None + + override def requiredAuth( + previous: Option[TopologyTransactionX[TopologyChangeOpX, TopologyMappingX]] + ): RequiredAuthX = RequiredUids(Set(identifier)) + + override protected def addUniqueKeyToBuilder(builder: HashBuilder): HashBuilder = + builder + .add(identifier.toProtoPrimitive) + .add(target.fingerprint.unwrap) +} + +object IdentifierDelegationX { + + def code: Code = Code.IdentifierDelegationX + + def fromProtoV2( + value: v2.IdentifierDelegationX + ): ParsingResult[IdentifierDelegationX] = + for { + identifier <- UniqueIdentifier.fromProtoPrimitive(value.uniqueIdentifier, "unique_identifier") + target <- ProtoConverter.parseRequired( + SigningPublicKey.fromProtoV0, + "target_key", + value.targetKey, + ) + } yield IdentifierDelegationX(identifier, target) +} + +/** A key owner (participant, mediator, sequencer) to key mapping + * + * In Canton, we need to know keys for all participating entities. The entities are + * all the protocol members (participant, mediator) plus the + * sequencer (which provides the communication infrastructure for the protocol members). 
+ */ +final case class OwnerToKeyMappingX( + member: Member, + domain: Option[DomainId], + keys: NonEmpty[Seq[PublicKey]], +) extends TopologyMappingX { + + override protected def addUniqueKeyToBuilder(builder: HashBuilder): HashBuilder = + TopologyMappingX.addDomainId(builder.add(member.uid.toProtoPrimitive), domain) + + def toProto: v2.OwnerToKeyMappingX = v2.OwnerToKeyMappingX( + member = member.toProtoPrimitive, + publicKeys = keys.map(_.toProtoPublicKeyV0), + domain = domain.map(_.toProtoPrimitive).getOrElse(""), + ) + + def toProtoV2: v2.TopologyMappingX = + v2.TopologyMappingX( + v2.TopologyMappingX.Mapping.OwnerToKeyMapping( + toProto + ) + ) + + def code: TopologyMappingX.Code = Code.OwnerToKeyMappingX + + override def namespace: Namespace = member.uid.namespace + override def maybeUid: Option[UniqueIdentifier] = Some(member.uid) + + override def restrictedToDomain: Option[DomainId] = domain + + override def requiredAuth( + previous: Option[TopologyTransactionX[TopologyChangeOpX, TopologyMappingX]] + ): RequiredAuthX = RequiredUids(Set(member.uid)) + +} + +object OwnerToKeyMappingX { + + def code: TopologyMappingX.Code = Code.OwnerToKeyMappingX + + def fromProtoV2( + value: v2.OwnerToKeyMappingX + ): ParsingResult[OwnerToKeyMappingX] = { + val v2.OwnerToKeyMappingX(memberP, keysP, domainP) = value + for { + member <- Member.fromProtoPrimitive(memberP, "member") + keys <- keysP.traverse(x => + ProtoConverter + .parseRequired(PublicKey.fromProtoPublicKeyV0, "public_keys", Some(x)) + ) + keysNE <- NonEmpty + .from(keys) + .toRight(ProtoDeserializationError.FieldNotSet("public_keys"): ProtoDeserializationError) + domain <- OptionUtil + .emptyStringAsNone(domainP) + .traverse(DomainId.fromProtoPrimitive(_, "domain")) + } yield OwnerToKeyMappingX(member, domain, keysNE) + } + +} + +/** Participant domain trust certificate + */ +final case class DomainTrustCertificateX( + participantId: ParticipantId, + domainId: DomainId, + // TODO(#15399): respect this 
restriction when reassigning contracts + transferOnlyToGivenTargetDomains: Boolean, + targetDomains: Seq[DomainId], +) extends TopologyMappingX { + + def toProto: v2.DomainTrustCertificateX = + v2.DomainTrustCertificateX( + participant = participantId.toProtoPrimitive, + domain = domainId.toProtoPrimitive, + transferOnlyToGivenTargetDomains = transferOnlyToGivenTargetDomains, + targetDomains = targetDomains.map(_.toProtoPrimitive), + ) + + override def toProtoV2: v2.TopologyMappingX = + v2.TopologyMappingX( + v2.TopologyMappingX.Mapping.DomainTrustCertificate( + toProto + ) + ) + + override def code: Code = Code.DomainTrustCertificateX + + override def namespace: Namespace = participantId.uid.namespace + override def maybeUid: Option[UniqueIdentifier] = Some(participantId.uid) + + override def restrictedToDomain: Option[DomainId] = Some(domainId) + + override def requiredAuth( + previous: Option[TopologyTransactionX[TopologyChangeOpX, TopologyMappingX]] + ): RequiredAuthX = + RequiredUids(Set(participantId.uid)) + + override protected def addUniqueKeyToBuilder(builder: HashBuilder): HashBuilder = + builder + .add(participantId.toProtoPrimitive) + .add(domainId.toProtoPrimitive) +} + +object DomainTrustCertificateX { + + def code: Code = Code.DomainTrustCertificateX + + def fromProtoV2( + value: v2.DomainTrustCertificateX + ): ParsingResult[DomainTrustCertificateX] = + for { + participantId <- ParticipantId.fromProtoPrimitive(value.participant, "participant") + domainId <- DomainId.fromProtoPrimitive(value.domain, "domain") + transferOnlyToGivenTargetDomains = value.transferOnlyToGivenTargetDomains + targetDomains <- value.targetDomains.traverse( + DomainId.fromProtoPrimitive(_, "target_domains") + ) + } yield DomainTrustCertificateX( + participantId, + domainId, + transferOnlyToGivenTargetDomains, + targetDomains, + ) +} + +/* Participant domain permission + */ +sealed abstract class ParticipantPermissionX(val canConfirm: Boolean) + extends Product + with 
Serializable { + def toProtoV2: v2.ParticipantPermissionX + def toNonX: ParticipantPermission +} +object ParticipantPermissionX { + case object Submission extends ParticipantPermissionX(canConfirm = true) { + lazy val toProtoV2 = v2.ParticipantPermissionX.Submission + override def toNonX: ParticipantPermission = ParticipantPermission.Submission + } + case object Confirmation extends ParticipantPermissionX(canConfirm = true) { + lazy val toProtoV2 = v2.ParticipantPermissionX.Confirmation + override def toNonX: ParticipantPermission = ParticipantPermission.Confirmation + } + case object Observation extends ParticipantPermissionX(canConfirm = false) { + lazy val toProtoV2 = v2.ParticipantPermissionX.Observation + override def toNonX: ParticipantPermission = ParticipantPermission.Observation + } + + def fromProtoV2(value: v2.ParticipantPermissionX): ParsingResult[ParticipantPermissionX] = + value match { + case v2.ParticipantPermissionX.MissingParticipantPermission => + Left(FieldNotSet(value.name)) + case v2.ParticipantPermissionX.Submission => Right(Submission) + case v2.ParticipantPermissionX.Confirmation => Right(Confirmation) + case v2.ParticipantPermissionX.Observation => Right(Observation) + case v2.ParticipantPermissionX.Unrecognized(x) => Left(UnrecognizedEnum(value.name, x)) + } + + implicit val orderingParticipantPermissionX: Ordering[ParticipantPermissionX] = { + val participantPermissionXOrderMap = Seq[ParticipantPermissionX]( + Observation, + Confirmation, + Submission, + ).zipWithIndex.toMap + Ordering.by[ParticipantPermissionX, Int](participantPermissionXOrderMap(_)) + } +} + +sealed trait TrustLevelX { + def toProtoV2: v2.TrustLevelX + def toNonX: TrustLevel +} +object TrustLevelX { + case object Ordinary extends TrustLevelX { + lazy val toProtoV2 = v2.TrustLevelX.Ordinary + def toNonX: TrustLevel = TrustLevel.Ordinary + } + case object Vip extends TrustLevelX { + lazy val toProtoV2 = v2.TrustLevelX.Vip + def toNonX: TrustLevel = TrustLevel.Vip + } + + 
def fromProtoV2(value: v2.TrustLevelX): ParsingResult[TrustLevelX] = value match { + case v2.TrustLevelX.Ordinary => Right(Ordinary) + case v2.TrustLevelX.Vip => Right(Vip) + case v2.TrustLevelX.MissingTrustLevel => Left(FieldNotSet(value.name)) + case v2.TrustLevelX.Unrecognized(x) => Left(UnrecognizedEnum(value.name, x)) + } + + implicit val orderingTrustLevelX: Ordering[TrustLevelX] = { + val participantTrustLevelXOrderMap = + Seq[TrustLevelX](Ordinary, Vip).zipWithIndex.toMap + Ordering.by[TrustLevelX, Int](participantTrustLevelXOrderMap(_)) + } +} + +final case class ParticipantDomainLimits(maxRate: Int, maxNumParties: Int, maxNumPackages: Int) { + def toProto: v2.ParticipantDomainLimits = + v2.ParticipantDomainLimits(maxRate, maxNumParties, maxNumPackages) +} +object ParticipantDomainLimits { + def fromProtoV2(value: v2.ParticipantDomainLimits): ParticipantDomainLimits = + ParticipantDomainLimits(value.maxRate, value.maxNumParties, value.maxNumPackages) +} + +final case class ParticipantDomainPermissionX( + domainId: DomainId, + participantId: ParticipantId, + permission: ParticipantPermissionX, + trustLevel: TrustLevelX, + limits: Option[ParticipantDomainLimits], + loginAfter: Option[CantonTimestamp], +) extends TopologyMappingX { + + def toParticipantAttributes: ParticipantAttributes = + ParticipantAttributes(permission.toNonX, trustLevel.toNonX) + + def toProto: v2.ParticipantDomainPermissionX = + v2.ParticipantDomainPermissionX( + domain = domainId.toProtoPrimitive, + participant = participantId.toProtoPrimitive, + permission = permission.toProtoV2, + trustLevel = trustLevel.toProtoV2, + limits = limits.map(_.toProto), + loginAfter = loginAfter.map(_.toProtoPrimitive), + ) + + override def toProtoV2: v2.TopologyMappingX = + v2.TopologyMappingX( + v2.TopologyMappingX.Mapping.ParticipantPermission( + toProto + ) + ) + + override def code: Code = Code.ParticipantDomainPermissionX + + override def namespace: Namespace = domainId.uid.namespace + override def 
maybeUid: Option[UniqueIdentifier] = Some(domainId.uid) + + override def restrictedToDomain: Option[DomainId] = Some(domainId) + + override def requiredAuth( + previous: Option[TopologyTransactionX[TopologyChangeOpX, TopologyMappingX]] + ): RequiredAuthX = + RequiredUids(Set(domainId.uid)) + + override protected def addUniqueKeyToBuilder(builder: HashBuilder): HashBuilder = + builder + .add(domainId.toProtoPrimitive) + .add(participantId.toProtoPrimitive) + + def setDefaultLimitIfNotSet( + defaultLimits: ParticipantDomainLimits + ): ParticipantDomainPermissionX = + if (limits.nonEmpty) + this + else + ParticipantDomainPermissionX( + domainId, + participantId, + permission, + trustLevel, + Some(defaultLimits), + loginAfter, + ) +} + +object ParticipantDomainPermissionX { + + def code: Code = Code.ParticipantDomainPermissionX + + def default( + domainId: DomainId, + participantId: ParticipantId, + ): ParticipantDomainPermissionX = + ParticipantDomainPermissionX( + domainId, + participantId, + ParticipantPermissionX.Submission, + TrustLevelX.Ordinary, + None, + None, + ) + + def fromProtoV2( + value: v2.ParticipantDomainPermissionX + ): ParsingResult[ParticipantDomainPermissionX] = + for { + domainId <- DomainId.fromProtoPrimitive(value.domain, "domain") + participantId <- ParticipantId.fromProtoPrimitive(value.participant, "participant") + permission <- ParticipantPermissionX.fromProtoV2(value.permission) + trustLevel <- TrustLevelX.fromProtoV2(value.trustLevel) + limits = value.limits.map(ParticipantDomainLimits.fromProtoV2) + loginAfter <- value.loginAfter.fold[ParsingResult[Option[CantonTimestamp]]](Right(None))( + CantonTimestamp.fromProtoPrimitive(_).map(_.some) + ) + } yield ParticipantDomainPermissionX( + domainId, + participantId, + permission, + trustLevel, + limits, + loginAfter, + ) +} + +// Party hosting limits +final case class PartyHostingLimitsX( + domainId: DomainId, + partyId: PartyId, + quota: Int, +) extends TopologyMappingX { + + def toProto: 
v2.PartyHostingLimitsX = + v2.PartyHostingLimitsX( + domain = domainId.toProtoPrimitive, + party = partyId.toProtoPrimitive, + quota = quota, + ) + + override def toProtoV2: v2.TopologyMappingX = + v2.TopologyMappingX( + v2.TopologyMappingX.Mapping.PartyHostingLimits( + toProto + ) + ) + + override def code: Code = Code.PartyHostingLimitsX + + override def namespace: Namespace = domainId.uid.namespace + override def maybeUid: Option[UniqueIdentifier] = Some(domainId.uid) + + override def restrictedToDomain: Option[DomainId] = Some(domainId) + + override def requiredAuth( + previous: Option[TopologyTransactionX[TopologyChangeOpX, TopologyMappingX]] + ): RequiredAuthX = + RequiredUids(Set(domainId.uid)) + + override protected def addUniqueKeyToBuilder(builder: HashBuilder): HashBuilder = + builder + .add(domainId.toProtoPrimitive) + .add(partyId.toProtoPrimitive) +} + +object PartyHostingLimitsX { + + def code: Code = Code.PartyHostingLimitsX + + def fromProtoV2( + value: v2.PartyHostingLimitsX + ): ParsingResult[PartyHostingLimitsX] = + for { + domainId <- DomainId.fromProtoPrimitive(value.domain, "domain") + partyId <- PartyId.fromProtoPrimitive(value.party, "party") + quota = value.quota + } yield PartyHostingLimitsX(domainId, partyId, quota) +} + +// Package vetting +final case class VettedPackagesX( + participantId: ParticipantId, + domainId: Option[DomainId], + packageIds: Seq[LfPackageId], +) extends TopologyMappingX { + + def toProto: v2.VettedPackagesX = + v2.VettedPackagesX( + participant = participantId.toProtoPrimitive, + packageIds = packageIds, + domain = domainId.fold("")(_.toProtoPrimitive), + ) + + override def toProtoV2: v2.TopologyMappingX = + v2.TopologyMappingX( + v2.TopologyMappingX.Mapping.VettedPackages( + toProto + ) + ) + + override def code: Code = Code.VettedPackagesX + + override def namespace: Namespace = participantId.uid.namespace + override def maybeUid: Option[UniqueIdentifier] = Some(participantId.uid) + + override def 
restrictedToDomain: Option[DomainId] = domainId + + override def requiredAuth( + previous: Option[TopologyTransactionX[TopologyChangeOpX, TopologyMappingX]] + ): RequiredAuthX = + RequiredUids(Set(participantId.uid)) + + override protected def addUniqueKeyToBuilder(builder: HashBuilder): HashBuilder = + builder + .add(participantId.toProtoPrimitive) + .add(domainId.fold("")(_.toProtoPrimitive)) +} + +object VettedPackagesX { + + def code: Code = Code.VettedPackagesX + + def fromProtoV2( + value: v2.VettedPackagesX + ): ParsingResult[VettedPackagesX] = + for { + participantId <- ParticipantId.fromProtoPrimitive(value.participant, "participant") + packageIds <- value.packageIds + .traverse(LfPackageId.fromString) + .leftMap(ProtoDeserializationError.ValueConversionError("package_ids", _)) + domainId <- + if (value.domain.nonEmpty) + DomainId.fromProtoPrimitive(value.domain, "domain").map(_.some) + else Right(None) + } yield VettedPackagesX(participantId, domainId, packageIds) +} + +// Party to participant mappings +final case class HostingParticipant( + participantId: ParticipantId, + permission: ParticipantPermissionX, +) { + def toProto: v2.PartyToParticipantX.HostingParticipant = + v2.PartyToParticipantX.HostingParticipant( + participant = participantId.toProtoPrimitive, + permission = permission.toProtoV2, + ) +} + +object HostingParticipant { + def fromProtoV2( + value: v2.PartyToParticipantX.HostingParticipant + ): ParsingResult[HostingParticipant] = for { + participantId <- ParticipantId.fromProtoPrimitive(value.participant, "participant") + permission <- ParticipantPermissionX.fromProtoV2(value.permission) + } yield HostingParticipant(participantId, permission) +} + +final case class PartyToParticipantX( + partyId: PartyId, + domainId: Option[DomainId], + threshold: PositiveInt, + participants: Seq[HostingParticipant], + groupAddressing: Boolean, +) extends TopologyMappingX { + + def toProto: v2.PartyToParticipantX = + v2.PartyToParticipantX( + party = 
partyId.toProtoPrimitive, + threshold = threshold.value, + participants = participants.map(_.toProto), + groupAddressing = groupAddressing, + domain = domainId.fold("")(_.toProtoPrimitive), + ) + + override def toProtoV2: v2.TopologyMappingX = + v2.TopologyMappingX( + v2.TopologyMappingX.Mapping.PartyToParticipant( + toProto + ) + ) + + override def code: Code = Code.PartyToParticipantX + + override def namespace: Namespace = partyId.uid.namespace + override def maybeUid: Option[UniqueIdentifier] = Some(partyId.uid) + + override def restrictedToDomain: Option[DomainId] = domainId + + def participantIds: Seq[ParticipantId] = participants.map(_.participantId) + + override def requiredAuth( + previous: Option[TopologyTransactionX[TopologyChangeOpX, TopologyMappingX]] + ): RequiredAuthX = { + // TODO(#12390): take into account the previous transaction and allow participants to unilaterally + // disassociate themselves from a party as long as the threshold can still be reached + previous + .collect { + case TopologyTransactionX( + TopologyChangeOpX.Replace, + _, + PartyToParticipantX(partyId, _, _, previousParticipants, _), + ) => + val addedParticipants = participants + .map(_.participantId.uid) + .diff(previousParticipants.map(_.participantId.uid)) + RequiredUids( + Set(partyId.uid) ++ addedParticipants + ) + } + .getOrElse( + RequiredUids(Set(partyId.uid) ++ participants.map(_.participantId.uid)) + ) + } + + override protected def addUniqueKeyToBuilder(builder: HashBuilder): HashBuilder = + builder + .add(partyId.toProtoPrimitive) + .add(domainId.fold("")(_.toProtoPrimitive)) +} + +object PartyToParticipantX { + + def code: Code = Code.PartyToParticipantX + + def fromProtoV2( + value: v2.PartyToParticipantX + ): ParsingResult[PartyToParticipantX] = + for { + partyId <- PartyId.fromProtoPrimitive(value.party, "party") + threshold <- ProtoConverter.parsePositiveInt(value.threshold) + participants <- value.participants.traverse(HostingParticipant.fromProtoV2) + 
groupAddressing = value.groupAddressing + domainId <- + if (value.domain.nonEmpty) + DomainId.fromProtoPrimitive(value.domain, "domain").map(_.some) + else Right(None) + } yield PartyToParticipantX(partyId, domainId, threshold, participants, groupAddressing) +} + +// AuthorityOfX +final case class AuthorityOfX( + partyId: PartyId, + domainId: Option[DomainId], + threshold: PositiveInt, + parties: Seq[PartyId], +) extends TopologyMappingX { + + def toProto: v2.AuthorityOfX = + v2.AuthorityOfX( + party = partyId.toProtoPrimitive, + threshold = threshold.unwrap, + parties = parties.map(_.toProtoPrimitive), + domain = domainId.fold("")(_.toProtoPrimitive), + ) + + override def toProtoV2: v2.TopologyMappingX = + v2.TopologyMappingX( + v2.TopologyMappingX.Mapping.AuthorityOf( + toProto + ) + ) + + override def code: Code = Code.AuthorityOfX + + override def namespace: Namespace = partyId.uid.namespace + override def maybeUid: Option[UniqueIdentifier] = Some(partyId.uid) + + override def restrictedToDomain: Option[DomainId] = domainId + + override def requiredAuth( + previous: Option[TopologyTransactionX[TopologyChangeOpX, TopologyMappingX]] + ): RequiredAuthX = { + // TODO(#12390): take the previous transaction into account + RequiredUids(Set(partyId.uid) ++ parties.map(_.uid)) + } + + override protected def addUniqueKeyToBuilder(builder: HashBuilder): HashBuilder = + builder + .add(partyId.toProtoPrimitive) + .add(domainId.fold("")(_.toProtoPrimitive)) +} + +object AuthorityOfX { + + def code: Code = Code.AuthorityOfX + + def fromProtoV2( + value: v2.AuthorityOfX + ): ParsingResult[AuthorityOfX] = + for { + partyId <- PartyId.fromProtoPrimitive(value.party, "party") + threshold <- ProtoConverter.parsePositiveInt(value.threshold) + parties <- value.parties.traverse(PartyId.fromProtoPrimitive(_, "parties")) + domainId <- + if (value.domain.nonEmpty) + DomainId.fromProtoPrimitive(value.domain, "domain").map(_.some) + else Right(None) + } yield AuthorityOfX(partyId, 
domainId, threshold, parties) +} + +/** Dynamic domain parameter settings for the domain + * + * Each domain has a set of parameters that can be changed at runtime. + * These changes are authorized by the owner of the domain and distributed + * to all nodes accordingly. + */ +final case class DomainParametersStateX(domain: DomainId, parameters: DynamicDomainParameters) + extends TopologyMappingX { + + override protected def addUniqueKeyToBuilder(builder: HashBuilder): HashBuilder = + builder.add(domain.uid.toProtoPrimitive) + + def toProtoV2: v2.TopologyMappingX = + v2.TopologyMappingX( + v2.TopologyMappingX.Mapping.DomainParametersState( + v2.DomainParametersStateX( + domain = domain.toProtoPrimitive, + domainParameters = Some(parameters.toProtoV2), + ) + ) + ) + + def code: TopologyMappingX.Code = Code.DomainParametersStateX + + override def namespace: Namespace = domain.uid.namespace + override def maybeUid: Option[UniqueIdentifier] = Some(domain.uid) + + override def restrictedToDomain: Option[DomainId] = Some(domain) + + override def requiredAuth( + previous: Option[TopologyTransactionX[TopologyChangeOpX, TopologyMappingX]] + ): RequiredAuthX = RequiredUids(Set(domain.uid)) +} + +object DomainParametersStateX { + + def code: TopologyMappingX.Code = Code.DomainParametersStateX + + def fromProtoV2( + value: v2.DomainParametersStateX + ): ParsingResult[DomainParametersStateX] = { + val v2.DomainParametersStateX(domainIdP, domainParametersP) = value + for { + domainId <- DomainId.fromProtoPrimitive(domainIdP, "domain") + parameters <- ProtoConverter.parseRequired( + DynamicDomainParameters.fromProtoV2, + "domainParameters", + domainParametersP, + ) + } yield DomainParametersStateX(domainId, parameters) + } +} + +/** Mediator definition for a domain + * + * Each domain needs at least one mediator (group), but can have multiple. + * Mediators can temporarily be turned off by making them observers. This way, + * they get informed but they don't have to reply.
+ */ +final case class MediatorDomainStateX private ( + domain: DomainId, + group: NonNegativeInt, + threshold: PositiveInt, + active: NonEmpty[Seq[MediatorId]], + observers: Seq[MediatorId], +) extends TopologyMappingX { + + lazy val allMediatorsInGroup = active ++ observers + + override protected def addUniqueKeyToBuilder(builder: HashBuilder): HashBuilder = + builder.add(domain.uid.toProtoPrimitive).add(group.unwrap) + + def toProto: v2.MediatorDomainStateX = + v2.MediatorDomainStateX( + domain = domain.toProtoPrimitive, + group = group.unwrap, + threshold = threshold.unwrap, + active = active.map(_.uid.toProtoPrimitive), + observers = observers.map(_.uid.toProtoPrimitive), + ) + + def toProtoV2: v2.TopologyMappingX = + v2.TopologyMappingX( + v2.TopologyMappingX.Mapping.MediatorDomainState( + toProto + ) + ) + + override def code: TopologyMappingX.Code = Code.MediatorDomainStateX + + override def namespace: Namespace = domain.uid.namespace + override def maybeUid: Option[UniqueIdentifier] = Some(domain.uid) + + override def restrictedToDomain: Option[DomainId] = Some(domain) + + override def requiredAuth( + previous: Option[TopologyTransactionX[TopologyChangeOpX, TopologyMappingX]] + ): RequiredAuthX = RequiredUids(Set(domain.uid)) +} + +object MediatorDomainStateX { + + def code: TopologyMappingX.Code = Code.MediatorDomainStateX + + def create( + domain: DomainId, + group: NonNegativeInt, + threshold: PositiveInt, + active: Seq[MediatorId], + observers: Seq[MediatorId], + ): Either[String, MediatorDomainStateX] = for { + _ <- Either.cond( + threshold.unwrap <= active.length, + (), + s"threshold (${threshold}) of mediator domain state higher than number of mediators ${active.length}", + ) + activeNE <- NonEmpty + .from(active) + .toRight("mediator domain state requires at least one active mediator") + } yield MediatorDomainStateX(domain, group, threshold, activeNE, observers) + + def fromProtoV2( + value: v2.MediatorDomainStateX + ): 
ParsingResult[MediatorDomainStateX] = { + val v2.MediatorDomainStateX(domainIdP, groupP, thresholdP, activeP, observersP) = value + for { + domainId <- DomainId.fromProtoPrimitive(domainIdP, "domain") + group <- NonNegativeInt + .create(groupP) + .leftMap(ProtoDeserializationError.InvariantViolation(_)) + threshold <- ProtoConverter.parsePositiveInt(thresholdP) + active <- activeP.traverse( + UniqueIdentifier.fromProtoPrimitive(_, "active").map(MediatorId(_)) + ) + observers <- observersP.traverse( + UniqueIdentifier.fromProtoPrimitive(_, "observers").map(MediatorId(_)) + ) + result <- create(domainId, group, threshold, active, observers).leftMap( + ProtoDeserializationError.OtherError + ) + } yield result + } + +} + +/** which sequencers are active on the given domain + * + * authorization: whoever controls the domain and all the owners of the active or observing sequencers that + * were not already present in the tx with serial = n - 1 + * exception: a sequencer can leave the consortium unilaterally as long as there are enough members + * to reach the threshold + * UNIQUE(domain) + */ +final case class SequencerDomainStateX private ( + domain: DomainId, + threshold: PositiveInt, + active: NonEmpty[Seq[SequencerId]], + observers: Seq[SequencerId], +) extends TopologyMappingX { + + lazy val allSequencers = active ++ observers + + override protected def addUniqueKeyToBuilder(builder: HashBuilder): HashBuilder = + builder.add(domain.uid.toProtoPrimitive) + + def toProto: v2.SequencerDomainStateX = + v2.SequencerDomainStateX( + domain = domain.toProtoPrimitive, + threshold = threshold.unwrap, + active = active.map(_.uid.toProtoPrimitive), + observers = observers.map(_.uid.toProtoPrimitive), + ) + + def toProtoV2: v2.TopologyMappingX = + v2.TopologyMappingX( + v2.TopologyMappingX.Mapping.SequencerDomainState( + toProto + ) + ) + + def code: TopologyMappingX.Code = Code.SequencerDomainStateX + + override def namespace: Namespace = domain.uid.namespace + override def 
maybeUid: Option[UniqueIdentifier] = Some(domain.uid) + + override def restrictedToDomain: Option[DomainId] = Some(domain) + + override def requiredAuth( + previous: Option[TopologyTransactionX[TopologyChangeOpX, TopologyMappingX]] + ): RequiredAuthX = RequiredUids(Set(domain.uid)) +} + +object SequencerDomainStateX { + + def code: TopologyMappingX.Code = Code.SequencerDomainStateX + + def create( + domain: DomainId, + threshold: PositiveInt, + active: Seq[SequencerId], + observers: Seq[SequencerId], + ): Either[String, SequencerDomainStateX] = for { + _ <- Either.cond( + threshold.unwrap <= active.length, + (), + s"threshold (${threshold}) of sequencer domain state higher than number of active sequencers ${active.length}", + ) + activeNE <- NonEmpty + .from(active) + .toRight("sequencer domain state requires at least one active sequencer") + } yield SequencerDomainStateX(domain, threshold, activeNE, observers) + + def fromProtoV2( + value: v2.SequencerDomainStateX + ): ParsingResult[SequencerDomainStateX] = { + val v2.SequencerDomainStateX(domainIdP, thresholdP, activeP, observersP) = value + for { + domainId <- DomainId.fromProtoPrimitive(domainIdP, "domain") + threshold <- ProtoConverter.parsePositiveInt(thresholdP) + active <- activeP.traverse( + UniqueIdentifier.fromProtoPrimitive(_, "active").map(SequencerId(_)) + ) + observers <- observersP.traverse( + UniqueIdentifier.fromProtoPrimitive(_, "observers").map(SequencerId(_)) + ) + result <- create(domainId, threshold, active, observers).leftMap( + ProtoDeserializationError.OtherError + ) + } yield result + } + +} + +// Purge topology transaction-x +final case class PurgeTopologyTransactionX private ( + domain: DomainId, + mappings: NonEmpty[Seq[TopologyMappingX]], +) extends TopologyMappingX { + + override protected def addUniqueKeyToBuilder(builder: HashBuilder): HashBuilder = + builder.add(domain.uid.toProtoPrimitive) + + def toProto: v2.PurgeTopologyTransactionX = + v2.PurgeTopologyTransactionX( + domain = 
domain.toProtoPrimitive, + mappings = mappings.map(_.toProtoV2), + ) + + def toProtoV2: v2.TopologyMappingX = + v2.TopologyMappingX( + v2.TopologyMappingX.Mapping.PurgeTopologyTxs( + toProto + ) + ) + + def code: TopologyMappingX.Code = Code.PurgeTopologyTransactionX + + override def namespace: Namespace = domain.uid.namespace + override def maybeUid: Option[UniqueIdentifier] = Some(domain.uid) + + override def restrictedToDomain: Option[DomainId] = Some(domain) + + override def requiredAuth( + previous: Option[TopologyTransactionX[TopologyChangeOpX, TopologyMappingX]] + ): RequiredAuthX = RequiredUids(Set(domain.uid)) +} + +object PurgeTopologyTransactionX { + + def code: TopologyMappingX.Code = Code.PurgeTopologyTransactionX + + def create( + domain: DomainId, + mappings: Seq[TopologyMappingX], + ): Either[String, PurgeTopologyTransactionX] = for { + mappingsToPurge <- NonEmpty + .from(mappings) + .toRight("purge topology transaction-x requires at least one topology mapping") + } yield PurgeTopologyTransactionX(domain, mappingsToPurge) + + def fromProtoV2( + value: v2.PurgeTopologyTransactionX + ): ParsingResult[PurgeTopologyTransactionX] = { + val v2.PurgeTopologyTransactionX(domainIdP, mappingsP) = value + for { + domainId <- DomainId.fromProtoPrimitive(domainIdP, "domain") + mappings <- mappingsP.traverse(TopologyMappingX.fromProtoV2) + result <- create(domainId, mappings).leftMap( + ProtoDeserializationError.OtherError + ) + } yield result + } + +} + +// Traffic control state topology transactions +final case class TrafficControlStateX private ( + domain: DomainId, + member: Member, + totalExtraTrafficLimit: PositiveLong, +) extends TopologyMappingX { + + override protected def addUniqueKeyToBuilder(builder: HashBuilder): HashBuilder = + builder.add(domain.uid.toProtoPrimitive).add(member.uid.toProtoPrimitive) + + def toProto: v2.TrafficControlStateX = { + v2.TrafficControlStateX( + domain = domain.toProtoPrimitive, + member = member.toProtoPrimitive, + 
totalExtraTrafficLimit = totalExtraTrafficLimit.value, + ) + } + + def toProtoV2: v2.TopologyMappingX = + v2.TopologyMappingX( + v2.TopologyMappingX.Mapping.TrafficControlState( + toProto + ) + ) + + def code: TopologyMappingX.Code = Code.TrafficControlStateX + + override def namespace: Namespace = member.uid.namespace + override def maybeUid: Option[UniqueIdentifier] = Some(member.uid) + + override def requiredAuth( + previous: Option[TopologyTransactionX[TopologyChangeOpX, TopologyMappingX]] + ): RequiredAuthX = RequiredUids(Set(domain.uid)) + + override def restrictedToDomain: Option[DomainId] = Some(domain) +} + +object TrafficControlStateX { + + def code: TopologyMappingX.Code = Code.TrafficControlStateX + + def create( + domain: DomainId, + member: Member, + totalExtraTrafficLimit: PositiveLong, + ): Either[String, TrafficControlStateX] = + Right(TrafficControlStateX(domain, member, totalExtraTrafficLimit)) + + def fromProtoV2( + value: v2.TrafficControlStateX + ): ParsingResult[TrafficControlStateX] = { + val v2.TrafficControlStateX(domainIdP, memberP, totalExtraTrafficLimitP) = + value + for { + domainId <- DomainId.fromProtoPrimitive(domainIdP, "domain") + member <- Member.fromProtoPrimitive(memberP, "member") + totalExtraTrafficLimit <- PositiveLong + .create(totalExtraTrafficLimitP) + .leftMap(e => InvariantViolation(e.message)) + result <- create(domainId, member, totalExtraTrafficLimit).leftMap( + ProtoDeserializationError.OtherError + ) + } yield result + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMappingXChecks.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMappingXChecks.scala new file mode 100644 index 0000000000..f6db16dc8a --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMappingXChecks.scala @@ -0,0 +1,226 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH 
and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology.transaction + +import cats.data.EitherT +import cats.instances.future.* +import com.digitalasset.canton.config.RequireTypes.PositiveLong +import com.digitalasset.canton.crypto.KeyPurpose +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.topology.ParticipantId +import com.digitalasset.canton.topology.processing.EffectiveTime +import com.digitalasset.canton.topology.store.{ + TopologyStoreId, + TopologyStoreX, + TopologyTransactionRejection, +} +import com.digitalasset.canton.topology.transaction.SignedTopologyTransactionX.GenericSignedTopologyTransactionX +import com.digitalasset.canton.topology.transaction.TopologyMappingX.Code +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.EitherTUtil + +import scala.concurrent.{ExecutionContext, Future} +import scala.math.Ordered.* + +trait TopologyMappingXChecks { + def checkTransaction( + effective: EffectiveTime, + toValidate: GenericSignedTopologyTransactionX, + inStore: Option[GenericSignedTopologyTransactionX], + )(implicit traceContext: TraceContext): EitherT[Future, TopologyTransactionRejection, Unit] +} + +object NoopTopologyMappingXChecks extends TopologyMappingXChecks { + override def checkTransaction( + effective: EffectiveTime, + toValidate: GenericSignedTopologyTransactionX, + inStore: Option[GenericSignedTopologyTransactionX], + )(implicit traceContext: TraceContext): EitherT[Future, TopologyTransactionRejection, Unit] = + EitherTUtil.unit +} + +class ValidatingTopologyMappingXChecks( + store: TopologyStoreX[TopologyStoreId], + loggerFactory: NamedLoggerFactory, +)(implicit + executionContext: ExecutionContext +) extends TopologyMappingXChecks { + + def checkTransaction( + effective: EffectiveTime, + toValidate: GenericSignedTopologyTransactionX, + 
inStore: Option[GenericSignedTopologyTransactionX], + )(implicit traceContext: TraceContext): EitherT[Future, TopologyTransactionRejection, Unit] = { + + val checkOpt = (toValidate.mapping.code, inStore.map(_.mapping.code)) match { + case (Code.DomainTrustCertificateX, None | Some(Code.DomainTrustCertificateX)) => + toValidate + .selectMapping[DomainTrustCertificateX] + .map(checkDomainTrustCertificate(effective, _)) + + case (Code.PartyToParticipantX, None | Some(Code.PartyToParticipantX)) => + toValidate + .select[TopologyChangeOpX.Replace, PartyToParticipantX] + .map(checkPartyToParticipant(_, inStore.flatMap(_.selectMapping[PartyToParticipantX]))) + + case (Code.TrafficControlStateX, None | Some(Code.TrafficControlStateX)) => + toValidate + .select[TopologyChangeOpX.Replace, TrafficControlStateX] + .map( + checkTrafficControl( + _, + inStore.flatMap(_.selectMapping[TrafficControlStateX]), + ) + ) + + case otherwise => None + } + checkOpt.getOrElse(EitherTUtil.unit) + } + + /** Checks that the DTC is not being removed if the participant still hosts a party. + * This check is potentially quite expensive: we have to fetch all party to participant mappings, because + * we cannot index by the hosting participants. 
+ */ + private def checkDomainTrustCertificate( + effective: EffectiveTime, + toValidate: SignedTopologyTransactionX[TopologyChangeOpX, DomainTrustCertificateX], + )(implicit traceContext: TraceContext): EitherT[Future, TopologyTransactionRejection, Unit] = { + if (toValidate.transaction.op == TopologyChangeOpX.Remove) { + for { + storedPartyToParticipantMappings <- EitherT + .right[TopologyTransactionRejection]( + store + .findPositiveTransactions( + effective.value, + asOfInclusive = false, + isProposal = false, + types = Seq(PartyToParticipantX.code), + filterUid = None, + filterNamespace = None, + ) + ) + participantToOffboard = toValidate.mapping.participantId + participantHostsParties = storedPartyToParticipantMappings.result.view + .flatMap(_.selectMapping[PartyToParticipantX]) + .collect { + case tx if tx.mapping.participants.exists(_.participantId == participantToOffboard) => + tx.mapping.partyId + } + .toSeq + + _ <- EitherTUtil.condUnitET[Future][TopologyTransactionRejection]( + participantHostsParties.isEmpty, + TopologyTransactionRejection.ParticipantStillHostsParties( + participantToOffboard, + participantHostsParties, + ), + ) + } yield () + + } else { + EitherTUtil.unit + } + } + + private val requiredKeyPurposes = Set(KeyPurpose.Encryption, KeyPurpose.Signing) + + /** Checks the following: + * - threshold is less than or equal to the number of confirming participants + * - new participants have a valid DTC + * - new participants have an OTK with at least 1 signing key and 1 encryption key + */ + private def checkPartyToParticipant( + toValidate: SignedTopologyTransactionX[TopologyChangeOpX, PartyToParticipantX], + inStore: Option[SignedTopologyTransactionX[TopologyChangeOpX, PartyToParticipantX]], + )(implicit + traceContext: TraceContext + ): EitherT[Future, TopologyTransactionRejection, Unit] = { + import toValidate.mapping + val numConfirmingParticipants = + mapping.participants.count(_.permission >= ParticipantPermissionX.Confirmation) + + for 
{ + // check the threshold + _ <- EitherTUtil.condUnitET[Future][TopologyTransactionRejection]( + mapping.threshold.value <= numConfirmingParticipants, + TopologyTransactionRejection.ThresholdTooHigh( + mapping.threshold.value, + numConfirmingParticipants, + ), + ) + + newParticipants = mapping.participants.map(_.participantId).toSet -- + inStore.toList.flatMap(_.mapping.participants.map(_.participantId)) + participantTransactions <- EitherT.right[TopologyTransactionRejection]( + store + .findPositiveTransactions( + CantonTimestamp.MaxValue, + asOfInclusive = false, + isProposal = false, + types = Seq(DomainTrustCertificateX.code, OwnerToKeyMappingX.code), + filterUid = Some(newParticipants.toSeq.map(_.uid)), + filterNamespace = None, + ) + ) + + // check that all participants are known on the domain + missingParticipantCertificates = newParticipants -- participantTransactions + .collectOfMapping[DomainTrustCertificateX] + .result + .map(_.mapping.participantId) + + _ <- EitherTUtil.condUnitET[Future][TopologyTransactionRejection]( + missingParticipantCertificates.isEmpty, + TopologyTransactionRejection.UnknownMembers(missingParticipantCertificates.toSeq), + ) + + // check that all known participants have keys registered + participantsWithInsufficientKeys = + newParticipants -- participantTransactions + .collectOfMapping[OwnerToKeyMappingX] + .result + .view + .filter { tx => + val keyPurposes = tx.mapping.keys.map(_.purpose).toSet + requiredKeyPurposes.forall(keyPurposes) + } + .map(_.mapping.member) + .collect { case pid: ParticipantId => pid } + .toSeq + + _ <- EitherTUtil.condUnitET[Future][TopologyTransactionRejection]( + participantsWithInsufficientKeys.isEmpty, + TopologyTransactionRejection.InsufficientKeys(participantsWithInsufficientKeys.toSeq), + ) + } yield { + () + } + } + + /** Checks that the extraTrafficLimit is monotonically increasing */ + private def checkTrafficControl( + toValidate: SignedTopologyTransactionX[TopologyChangeOpX.Replace, 
TrafficControlStateX], + inStore: Option[SignedTopologyTransactionX[TopologyChangeOpX, TrafficControlStateX]], + ): EitherT[Future, TopologyTransactionRejection, Unit] = { + val minimumExtraTrafficLimit = inStore match { + case None => PositiveLong.one + case Some(TopologyChangeOpX(TopologyChangeOpX.Remove)) => + // if the transaction in the store is a removal, we "reset" the monotonicity requirement + PositiveLong.one + case Some(tx) => tx.mapping.totalExtraTrafficLimit + } + + EitherTUtil.condUnitET( + toValidate.mapping.totalExtraTrafficLimit >= minimumExtraTrafficLimit, + TopologyTransactionRejection.ExtraTrafficLimitTooLow( + toValidate.mapping.member, + toValidate.mapping.totalExtraTrafficLimit, + minimumExtraTrafficLimit, + ), + ) + + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyTransaction.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyTransaction.scala new file mode 100644 index 0000000000..02dd92c0d0 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyTransaction.scala @@ -0,0 +1,514 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology.transaction + +import com.digitalasset.canton.ProtoDeserializationError +import com.digitalasset.canton.ProtoDeserializationError.* +import com.digitalasset.canton.config.CantonRequireTypes.{ + LengthLimitedStringWrapper, + LengthLimitedStringWrapperCompanion, + String255, +} +import com.digitalasset.canton.crypto.* +import com.digitalasset.canton.logging.pretty.PrettyInstances.* +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.protocol.{v0, v1} +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.serialization.ProtocolVersionedMemoizedEvidence +import com.digitalasset.canton.topology.* +import com.digitalasset.canton.topology.store.StoredTopologyTransaction +import com.digitalasset.canton.version.* +import com.google.protobuf.ByteString +import slick.jdbc.SetParameter + +trait TopologyChangeOpCommon extends Product with Serializable with PrettyPrinting { + override def pretty: Pretty[TopologyChangeOpCommon.this.type] = adHocPrettyInstance +} + +/** Add, Remove, Replace */ +sealed trait TopologyChangeOp extends TopologyChangeOpCommon { + def toProto: v0.TopologyChangeOp +} + +/** +/- */ +sealed abstract class AddRemoveChangeOp(val toProto: v0.TopologyChangeOp) extends TopologyChangeOp + +object AddRemoveChangeOp { + def fromProtoV0( + protoOp: v0.TopologyChangeOp + ): ParsingResult[AddRemoveChangeOp] = + protoOp match { + case v0.TopologyChangeOp.Add => Right(TopologyChangeOp.Add) + case v0.TopologyChangeOp.Remove => Right(TopologyChangeOp.Remove) + case v0.TopologyChangeOp.Replace => Left(InvariantViolation("Replace op is not allowed here")) + case v0.TopologyChangeOp.Unrecognized(x) => Left(UnrecognizedEnum(protoOp.name, x)) + } +} + +object TopologyChangeOp { + sealed trait Positive extends TopologyChangeOp + + final case object Add extends 
AddRemoveChangeOp(v0.TopologyChangeOp.Add) with Positive + final case object Remove extends AddRemoveChangeOp(v0.TopologyChangeOp.Remove) + + final case object Replace extends TopologyChangeOp with Positive { + def toProto: v0.TopologyChangeOp = v0.TopologyChangeOp.Replace + } + + type Add = Add.type + type Remove = Remove.type + type Replace = Replace.type + + trait OpTypeChecker[A <: TopologyChangeOp] { + def isOfType(op: TopologyChangeOp): Boolean + } + + implicit val topologyAddChecker: OpTypeChecker[Add] = new OpTypeChecker[Add] { + override def isOfType(op: TopologyChangeOp): Boolean = op match { + case _: Add => true + case _ => false + } + } + + implicit val topologyPositiveChecker: OpTypeChecker[Positive] = new OpTypeChecker[Positive] { + override def isOfType(op: TopologyChangeOp): Boolean = op match { + case _: Add | _: Replace => true + case _ => false + } + } + + implicit val topologyRemoveChecker: OpTypeChecker[Remove] = new OpTypeChecker[Remove] { + override def isOfType(op: TopologyChangeOp): Boolean = op match { + case _: Remove => true + case _ => false + } + } + + implicit val topologyReplaceChecker: OpTypeChecker[Replace] = new OpTypeChecker[Replace] { + override def isOfType(op: TopologyChangeOp): Boolean = op match { + case _: Replace => true + case _ => false + } + } + + @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) + def select[Op <: TopologyChangeOp](transaction: SignedTopologyTransaction[TopologyChangeOp])( + implicit checker: OpTypeChecker[Op] + ): Option[SignedTopologyTransaction[Op]] = if (checker.isOfType(transaction.operation)) + Some(transaction.asInstanceOf[SignedTopologyTransaction[Op]]) + else None + + @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) + def select[Op <: TopologyChangeOp]( + storedTransaction: StoredTopologyTransaction[TopologyChangeOp] + )(implicit + checker: OpTypeChecker[Op] + ): Option[StoredTopologyTransaction[Op]] = if ( + checker.isOfType(storedTransaction.transaction.operation) 
+ ) + Some(storedTransaction.asInstanceOf[StoredTopologyTransaction[Op]]) + else None + + def fromProtoV0( + protoOp: v0.TopologyChangeOp + ): ParsingResult[TopologyChangeOp] = + protoOp match { + case v0.TopologyChangeOp.Add => Right(Add) + case v0.TopologyChangeOp.Remove => Right(Remove) + case v0.TopologyChangeOp.Replace => Right(Replace) + case v0.TopologyChangeOp.Unrecognized(x) => Left(UnrecognizedEnum(protoOp.name, x)) + } + + implicit val setParameterTopologyChangeOp: SetParameter[TopologyChangeOp] = (v, pp) => + v match { + case Add => pp.setInt(1) + case Remove => pp.setInt(2) + case Replace => pp.setInt(3) + } +} + +/** Topology transaction id + * + * Used to distinguish topology transactions from each other such that a Remove explicitly refers to a + * corresponding Add, such that we can support re-addition (Add, Remove, Add again). + */ +final case class TopologyElementId(override protected val str: String255) + extends LengthLimitedStringWrapper + with PrettyPrinting { + def toLengthLimitedString: String255 = str + // TODO(i4933) validate strings when deserializing from proto (must be safesimplestring) + + override def pretty: Pretty[TopologyElementId] = prettyOfString(_.unwrap) +} + +object TopologyElementId extends LengthLimitedStringWrapperCompanion[String255, TopologyElementId] { + def generate(): TopologyElementId = { + TopologyElementId(String255.tryCreate(PseudoRandom.randomAlphaNumericString(32))) + } + + // Reuse externally supplied identifier that needs to be unique. 
+ def adopt(id: String255): TopologyElementId = TopologyElementId(id) + + override def instanceName: String = "TopologyElementId" + + override protected def companion: String255.type = String255 + + override protected def factoryMethodWrapper(str: String255): TopologyElementId = + TopologyElementId(str) +} + +sealed trait TopologyStateElement[+M <: TopologyMapping] extends PrettyPrinting { + def id: TopologyElementId + def mapping: M + def uniquePath: UniquePath +} + +final case class TopologyStateUpdateElement( + id: TopologyElementId, + mapping: TopologyStateUpdateMapping, +) extends TopologyStateElement[TopologyStateUpdateMapping] { + override def pretty: Pretty[TopologyStateUpdateElement] = + prettyOfClass(param("id", _.id), param("mapping", _.mapping)) + + lazy val uniquePath: UniquePath = mapping.uniquePath(id) +} + +final case class DomainGovernanceElement(mapping: DomainGovernanceMapping) + extends TopologyStateElement[DomainGovernanceMapping] { + override def pretty: Pretty[DomainGovernanceElement] = + prettyOfClass(param("id", _.id), param("mapping", _.mapping)) + + lazy val id: TopologyElementId = TopologyElementId(mapping.domainId.toLengthLimitedString) + lazy val uniquePath: UniquePathSignedDomainGovernanceTransaction = + mapping.uniquePath(id) // TODO(#11111): id is not used for the path ; improve API? +} + +/** Defines the required authorization chain */ +sealed trait RequiredAuth { + def namespaces: (Seq[Namespace], Boolean) + def uids: Seq[UniqueIdentifier] +} +object RequiredAuth { + + /** Authorization must be on the namespace level + * + * This implies that it must be authorized by a Namespace delegation. + * The boolean designates if the delegation needs to be a root delegation. 
+ */ + final case class Ns(namespace: Namespace, rootDelegation: Boolean) extends RequiredAuth { + override def namespaces: (Seq[Namespace], Boolean) = (Seq(namespace), true) + override def uids: Seq[UniqueIdentifier] = Seq.empty + } + final case class Uid(override val uids: Seq[UniqueIdentifier]) extends RequiredAuth { + override def namespaces: (Seq[Namespace], Boolean) = + (uids.map(uid => uid.namespace).distinct, false) + } +} + +sealed trait TopologyTransaction[+Op <: TopologyChangeOp] + extends ProtocolVersionedMemoizedEvidence + with PrettyPrinting + with HasProtocolVersionedWrapper[TopologyTransaction[TopologyChangeOp]] + with Product + with Serializable { + def op: Op + def element: TopologyStateElement[TopologyMapping] + + def reverse: TopologyTransaction[TopologyChangeOp] + + @transient override protected lazy val companionObj: TopologyTransaction.type = + TopologyTransaction + + // calculate hash for signature + def hashToSign(hashOps: HashOps): Hash = + hashOps.digest(HashPurpose.TopologyTransactionSignature, this.getCryptographicEvidence) + + override def toByteStringUnmemoized: ByteString = super[HasProtocolVersionedWrapper].toByteString + + def toProtoV1: v1.TopologyTransaction + + def asVersion(protocolVersion: ProtocolVersion): TopologyTransaction[Op] + + def hasEquivalentVersion(protocolVersion: ProtocolVersion): Boolean = + representativeProtocolVersion == TopologyTransaction.protocolVersionRepresentativeFor( + protocolVersion + ) +} + +object TopologyTransaction + extends HasMemoizedProtocolVersionedWrapperCompanion[TopologyTransaction[TopologyChangeOp]] { + override val name: String = "TopologyTransaction" + + val supportedProtoVersions = SupportedProtoVersions( + ProtoVersion(1) -> VersionedProtoConverter(ProtocolVersion.v30)(v1.TopologyTransaction)( + supportedProtoVersionMemoized(_)(fromProtoV1), + _.toProtoV1.toByteString, + ) + ) + + private def fromProtoV1(transactionP: v1.TopologyTransaction)( + bytes: ByteString + ): 
ParsingResult[TopologyTransaction[TopologyChangeOp]] = transactionP.transaction match { + case v1.TopologyTransaction.Transaction.Empty => + Left(FieldNotSet("TopologyTransaction.transaction.version")) + case v1.TopologyTransaction.Transaction.StateUpdate(stateUpdate) => + TopologyStateUpdate.fromProtoV1(stateUpdate, bytes) + case v1.TopologyTransaction.Transaction.DomainGovernance(domainGovernance) => + DomainGovernanceTransaction.fromProtoV1(domainGovernance, bytes) + } +} + +/** +/-, X -> Y + * + * Every topology transaction is the combination of an operation (Add, Remove), + * a unique element id and the state operation. + * + * An Add can pick a random element id. A remove needs to pick the element id of the corresponding addition. + * Element ids are uniqueness constraints. Once removed, they can't be re-added + * (during a configurable time window) + */ +final case class TopologyStateUpdate[+Op <: AddRemoveChangeOp] private ( + op: Op, + element: TopologyStateUpdateElement, +)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + TopologyTransaction.type + ], + val deserializedFrom: Option[ByteString] = None, +) extends TopologyTransaction[Op] { + + private def toStateUpdateProtoV1: v1.TopologyStateUpdate = { + val mappingP: v1.TopologyStateUpdate.Mapping = element.mapping match { + case x: NamespaceDelegation => + v1.TopologyStateUpdate.Mapping.NamespaceDelegation(x.toProtoV0) + case x: IdentifierDelegation => + v1.TopologyStateUpdate.Mapping.IdentifierDelegation(x.toProtoV0) + case x: OwnerToKeyMapping => + v1.TopologyStateUpdate.Mapping.OwnerToKeyMapping(x.toProtoV0) + case x: PartyToParticipant => + v1.TopologyStateUpdate.Mapping.PartyToParticipant(x.toProtoV0) + case x: SignedLegalIdentityClaim => + v1.TopologyStateUpdate.Mapping.SignedLegalIdentityClaim(x.toProtoV0) + case x: ParticipantState => + v1.TopologyStateUpdate.Mapping.ParticipantState(x.toProtoV0) + case x: MediatorDomainState => + 
v1.TopologyStateUpdate.Mapping.MediatorDomainState(x.toProtoV0) + case x: VettedPackages => + v1.TopologyStateUpdate.Mapping.VettedPackages(x.toProtoV0) + } + + v1.TopologyStateUpdate(operation = op.toProto, id = element.id.unwrap, mapping = mappingP) + } + + def toProtoV1: v1.TopologyTransaction = + v1.TopologyTransaction(v1.TopologyTransaction.Transaction.StateUpdate(toStateUpdateProtoV1)) + + /** Create reversion of this transaction + * + * If this transaction is an Add, we return a corresponding Remove with the same transaction id. + * If this transaction is a Remove, we return an Add with a new transaction id. + */ + def reverse: TopologyTransaction[TopologyChangeOp] = { + import TopologyChangeOp.* + + (op: AddRemoveChangeOp) match { + case Add => TopologyStateUpdate(Remove, element)(representativeProtocolVersion) + case Remove => + TopologyStateUpdate.createAdd(element.mapping, representativeProtocolVersion) + } + } + + override def pretty: Pretty[TopologyStateUpdate.this.type] = + prettyOfClass(param("op", _.op), param("element", _.element)) + + override def asVersion( + protocolVersion: ProtocolVersion + ): TopologyTransaction[Op] = { + TopologyStateUpdate[Op](op, element)( + TopologyTransaction.protocolVersionRepresentativeFor(protocolVersion) + ) + } +} + +object TopologyStateUpdate { + def apply[Op <: AddRemoveChangeOp]( + op: Op, + element: TopologyStateUpdateElement, + protocolVersion: ProtocolVersion, + ): TopologyStateUpdate[Op] = + TopologyStateUpdate(op, element)( + TopologyTransaction.protocolVersionRepresentativeFor(protocolVersion) + ) + + def fromByteString(bytes: ByteString): ParsingResult[TopologyStateUpdate[AddRemoveChangeOp]] = + for { + converted <- TopologyTransaction.fromByteString(bytes) + result <- converted match { + case topologyStateUpdate: TopologyStateUpdate[_] => + Right(topologyStateUpdate) + case _: DomainGovernanceTransaction => + Left( + ProtoDeserializationError.TransactionDeserialization( + "Expecting TopologyStateUpdate, 
found DomainGovernanceTransaction" + ) + ) + } + } yield result + + private[transaction] def fromProtoV1( + protoTopologyTransaction: v1.TopologyStateUpdate, + bytes: ByteString, + ): ParsingResult[TopologyStateUpdate[AddRemoveChangeOp]] = { + val mappingRes: ParsingResult[TopologyStateUpdateMapping] = + protoTopologyTransaction.mapping match { + + case v1.TopologyStateUpdate.Mapping.IdentifierDelegation(idDelegation) => + IdentifierDelegation.fromProtoV0(idDelegation) + + case v1.TopologyStateUpdate.Mapping.NamespaceDelegation(nsDelegation) => + NamespaceDelegation.fromProtoV0(nsDelegation) + + case v1.TopologyStateUpdate.Mapping.OwnerToKeyMapping(owkm) => + OwnerToKeyMapping.fromProtoV0(owkm) + + case v1.TopologyStateUpdate.Mapping.PartyToParticipant(value) => + PartyToParticipant.fromProtoV0(value) + + case v1.TopologyStateUpdate.Mapping.SignedLegalIdentityClaim(value) => + SignedLegalIdentityClaim.fromProtoV0(value) + + case v1.TopologyStateUpdate.Mapping.ParticipantState(value) => + ParticipantState.fromProtoV0(value) + + case v1.TopologyStateUpdate.Mapping.MediatorDomainState(value) => + MediatorDomainState.fromProtoV0(value) + + case v1.TopologyStateUpdate.Mapping.VettedPackages(value) => + VettedPackages.fromProtoV0(value) + + case v1.TopologyStateUpdate.Mapping.Empty => + Left(UnrecognizedField("TopologyStateUpdate.Mapping is empty")) + } + for { + op <- AddRemoveChangeOp.fromProtoV0(protoTopologyTransaction.operation) + mapping <- mappingRes + id <- TopologyElementId.fromProtoPrimitive(protoTopologyTransaction.id) + } yield TopologyStateUpdate(op, TopologyStateUpdateElement(id, mapping))( + TopologyTransaction.protocolVersionRepresentativeFor(ProtoVersion(1)), + Some(bytes), + ) + } + + def createAdd( + mapping: TopologyStateUpdateMapping, + protocolVersion: ProtocolVersion, + ): TopologyStateUpdate[TopologyChangeOp.Add] = + TopologyStateUpdate( + TopologyChangeOp.Add, + TopologyStateUpdateElement(TopologyElementId.generate(), mapping), + protocolVersion, 
+ ) + + def createAdd( + mapping: TopologyStateUpdateMapping, + protocolVersion: RepresentativeProtocolVersion[TopologyTransaction.type], + ): TopologyStateUpdate[TopologyChangeOp.Add] = + TopologyStateUpdate( + TopologyChangeOp.Add, + TopologyStateUpdateElement(TopologyElementId.generate(), mapping), + )( + protocolVersion + ) +} + +final case class DomainGovernanceTransaction private ( + element: DomainGovernanceElement +)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + TopologyTransaction.type + ], + override val deserializedFrom: Option[ByteString] = None, +) extends TopologyTransaction[TopologyChangeOp.Replace] { + val op = TopologyChangeOp.Replace + + private def toDomainGovernanceTransactionProtoV1: v1.DomainGovernanceTransaction = { + val mappingP = element.mapping match { + case x: DomainParametersChange => + v1.DomainGovernanceTransaction.Mapping.DomainParametersChange(x.toProtoV1) + } + + v1.DomainGovernanceTransaction(mapping = mappingP) + } + + override def toProtoV1: v1.TopologyTransaction = + v1.TopologyTransaction( + v1.TopologyTransaction.Transaction.DomainGovernance( + toDomainGovernanceTransactionProtoV1 + ) + ) + + override def pretty: Pretty[DomainGovernanceTransaction] = prettyOfClass( + param("element", _.element) + ) + + def reverse: TopologyTransaction[TopologyChangeOp.Replace] = this + + override def asVersion(protocolVersion: ProtocolVersion): DomainGovernanceTransaction = + DomainGovernanceTransaction(element)( + TopologyTransaction.protocolVersionRepresentativeFor(protocolVersion) + ) +} + +object DomainGovernanceTransaction { + def apply( + mapping: DomainGovernanceMapping, + protocolVersion: ProtocolVersion, + ): DomainGovernanceTransaction = + DomainGovernanceTransaction(DomainGovernanceElement(mapping))( + TopologyTransaction.protocolVersionRepresentativeFor(protocolVersion) + ) + + def apply( + element: DomainGovernanceElement, + protocolVersion: ProtocolVersion, + ): DomainGovernanceTransaction = 
DomainGovernanceTransaction(element)( + TopologyTransaction.protocolVersionRepresentativeFor(protocolVersion) + ) + + private[transaction] def fromProtoV1( + protoTopologyTransaction: v1.DomainGovernanceTransaction, + bytes: ByteString, + ): ParsingResult[DomainGovernanceTransaction] = { + val mapping: ParsingResult[DomainGovernanceMapping] = protoTopologyTransaction.mapping match { + case v1.DomainGovernanceTransaction.Mapping.DomainParametersChange(domainParametersChange) => + DomainParametersChange.fromProtoV1(domainParametersChange) + + case v1.DomainGovernanceTransaction.Mapping.Empty => + Left(UnrecognizedField("DomainGovernanceTransaction.Mapping is empty")) + } + + mapping.map(mapping => + DomainGovernanceTransaction(DomainGovernanceElement(mapping))( + TopologyTransaction.protocolVersionRepresentativeFor(ProtoVersion(1)), + Some(bytes), + ) + ) + } + + def fromByteString(bytes: ByteString): ParsingResult[DomainGovernanceTransaction] = + for { + converted <- TopologyTransaction.fromByteString(bytes) + result <- converted match { + case _: TopologyStateUpdate[_] => + Left( + ProtoDeserializationError.TransactionDeserialization( + "Expecting DomainGovernanceTransaction, found TopologyStateUpdate" + ) + ) + case domainGovernanceTransaction: DomainGovernanceTransaction => + Right(domainGovernanceTransaction) + + } + } yield result +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyTransactionX.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyTransactionX.scala new file mode 100644 index 0000000000..8ff3259e11 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyTransactionX.scala @@ -0,0 +1,197 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.topology.transaction

import com.digitalasset.canton.ProtoDeserializationError.*
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.crypto.*
import com.digitalasset.canton.logging.pretty.PrettyInstances.*
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.protocol.v2
import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.digitalasset.canton.serialization.{ProtoConverter, ProtocolVersionedMemoizedEvidence}
import com.digitalasset.canton.topology.transaction.TopologyTransactionX.TxHash
import com.digitalasset.canton.version.*
import com.google.protobuf.ByteString
import slick.jdbc.SetParameter

import scala.reflect.ClassTag

/** Replace or Remove */
sealed trait TopologyChangeOpX extends TopologyChangeOpCommon {

  /** Protobuf (v2) enum value of this operation. */
  def toProto: v2.TopologyChangeOpX

  /** Safe downcast: returns `Some(this)` iff this operation has runtime type `TargetOp`. */
  final def select[TargetOp <: TopologyChangeOpX](implicit
      O: ClassTag[TargetOp]
  ): Option[TargetOp] = O.unapply(this)
}

object TopologyChangeOpX {

  /** Adds or replaces an existing record */
  final case object Replace extends TopologyChangeOpX {
    override def toProto: v2.TopologyChangeOpX = v2.TopologyChangeOpX.Replace
  }

  // Removes an existing record
  final case object Remove extends TopologyChangeOpX {
    override def toProto: v2.TopologyChangeOpX = v2.TopologyChangeOpX.Remove
  }

  type Remove = Remove.type
  type Replace = Replace.type

  // Extractors so pattern matches can pull the operation out of (signed) topology transactions.
  def unapply(
      tx: TopologyTransactionX[TopologyChangeOpX, TopologyMappingX]
  ): Option[TopologyChangeOpX] = Some(tx.op)
  def unapply(
      tx: SignedTopologyTransactionX[TopologyChangeOpX, TopologyMappingX]
  ): Option[TopologyChangeOpX] = Some(tx.transaction.op)

  /** Parses the protobuf enum; `Unrecognized` values are surfaced as a deserialization error. */
  def fromProtoV2(
      protoOp: v2.TopologyChangeOpX
  ): ParsingResult[TopologyChangeOpX] =
    protoOp match {
      case v2.TopologyChangeOpX.Remove => Right(Remove)
      case v2.TopologyChangeOpX.Replace => Right(Replace)
      case v2.TopologyChangeOpX.Unrecognized(x) => Left(UnrecognizedEnum(protoOp.name, x))
    }

  // DB encoding: Remove -> 1, Replace -> 2. These values are persisted; do not change them.
  implicit val setParameterTopologyChangeOp: SetParameter[TopologyChangeOpX] = (v, pp) =>
    v match {
      case Remove => pp.setInt(1)
      case Replace => pp.setInt(2)
    }

}

/** Change to the distributed domain topology
  *
  * A topology transaction is a state change to the domain topology. There are different
  * types of topology states (so called mappings, because they map some id to some value).
  *
  * Each mapping has some variables and some combination of these variables makes a
  * "unique key". Subsequent changes to that key need to have an incremental serial number.
  *
  * Topology changes always affect certain identities. Therefore, these topology
  * transactions need to be authorized through signatures.
  *
  * An authorized transaction is called a [[SignedTopologyTransactionX]]
  */
final case class TopologyTransactionX[+Op <: TopologyChangeOpX, +M <: TopologyMappingX] private (
    op: Op,
    serial: PositiveInt,
    mapping: M,
)(
    // NOTE: the second parameter list is excluded from the case class's equals/hashCode,
    // so equality is determined by (op, serial, mapping) only.
    override val representativeProtocolVersion: RepresentativeProtocolVersion[
      TopologyTransactionX.type
    ],
    override val deserializedFrom: Option[ByteString] = None,
) extends ProtocolVersionedMemoizedEvidence
    with PrettyPrinting
    with HasProtocolVersionedWrapper[TopologyTransactionX[TopologyChangeOpX, TopologyMappingX]] {

  /** The inverse transaction: flips Replace <-> Remove and bumps the serial by one. */
  def reverse: TopologyTransactionX[TopologyChangeOpX, M] = {
    val next = (op: TopologyChangeOpX) match {
      case TopologyChangeOpX.Replace => TopologyChangeOpX.Remove
      case TopologyChangeOpX.Remove => TopologyChangeOpX.Replace
    }
    TopologyTransactionX(next, serial = serial.increment, mapping = mapping)(
      representativeProtocolVersion,
      None,
    )
  }

  /** Narrows the mapping type; the cast is safe because `select` checked the runtime class. */
  @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf"))
  def selectMapping[TargetMapping <: TopologyMappingX: ClassTag]
      : Option[TopologyTransactionX[Op, TargetMapping]] =
    mapping
      .select[TargetMapping]
      .map(_ => this.asInstanceOf[TopologyTransactionX[Op, TargetMapping]])

  /** Narrows the operation type; the cast is safe because `select` checked the runtime class. */
  @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf"))
  def selectOp[TargetOp <: TopologyChangeOpX: ClassTag]: Option[TopologyTransactionX[TargetOp, M]] =
    op.select[TargetOp].map(_ => this.asInstanceOf[TopologyTransactionX[TargetOp, M]])

  /** returns hash of the given transaction */
  lazy val hash: TxHash = {
    TxHash(
      Hash.digest(
        HashPurpose.TopologyTransactionSignature,
        // TODO(#14048) use digest directly to avoid protobuf serialization for hashing
        this.getCryptographicEvidence,
        HashAlgorithm.Sha256,
      )
    )
  }

  override def toByteStringUnmemoized: ByteString = super[HasProtocolVersionedWrapper].toByteString

  def toProtoV2: v2.TopologyTransactionX = v2.TopologyTransactionX(
    operation = op.toProto,
    serial = serial.value,
    mapping = Some(mapping.toProtoV2),
  )

  /** Re-wraps this transaction for a different protocol version.
    * Note that no memoized serialization is carried over (deserializedFrom defaults to None).
    */
  def asVersion(
      protocolVersion: ProtocolVersion
  ): TopologyTransactionX[Op, M] = {
    TopologyTransactionX[Op, M](op, serial, mapping)(
      TopologyTransactionX.protocolVersionRepresentativeFor(protocolVersion)
    )
  }

  /** Indicates how to pretty print this instance.
    * See `PrettyPrintingTest` for examples on how to implement this method.
    */
  override def pretty: Pretty[TopologyTransactionX.this.type] =
    prettyOfClass(
      unnamedParam(_.mapping),
      param("serial", _.serial),
      param("op", _.op),
    )

  @transient override protected lazy val companionObj: TopologyTransactionX.type =
    TopologyTransactionX
}

object TopologyTransactionX
    extends HasMemoizedProtocolVersionedWrapperCompanion[
      TopologyTransactionX[TopologyChangeOpX, TopologyMappingX]
    ] {

  /** Hash over the serialized transaction (see [[TopologyTransactionX.hash]]). */
  final case class TxHash(hash: Hash) extends AnyVal {}

  override val name: String = "TopologyTransaction"

  type GenericTopologyTransactionX = TopologyTransactionX[TopologyChangeOpX, TopologyMappingX]

  val supportedProtoVersions = SupportedProtoVersions(
    ProtoVersion(2) -> VersionedProtoConverter(ProtocolVersion.v30)(v2.TopologyTransactionX)(
      supportedProtoVersionMemoized(_)(fromProtoV2),
      _.toProtoV2.toByteString,
    )
  )

  /** Public constructor: resolves the representative protocol version for `protocolVersion`. */
  def apply[Op <: TopologyChangeOpX, M <: TopologyMappingX](
      op: Op,
      serial: PositiveInt,
      mapping: M,
      protocolVersion: ProtocolVersion,
  ): TopologyTransactionX[Op, M] = TopologyTransactionX[Op, M](op, serial, mapping)(
    protocolVersionRepresentativeFor(protocolVersion),
    None,
  )

  /** Deserialization; the original `bytes` are memoized so re-serialization is byte-identical. */
  private def fromProtoV2(transactionP: v2.TopologyTransactionX)(
      bytes: ByteString
  ): ParsingResult[TopologyTransactionX[TopologyChangeOpX, TopologyMappingX]] = {
    val v2.TopologyTransactionX(opP, serialP, mappingP) = transactionP
    for {
      mapping <- ProtoConverter.parseRequired(TopologyMappingX.fromProtoV2, "mapping", mappingP)
      serial <- ProtoConverter.parsePositiveInt(serialP)
      op <- TopologyChangeOpX.fromProtoV2(opP)
    } yield TopologyTransactionX(op, serial, mapping)(
      protocolVersionRepresentativeFor(ProtoVersion(2)),
      Some(bytes),
    )
  }
}
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.topology.transaction

import com.digitalasset.canton.topology.*
import slick.jdbc.SetParameter

/** Unique path of a topology transaction
  *
  * The unique path allows us to distinguish any topology transaction by a unique code.
  * In addition, the code is chosen such that we can organise all transactions in a
  * binary tree structure such that changes to that tree are mostly local.
  *
  * The path is defined by
  * namespace :: Rest
  * and Rest can be
  * "nsd" :: content :: topologyElementId (for namespace delegations)
  * "uid" :: uid.identifier :: "code" :: element-id
  *
  * (type, namespace, optional[identifier], optional[element-id])
  *
  * Therefore, if we onboard a new participant with many parties, all the changes
  * to the topology will be within the "subpath" defined by the namespace.
  */
trait UniquePath {

  /** Database discriminator for the transaction type of this path. */
  def dbType: DomainTopologyTransactionType

  /** Root namespace of the identity affected by the transaction. */
  def namespace: Namespace

  /** Unique identifier, if the path refers to one (namespace delegations do not). */
  def maybeUid: Option[UniqueIdentifier]

  /** Element id, if the path has one (domain governance paths do not). */
  def maybeElementId: Option[TopologyElementId]

  /** Serialized path; segments joined with the `SafeSimpleString` delimiter. */
  def toProtoPrimitive: String
}

/** Path of a namespace delegation: `namespace :: "nsd" :: element-id`. */
final case class UniquePathNamespaceDelegation(namespace: Namespace, elementId: TopologyElementId)
    extends UniquePath {

  override def dbType: DomainTopologyTransactionType =
    DomainTopologyTransactionType.NamespaceDelegation
  override def maybeUid: Option[UniqueIdentifier] = None
  override def maybeElementId: Option[TopologyElementId] = Some(elementId)
  // lazy: computed once; the path is immutable
  override lazy val toProtoPrimitive: String =
    Seq(namespace.fingerprint.unwrap, dbType.code, elementId.unwrap)
      .mkString(SafeSimpleString.delimiter)

}

/** Path of a signed topology transaction:
  * `namespace :: "uid" :: identifier :: type-code :: element-id`.
  */
final case class UniquePathSignedTopologyTransaction(
    uid: UniqueIdentifier,
    dbType: DomainTopologyTransactionType,
    elementId: TopologyElementId,
) extends UniquePath {

  override lazy val toProtoPrimitive: String =
    Seq(
      uid.namespace.fingerprint.unwrap,
      UniquePath.uniqueIdentifierCode,
      uid.id.unwrap,
      dbType.code,
      elementId.unwrap,
    )
      .mkString(SafeSimpleString.delimiter)

  override def maybeUid: Option[UniqueIdentifier] = Some(uid)
  override def maybeElementId: Option[TopologyElementId] = Some(elementId)

  override def namespace: Namespace = uid.namespace

}

object UniquePathSignedTopologyTransaction {

  /** Path prefix matching all transactions of a given uid (no type code / element id);
    * used to query a whole "subpath".
    */
  def queryForUid(uniqueIdentifier: UniqueIdentifier): String =
    Seq(
      uniqueIdentifier.namespace.toProtoPrimitive,
      UniquePath.uniqueIdentifierCode,
      uniqueIdentifier.id.unwrap,
    )
      .mkString(SafeSimpleString.delimiter)

  def forUid(
      uid: UniqueIdentifier,
      typ: DomainTopologyTransactionType,
      elementId: TopologyElementId,
  ): UniquePathSignedTopologyTransaction =
    UniquePathSignedTopologyTransaction(uid, typ, elementId)

}

/** Path of a domain governance transaction: like a signed topology transaction path
  * but without an element id.
  */
final case class UniquePathSignedDomainGovernanceTransaction(
    uid: UniqueIdentifier,
    dbType: DomainTopologyTransactionType,
) extends UniquePath {

  override lazy val toProtoPrimitive: String =
    Seq(
      uid.namespace.fingerprint.unwrap,
      UniquePath.uniqueIdentifierCode,
      uid.id.unwrap,
      dbType.code,
    )
      .mkString(SafeSimpleString.delimiter)

  override def maybeUid: Option[UniqueIdentifier] = Some(uid)
  override def maybeElementId: Option[TopologyElementId] = None

  override def namespace: Namespace = uid.namespace
}

object UniquePath {

  // Path segment marking that the next segment is a unique identifier.
  private[topology] val uniqueIdentifierCode = "uid"

  /** Path prefix matching all transactions under the given namespace. */
  def queryForNamespace(namespace: Namespace): String = namespace.toProtoPrimitive

}

/** Discriminator for the kind of topology transaction.
  * Both `dbInt` and `code` are persisted (DB column / path segment); values must stay stable.
  */
sealed case class DomainTopologyTransactionType private (dbInt: Int, code: String)

object DomainTopologyTransactionType {

  object ParticipantState extends DomainTopologyTransactionType(1, "pas")
  object NamespaceDelegation extends DomainTopologyTransactionType(2, "nsd")
  object IdentifierDelegation extends DomainTopologyTransactionType(3, "idd")
  object OwnerToKeyMapping extends DomainTopologyTransactionType(4, "okm")
  object PartyToParticipant extends DomainTopologyTransactionType(5, "ptp")
  object SignedLegalIdentityClaim extends DomainTopologyTransactionType(6, "lic")
  object PackageUse extends DomainTopologyTransactionType(7, "pau")
  object DomainParameters extends DomainTopologyTransactionType(8, "dmp")
  object MediatorDomainState extends DomainTopologyTransactionType(9, "mds")

  // All known types (order here does not follow dbInt order).
  lazy val all = Seq(
    NamespaceDelegation,
    IdentifierDelegation,
    OwnerToKeyMapping,
    PartyToParticipant,
    SignedLegalIdentityClaim,
    PackageUse,
    ParticipantState,
    DomainParameters,
    MediatorDomainState,
  )

  // DB encoding uses the stable dbInt value.
  implicit val setParameterDomainTopologyTransactionType
      : SetParameter[DomainTopologyTransactionType] = (v, pp) => pp.setInt(v.dbInt)

}
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.tracing

import com.daml.ledger.api.v1.trace_context.TraceContext as DamlTraceContext
import com.digitalasset.canton.serialization.ProtoConverter
import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.digitalasset.canton.v0
import com.digitalasset.canton.version.{
  HasVersionedMessageCompanion,
  HasVersionedMessageCompanionCommon,
  HasVersionedMessageCompanionDbHelpers,
  HasVersionedWrapper,
  ProtoVersion,
  ProtocolVersion,
}
import com.typesafe.scalalogging.Logger

/** Wrapper around [[TraceContext]] to keep serialization out of the [[TraceContext]] itself
  * and thereby reduce its dependencies.
  */
final case class SerializableTraceContext(traceContext: TraceContext)
    extends HasVersionedWrapper[SerializableTraceContext] {

  def unwrap: TraceContext = traceContext

  override protected def companionObj
      : HasVersionedMessageCompanionCommon[SerializableTraceContext] = SerializableTraceContext

  // Serialized via the W3C trace context representation (traceparent / tracestate).
  def toProtoV0: v0.TraceContext = {
    val w3cTraceContext = traceContext.asW3CTraceContext
    v0.TraceContext(w3cTraceContext.map(_.parent), w3cTraceContext.flatMap(_.state))
  }

  def toDamlProto: DamlTraceContext = {
    val w3cTraceContext = traceContext.asW3CTraceContext
    DamlTraceContext(w3cTraceContext.map(_.parent), w3cTraceContext.flatMap(_.state))
  }

  /** None for an empty trace context, so empty contexts are omitted on the wire. */
  def toDamlProtoOpt: Option[DamlTraceContext] =
    Option.when(traceContext != TraceContext.empty)(toDamlProto)
}

object SerializableTraceContext
    extends HasVersionedMessageCompanion[SerializableTraceContext]
    with HasVersionedMessageCompanionDbHelpers[SerializableTraceContext] {
  val supportedProtoVersions: SupportedProtoVersions = SupportedProtoVersions(
    ProtoVersion(0) -> ProtoCodec(
      ProtocolVersion.v30,
      supportedProtoVersion(v0.TraceContext)(fromProtoV0),
      _.toProtoV0.toByteString,
    )
  )

  /** The name of the class as used for pretty-printing */
  override def name: String = "TraceContext"

  val empty: SerializableTraceContext = SerializableTraceContext(TraceContext.empty)

  /** Construct a TraceContext from provided protobuf structure.
    * Errors will be logged at a WARN level using the provided storageLogger and an empty TraceContext will be returned.
    */
  def fromProtoSafeV0Opt(logger: Logger)(
      traceContextP: Option[v0.TraceContext]
  ): SerializableTraceContext =
    safely(logger)(fromProtoV0Opt)(traceContextP)

  /** Strict variant: a missing field is a parsing error rather than an empty context. */
  def fromProtoV0Opt(
      traceContextP: Option[v0.TraceContext]
  ): ParsingResult[SerializableTraceContext] =
    for {
      tcP <- ProtoConverter.required("traceContext", traceContextP)
      tc <- fromProtoV0(tcP)
    } yield tc

  def fromProtoV0(tc: v0.TraceContext): ParsingResult[SerializableTraceContext] =
    Right(SerializableTraceContext(W3CTraceContext.toTraceContext(tc.traceparent, tc.tracestate)))

  /** Best-effort variant of [[fromDamlProtoOpt]]: logs and falls back to empty on error. */
  def fromDamlProtoSafeOpt(logger: Logger)(
      traceContextP: Option[DamlTraceContext]
  ): SerializableTraceContext =
    safely(logger)(fromDamlProtoOpt)(traceContextP)

  def fromDamlProtoOpt(
      traceContextP: Option[DamlTraceContext]
  ): ParsingResult[SerializableTraceContext] =
    for {
      tcP <- ProtoConverter.required("traceContext", traceContextP)
      tc <- fromDamlProto(tcP)
    } yield tc

  def fromDamlProto(tc: DamlTraceContext): ParsingResult[SerializableTraceContext] =
    Right(SerializableTraceContext(W3CTraceContext.toTraceContext(tc.traceparent, tc.tracestate)))

  // Shared fallback logic: deserialization failures are logged at WARN and replaced
  // by an empty trace context instead of failing the caller.
  private def safely[A](
      logger: Logger
  )(fn: A => ParsingResult[SerializableTraceContext])(a: A): SerializableTraceContext =
    fn(a) match {
      case Left(err) =>
        logger.warn(s"Failed to deserialize provided trace context: $err")
        SerializableTraceContext(TraceContext.empty)
      case Right(traceContext) => traceContext
    }
}
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.tracing

import cats.data.{EitherT, OptionT}
import com.digitalasset.canton.DiscardOps
import com.digitalasset.canton.lifecycle.UnlessShutdown
import com.digitalasset.canton.sequencing.AsyncResult
import com.digitalasset.canton.tracing.Spanning.{SpanEndingExecutionContext, SpanWrapper}
import com.digitalasset.canton.util.{Checked, CheckedT}
import io.opentelemetry.api.common.Attributes
import io.opentelemetry.api.trace.{Span, StatusCode, Tracer}

import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal
import scala.util.{Failure, Success}

/** Mix-in providing helpers to run a computation inside an OpenTelemetry span. */
trait Spanning {

  /** Runs `f` in a span whose parent trace context is taken from the current gRPC context. */
  protected def withSpanFromGrpcContext[A](description: String)(
      f: TraceContext => SpanWrapper => A
  )(implicit tracer: Tracer): A = {
    implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext
    withSpan(description)(f)(traceContext, tracer)
  }

  /** Runs `f` in a span that starts a fresh trace (empty parent context). */
  protected def withNewTrace[A](description: String)(f: TraceContext => SpanWrapper => A)(implicit
      tracer: Tracer
  ): A =
    withSpan(description)(f)(TraceContext.empty, tracer)

  /** Runs `f` in a new child span of `traceContext`, ending the span only once the
    * (possibly asynchronous / nested monadic) result of `f` has completed.
    *
    * `closeSpan` recursively unwraps Future, EitherT, OptionT, CheckedT, UnlessShutdown
    * and AsyncResult values so that the span end is registered on the innermost Future;
    * for any other value the span is ended immediately. Exceptions are recorded on the
    * span before it is ended.
    */
  protected def withSpan[A](
      description: String
  )(f: TraceContext => SpanWrapper => A)(implicit traceContext: TraceContext, tracer: Tracer): A = {
    val currentSpan = startSpan(description)

    // Recursively peel monadic wrappers; the non-matching base case ends the span.
    def closeSpan(value: Any): Unit = value match {
      case future: Future[_] =>
        closeOnComplete(future)
      case eitherT: EitherT[_, _, _] =>
        closeSpan(eitherT.value)
      case Right(x) => closeSpan(x) // Look into the result of an EitherT
      case optionT: OptionT[_, _] =>
        closeSpan(optionT.value)
      case Some(x) => closeSpan(x) // Look into the result of an OptionT
      case checkedT: CheckedT[_, _, _, _] =>
        closeSpan(checkedT.value)
      case Checked.Result(_, x) => closeSpan(x) // Look into the result of a CheckedT
      case unlessShutdown: UnlessShutdown.Outcome[_] =>
        // Look into the result of a FutureUnlessShutdown
        closeSpan(unlessShutdown.result)
      case asyncResult: AsyncResult =>
        closeSpan(asyncResult.unwrap)
      case _ =>
        currentSpan.end()
    }

    def closeOnComplete(f: Future[_]): Unit =
      f.onComplete {
        case Success(x) =>
          closeSpan(x)
        case Failure(exception) =>
          recordException(exception).discard
          currentSpan.end()
      }(SpanEndingExecutionContext)

    def recordException(exception: Throwable) = {
      currentSpan.recordException(exception)
      currentSpan.setStatus(StatusCode.ERROR, "Operation ended with error")
    }

    val result: A =
      try {
        // Child computation sees a trace context that carries the new span.
        val childContext = TraceContext(currentSpan.storeInContext(traceContext.context))
        f(childContext)(new SpanWrapper(currentSpan))
      } catch {
        // Synchronous failure: record and end the span, then rethrow.
        case NonFatal(exception) =>
          recordException(exception).discard
          currentSpan.end()
          throw exception
      }
    closeSpan(result)
    result
  }

  // Creates a span parented at the given trace context and tags it with the caller's class.
  private def startSpan(
      description: String
  )(implicit parentTraceContext: TraceContext, tracer: Tracer): Span = {
    val currentSpan = tracer
      .spanBuilder(description)
      .setParent(parentTraceContext.context)
      .startSpan()
    currentSpan.setAttribute("canton.class", getClass.getName)
    currentSpan
  }
}

object Spanning {
  // this execution context is solely used to end spans, which is a non-blocking operation that
  // does not throw any expected exceptions
  private object SpanEndingExecutionContext extends ExecutionContext {
    override def execute(r: Runnable): Unit = r.run()
    override def reportFailure(t: Throwable): Unit =
      throw new IllegalStateException("unexpected error ending span", t)
  }

  /** Restricted view of a [[io.opentelemetry.api.trace.Span]] handed to user code;
    * all attribute keys are prefixed with `canton.`.
    */
  class SpanWrapper(span: Span) {
    def addEvent(name: String, attributes: Map[String, String] = Map()): Unit = {
      val _ = span.addEvent(name, mapToAttributes(attributes))
    }
    def setAttribute(key: String, value: String): Unit = {
      val _ = span.setAttribute(s"canton.$key", value)
    }
    def recordException(exception: Throwable, attributes: Map[String, String] = Map()): Unit = {
      val _ = span.recordException(exception, mapToAttributes(attributes))
    }

    def getSpanId: String = span.getSpanContext.getSpanId
  }
  private def mapToAttributes(map: Map[String, String]): Attributes =
    map
      .foldRight(Attributes.builder()) { case ((key, value), builder) =>
        builder.put(s"canton.$key", value)
      }
      .build()
}
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.traffic

import com.digitalasset.canton.config.RequireTypes.{NonNegativeLong, PositiveInt}
import com.digitalasset.canton.sequencing.protocol.{
  Batch,
  ClosedEnvelope,
  GroupRecipient,
  MemberRecipient,
}
import com.digitalasset.canton.topology.Member
import com.google.common.annotations.VisibleForTesting

// TODO(i12907): Precise costs calculations
class EventCostCalculator {

  /** Total traffic cost of a batch: the sum of the per-envelope costs. */
  def computeEventCost(
      event: Batch[ClosedEnvelope],
      costMultiplier: PositiveInt,
      groupToMembers: Map[GroupRecipient, Set[Member]],
  ): NonNegativeLong = {
    val total =
      event.envelopes.foldLeft(0L) { (acc, envelope) =>
        acc + computeEnvelopeCost(costMultiplier, groupToMembers)(envelope)
      }
    NonNegativeLong.tryCreate(total)
  }

  // Overridable in tests to decouple cost computation from actual payload bytes.
  @VisibleForTesting
  protected def payloadSize(envelope: ClosedEnvelope): Int = envelope.bytes.size()

  /** Cost of a single envelope: payload size (write cost) plus a read cost proportional
    * to the effective number of recipients and the configured multiplier (in basis points
    * of a basis point, hence the division by 10000).
    */
  def computeEnvelopeCost(
      costMultiplier: PositiveInt,
      groupToMembers: Map[GroupRecipient, Set[Member]],
  )(envelope: ClosedEnvelope): Long = {
    val writeCosts = payloadSize(envelope)

    // Group recipients count once per resolved member; unknown groups count as zero.
    val recipientsSize = envelope.recipients.allRecipients.toSeq.foldLeft(0) {
      case (count, group: GroupRecipient) =>
        count + groupToMembers.get(group).fold(0)(_.size)
      case (count, _: MemberRecipient) => count + 1
    }

    // read costs are based on the write costs and multiplied by the number of recipients with a readVsWrite cost multiplier
    val readCosts = writeCosts * recipientsSize * costMultiplier.value / 10000L

    writeCosts + readCosts
  }
}
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.util

import com.google.protobuf.ByteString

import java.io.*
import scala.concurrent.blocking

/** Write and read byte strings to files.
  */
object BinaryFileUtil {

  /** Writes `bytes` to `outputFile`, always closing the stream afterwards. */
  def writeByteStringToFile(outputFile: String, bytes: ByteString): Unit = {
    val stream = new BufferedOutputStream(new FileOutputStream(outputFile))
    try {
      blocking {
        stream.write(bytes.toByteArray)
      }
    } finally {
      stream.close()
    }
  }

  /** Reads the whole file into a [[ByteString]], mapping I/O failures to a
    * human-readable error message (distinguishing "missing" from "unreadable").
    */
  def readByteStringFromFile(inputFile: String): Either[String, ByteString] = {
    @SuppressWarnings(Array("org.wartremover.warts.Var"))
    var stream: Option[BufferedInputStream] = None
    try {
      stream = Some(new BufferedInputStream(new FileInputStream(inputFile)))
      blocking {
        stream.map(ByteString.readFrom).toRight("Will not happen as otherwise it would throw")
      }
    } catch {
      case e: IOException =>
        if (new java.io.File(inputFile).exists())
          Left(
            s"File exists but cannot be read [${inputFile}]. ${ErrorUtil.messageWithStacktrace(e)}"
          )
        else
          Left(s"No such file [${inputFile}].")
    } finally {
      stream.foreach(_.close())
    }
  }

  /** Like [[readByteStringFromFile]] but throws [[IllegalArgumentException]] on failure. */
  def tryReadByteStringFromFile(inputFile: String): ByteString =
    readByteStringFromFile(inputFile) match {
      case Left(err) => throw new IllegalArgumentException(s"Can not load ${inputFile}: $err")
      case Right(bytes) => bytes
    }

}
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.util

import cats.Order
import cats.syntax.either.*
import com.digitalasset.canton.config.RequireTypes.NonNegativeInt
import com.digitalasset.canton.serialization.{
  DefaultDeserializationError,
  DeserializationError,
  MaxByteToDecompressExceeded,
}
import com.google.protobuf.ByteString

import java.io.{ByteArrayOutputStream, EOFException}
import java.util.zip.{GZIPInputStream, GZIPOutputStream, ZipException}
import scala.annotation.tailrec

object ByteStringUtil {

  /** Lexicographic ordering on [[com.google.protobuf.ByteString]]s */
  // Bytes are compared via Byte.compareTo (signed); a strict prefix sorts before its extension.
  val orderByteString: Order[ByteString] = new Order[ByteString] {
    override def compare(x: ByteString, y: ByteString): Int = {
      val iterX = x.iterator()
      val iterY = y.iterator()

      @tailrec def go(): Int = {
        if (iterX.hasNext) {
          if (iterY.hasNext) {
            val cmp = iterX.next().compareTo(iterY.next())
            if (cmp == 0) go() else cmp
          } else 1
        } else if (iterY.hasNext) -1
        else 0
      }

      go()
    }
  }

  /** Gzip-compresses the given bytes; the stream is closed by `withResource`. */
  def compressGzip(bytes: ByteString): ByteString = {
    val rawSize = bytes.size()
    // rawSize is only the initial buffer capacity hint for the output.
    val compressed = new ByteArrayOutputStream(rawSize)
    ResourceUtil.withResource(new GZIPOutputStream(compressed)) { gzipper =>
      bytes.writeTo(gzipper)
    }
    ByteString.copyFrom(compressed.toByteArray)
  }

  /** If maxBytesToRead is not specified, we decompress all the gunzipper input stream.
    * If maxBytesToRead is specified, we decompress maximum maxBytesToRead bytes, and if the input is larger
    * we throw MaxBytesToDecompressExceeded error.
    */
  def decompressGzip(
      bytes: ByteString,
      maxBytesLimit: Option[Int],
  ): Either[DeserializationError, ByteString] = {
    ResourceUtil
      .withResourceEither(new GZIPInputStream(bytes.newInput())) { gunzipper =>
        maxBytesLimit match {
          case None =>
            Right(ByteString.readFrom(gunzipper))
          case Some(max) =>
            // Read one extra byte to detect whether the input exceeds the limit.
            val read = gunzipper.readNBytes(max + 1)
            if (read.length > max) {
              Left(
                MaxByteToDecompressExceeded(
                  s"Max bytes to decompress is exceeded. The limit is $max bytes."
                )
              )
            } else {
              Right(ByteString.copyFrom(read))
            }
        }
      }
      // withResourceEither captures thrown exceptions as Left; map them to DeserializationError.
      .leftMap(errorMapping)
      .flatten
  }

  /** Based on the final size we either truncate the bytes to fit in that size or pad with 0s
    */
  def padOrTruncate(bytes: ByteString, finalSize: NonNegativeInt): ByteString =
    if (finalSize == NonNegativeInt.zero)
      ByteString.EMPTY
    else {
      val padSize = finalSize.value - bytes.size()
      if (padSize > 0)
        bytes.concat(ByteString.copyFrom(new Array[Byte](padSize)))
      else if (padSize == 0) bytes
      // padSize is negative here, so this truncates to finalSize bytes.
      else bytes.substring(0, bytes.size() + padSize)
    }

  // Maps gzip/stream failures to deserialization errors with a readable message.
  private def errorMapping(err: Throwable): DeserializationError = {
    err match {
      // all exceptions that were observed when testing these methods (see also `GzipCompressionTests`)
      case ex: ZipException => DefaultDeserializationError(ex.getMessage)
      case _: EOFException =>
        DefaultDeserializationError("Compressed byte input ended too early")
      case error => DefaultDeserializationError(error.getMessage)
    }
  }
}
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.util

import cats.data.Chain

/** Provides utility functions for the `cats` implementation of a `Chain`. This is a data-structure similar to a List,
  * with constant time prepend and append. Note that the `Chain` has a performance hit when pattern matching as there is
  * no constant-time uncons operation.
  *
  * Documentation on the `cats` `Chain`: https://typelevel.org/cats/datatypes/chain.html.
  */
object ChainUtil {

  /** Returns the last element of `chain`, or `None` if the chain is empty. */
  def lastOption[A](chain: Chain[A]): Option[A] =
    chain.reverseIterator.nextOption()

}
+ * + * @tparam A Type of aborting errors + * @tparam N Type of non-aborting errors + * @tparam R Result type of the monad + */ +sealed abstract class Checked[+A, +N, +R] extends Product with Serializable { + import Checked.* + + def map[RR](f: R => RR): Checked[A, N, RR] = this match { + case abort @ Abort(_, _) => abort + case Result(nonaborts, result) => Result(nonaborts, f(result)) + } + + def mapAbort[AA](f: A => AA): Checked[AA, N, R] = this match { + case Abort(abort, nonaborts) => Abort(f(abort), nonaborts) + case r @ Result(_, _) => r + } + + def mapNonaborts[NN](f: Chain[N] => Chain[NN]): Checked[A, NN, R] = this match { + case Abort(abort, nonaborts) => Abort(abort, f(nonaborts)) + case Result(nonaborts, result) => Result(f(nonaborts), result) + } + def mapNonabort[NN](f: N => NN): Checked[A, NN, R] = mapNonaborts(_.map(f)) + + def trimap[AA, NN, RR]( + abortMap: A => AA, + nonabortMap: N => NN, + resultMap: R => RR, + ): Checked[AA, NN, RR] = + this match { + case Abort(abort, nonaborts) => Abort(abortMap(abort), nonaborts.map(nonabortMap)) + case Result(nonaborts, result) => Result(nonaborts.map(nonabortMap), resultMap(result)) + } + + def fold[B](f: (A, Chain[N]) => B, g: (Chain[N], R) => B): B = this match { + case Abort(abort, nonaborts) => f(abort, nonaborts) + case Result(nonaborts, result) => g(nonaborts, result) + } + + def prependNonaborts[NN >: N](nonaborts: Chain[NN]): Checked[A, NN, R] = mapNonaborts( + Chain.concat(nonaborts, _) + ) + def prependNonabort[NN >: N](nonabort: NN): Checked[A, NN, R] = prependNonaborts( + Chain.one(nonabort) + ) + + def appendNonaborts[NN >: N](nonaborts: Chain[NN]): Checked[A, NN, R] = mapNonaborts( + Chain.concat(_, nonaborts) + ) + def appendNonabort[NN >: N](nonabort: NN): Checked[A, NN, R] = appendNonaborts( + Chain.one(nonabort) + ) + + /** Applicative product operation. 
Errors from `this` take precedence over `other` */ + def product[AA >: A, NN >: N, RR](other: Checked[AA, NN, RR]): Checked[AA, NN, (R, RR)] = + this match { + case abort @ Abort(_, _) => abort + case Result(nonaborts1, result1) => + other match { + case abort @ Abort(_, _) => abort.prependNonaborts(nonaborts1) + case Result(nonaborts2, result2) => + Result(Chain.concat(nonaborts1, nonaborts2), (result1, result2)) + } + } + + /** Applicative operation. Consistent with the monadic [[flatMap]] according to Cats' laws, i.e., + * {{{ + * x.ap(f) = for { g <- f; y <- x } yield g(x) + * }}} + */ + def ap[AA >: A, NN >: N, RR](f: Checked[AA, NN, R => RR]): Checked[AA, NN, RR] = + f.product(this).map { case (g, x) => g(x) } + + /** Reverse applicative operation. Errors from the argument (= `this`) take precedence over those from the function. */ + def reverseAp[AA >: A, NN >: N, RR](f: Checked[AA, NN, R => RR]): Checked[AA, NN, RR] = + this.product(f).map { case (x, g) => g(x) } + + def flatMap[AA >: A, NN >: N, RR](f: R => Checked[AA, NN, RR]): Checked[AA, NN, RR] = this match { + case abort @ Abort(_, _) => abort + case Result(nonaborts, result) => f(result).prependNonaborts(nonaborts) + } + + def biflatMap[AA, NN >: N, RR]( + f: A => Checked[AA, NN, RR], + g: R => Checked[AA, NN, RR], + ): Checked[AA, NN, RR] = + this match { + case Abort(abort, nonaborts) => f(abort).prependNonaborts(nonaborts) + case Result(nonaborts, result) => g(result).prependNonaborts(nonaborts) + } + + def abortFlatMap[AA, NN >: N, RR >: R](f: A => Checked[AA, NN, RR]): Checked[AA, NN, RR] = + biflatMap(f, Checked.result) + + /** Merges aborts with nonaborts, using the given `default` result if no result is contained. 
*/ + def toResult[NN, RR >: R, A1 >: A <: NN, N1 >: N <: NN]( + default: => RR + ): Checked[Nothing, NN, RR] = this match { + case Abort(abort, nonaborts) => Result((nonaborts: Chain[N1]).prepend[NN](abort: A1), default) + case result @ Result(_, _) => result: Checked[Nothing, N1, RR] + } + + def foreach(f: R => Unit): Unit = this match { + case Result(_, result) => f(result) + case _ => () + } + + def exists(pred: R => Boolean): Boolean = this match { + case Result(_, result) => pred(result) + case _ => false + } + + def forall(pred: R => Boolean): Boolean = this match { + case Result(_, result) => pred(result) + case _ => true + } + + /** When [[Checked.Result]], apply the function, marking the result as [[Checked.Result]] + * inside the Applicative's context, keeping the warnings. + * when [[Checked.Abort]], lift the [[Checked.Abort]] into the Applicative's context + */ + def traverse[F[_], AA >: A, NN >: N, RR]( + f: R => F[RR] + )(implicit F: Applicative[F]): F[Checked[AA, NN, RR]] = + this match { + case Result(nonaborts, result) => F.map(f(result))(Result(nonaborts, _)) + case e @ Abort(_, _) => F.pure(e) + } + + /** Discards nonaborts. */ + def toEither: Either[A, R] = this match { + case Abort(abort, _) => Left(abort) + case Result(_, result) => Right(result) + } + + /** Discards the result if there are nonaborts. */ + // Specifies two lower bounds for L as described in https://stackoverflow.com/a/6124549 + def toEitherWithNonaborts[L, A1 >: A <: L, N1 >: N <: L]: Either[NonEmptyChain[L], R] = + this match { + case Abort(abort, nonaborts) => + Left(NonEmptyChain.fromChainPrepend[L](abort: A1, nonaborts: Chain[N1])) + case Result(nonaborts, result) => + NonEmptyChain.fromChain[L](nonaborts: Chain[N1]).toLeft(result) + } + + /** Discards the result if there are nonaborts. 
*/ + def toEitherMergeNonaborts[L >: N](implicit + ev: A <:< NonEmptyChain[L] + ): Either[NonEmptyChain[L], R] = this match { + case Abort(abort, nonaborts) => Left(ev(abort).appendChain[L](nonaborts: Chain[N])) + case Result(nonaborts, result) => NonEmptyChain.fromChain[L](nonaborts: Chain[N]).toLeft(result) + } + + def toOption: Option[R] = this match { + case Abort(_, _) => None + case Result(_, result) => Some(result) + } + + def isAbort: Boolean = this match { + case Abort(_, _) => true + case Result(_, _) => false + } + + def isResult: Boolean = this match { + case Abort(_, _) => false + case Result(_, _) => true + } + + /** Is a [[Checked.Result]] with no errors */ + def successful: Boolean = this match { + case Abort(_, _) => false + case Result(nonaborts, _) => nonaborts.isEmpty + } + def nonaborts: Chain[N] + def getResult: Option[R] = this match { + case Abort(_, _) => None + case Result(_, result) => Some(result) + } + def getAbort: Option[A] = this match { + case Abort(abort, _) => Some(abort) + case Result(_, _) => None + } +} + +object Checked { + final case class Abort[+A, +N](abort: A, override val nonaborts: Chain[N]) + extends Checked[A, N, Nothing] + final case class Result[+N, +R](override val nonaborts: Chain[N], result: R) + extends Checked[Nothing, N, R] + + def abort[A, N, R](abort: A): Checked[A, N, R] = Abort(abort, Chain.empty) + def result[A, N, R](result: R): Checked[A, N, R] = Result(Chain.empty, result) + def continueWithResult[A, N, R](nonabort: N, result: R): Checked[A, N, R] = + Result(Chain.one(nonabort), result) + def continue[A, N](nonabort: N): Checked[A, N, Unit] = continueWithResult(nonabort, ()) + def continuesWithResult[A, N, R](nonaborts: NonEmptyChain[N], result: R): Checked[A, N, R] = + Result(nonaborts.toChain, result) + def continues[A, N](nonaborts: NonEmptyChain[N]): Checked[A, N, Unit] = + continuesWithResult(nonaborts, ()) + def unit[A, N]: Checked[A, N, Unit] = result(()) + + /** Treat [[scala.Left$]] as abort */ 
+ def fromEither[A, R](either: Either[A, R]): Checked[A, Nothing, R] = either.fold(abort, result) + + /** Treat [[scala.Left$]] as non-abort with `default` as the result */ + def fromEitherNonabort[N, R](default: => R)(either: Either[N, R]): Checked[Nothing, N, R] = + either.fold(continueWithResult(_, default), result) + + /** Treat [[scala.Left$]] as a chain of non-aborts with `default` as the result */ + def fromEitherNonaborts[N, R](default: => R)( + either: Either[NonEmptyChain[N], R] + ): Checked[Nothing, N, R] = + either.fold(left => Result(left.toChain, default), result) + + /** Treat [[scala.Left$]] as abort */ + def fromEitherT[F[_], A, R](eitherT: EitherT[F, A, R])(implicit + F: Functor[F] + ): F[Checked[A, Nothing, R]] = + F.map(eitherT.value)(fromEither) + + /** Treat [[scala.Left$]] as non-abort with `default` as the result */ + def fromEitherTNonabort[F[_], N, R](default: => R)(eitherT: EitherT[F, N, R])(implicit + F: Functor[F] + ): F[Checked[Nothing, N, R]] = + F.map(eitherT.value)(fromEitherNonabort(default)) + + /** Treat [[scala.Left$]] as a chain of non-aborts with `default` as the result */ + def fromEitherTNonaborts[F[_], N, R](default: => R)(eitherT: EitherT[F, NonEmptyChain[N], R])( + implicit F: Functor[F] + ): F[Checked[Nothing, N, R]] = + F.map(eitherT.value)(fromEitherNonaborts(default)) + + /** Treat test failure as abort */ + def cond[A, R](test: Boolean, right: => R, left: => A): Checked[A, Nothing, R] = + if (test) Checked.result(right) else Checked.abort(left) + + implicit def cantonUtilMonadErrorForChecked[A, N]: MonadError[Checked[A, N, *], A] = + new MonadError[Checked[A, N, *], A] { + override def map[R, RR](fa: Checked[A, N, R])(f: R => RR): Checked[A, N, RR] = fa.map(f) + + override def pure[R](a: R): Checked[A, N, R] = Checked.result(a) + + override def ap[R, RR](ff: Checked[A, N, R => RR])(fa: Checked[A, N, R]): Checked[A, N, RR] = + fa.ap(ff) + + override def product[R, RR]( + fa: Checked[A, N, R], + fb: Checked[A, N, RR], 
+ ): Checked[A, N, (R, RR)] = fa.product(fb) + + override def map2Eval[R, RR, Z](fr: Checked[A, N, R], frr: Eval[Checked[A, N, RR]])( + f: (R, RR) => Z + ): Eval[Checked[A, N, Z]] = fr match { + case abort @ Abort(_, _) => Now(abort) + case Result(ns, r) => frr.map(_.prependNonaborts(ns).map(f(r, _))) + } + + override def flatMap[R, RR](fa: Checked[A, N, R])( + f: R => Checked[A, N, RR] + ): Checked[A, N, RR] = fa.flatMap(f) + + override def tailRecM[S, R](s: S)(f: S => Checked[A, N, Either[S, R]]): Checked[A, N, R] = { + @tailrec def go(old: Checked[A, N, Either[S, R]]): Checked[A, N, R] = old match { + case abort @ Abort(_, _) => abort + case Result(nonaborts, Left(state)) => go(f(state).prependNonaborts(nonaborts)) + case Result(nonaborts, Right(result)) => Result(nonaborts, result) + } + + go(Checked.result(Left(s))) + } + + override def raiseError[R](abort: A): Checked[A, N, R] = Checked.abort(abort) + + override def handleErrorWith[R](fa: Checked[A, N, R])( + f: A => Checked[A, N, R] + ): Checked[A, N, R] = + fa.abortFlatMap(f) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/CheckedT.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/CheckedT.scala new file mode 100644 index 0000000000..a9b40063da --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/CheckedT.scala @@ -0,0 +1,346 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util + +import cats.data.{Chain, EitherT, Nested, NonEmptyChain, OptionT} +import cats.syntax.either.* +import cats.{Applicative, FlatMap, Functor, Monad, MonadError, Parallel, ~>} +import com.digitalasset.canton.FutureTransformer + +/** Monad Transformer for [[Checked]], allowing the effect of a monad `F` to be combined with the aborting and + * non-aborting failure effect of [[Checked]]. 
Similar to [[cats.data.EitherT]]. + */ +@FutureTransformer(0) +final case class CheckedT[F[_], A, N, R](value: F[Checked[A, N, R]]) { + + import Checked.{Abort, Result} + + def map[RR](f: R => RR)(implicit F: Functor[F]): CheckedT[F, A, N, RR] = CheckedT( + F.map(value)(_.map(f)) + ) + + def mapAbort[AA](f: A => AA)(implicit F: Functor[F]): CheckedT[F, AA, N, R] = CheckedT( + F.map(value)(_.mapAbort(f)) + ) + + def mapNonaborts[NN](f: Chain[N] => Chain[NN])(implicit F: Functor[F]): CheckedT[F, A, NN, R] = + CheckedT(F.map(value)(_.mapNonaborts(f))) + + def mapNonabort[NN](f: N => NN)(implicit F: Functor[F]): CheckedT[F, A, NN, R] = + this.mapNonaborts(_.map(f)) + + def trimap[AA, NN, RR](abortMap: A => AA, nonabortMap: N => NN, resultMap: R => RR)(implicit + F: Functor[F] + ): CheckedT[F, AA, NN, RR] = + CheckedT(F.map(value)(_.trimap(abortMap, nonabortMap, resultMap))) + + def semiflatMap[RR](f: R => F[RR])(implicit F: Monad[F]): CheckedT[F, A, N, RR] = + flatMap(result => CheckedT.result(f(result))) + + def fold[B](f: (A, Chain[N]) => B, g: (Chain[N], R) => B)(implicit F: Functor[F]): F[B] = + F.map(value)(_.fold(f, g)) + + def prependNonaborts[NN >: N](nonaborts: Chain[NN])(implicit + F: Functor[F] + ): CheckedT[F, A, NN, R] = + mapNonaborts(Chain.concat(nonaborts, _)) + + def prependNonabort[NN >: N](nonabort: NN)(implicit F: Functor[F]): CheckedT[F, A, NN, R] = + prependNonaborts(Chain.one(nonabort)) + + def appendNonaborts[NN >: N](nonaborts: Chain[NN])(implicit + F: Functor[F] + ): CheckedT[F, A, NN, R] = + mapNonaborts(Chain.concat(_, nonaborts)) + + def appendNonabort[NN >: N](nonabort: NN)(implicit F: Functor[F]): CheckedT[F, A, NN, R] = + appendNonaborts(Chain.one(nonabort)) + + /** Applicative product operation. 
Errors from `this` take precedence over `other` */ + def product[AA >: A, NN >: N, RR](other: CheckedT[F, AA, NN, RR])(implicit + F: Applicative[F] + ): CheckedT[F, AA, NN, (R, RR)] = + CheckedT(F.map(F.product(this.value, other.value)) { case (x, y) => x.product(y) }) + + /** Applicative operation. Consistent with [[flatMap]] according to Cats' laws. + * Errors from the function take precedence over the function argument (=this). + */ + def ap[AA >: A, NN >: N, RR](ff: CheckedT[F, AA, NN, R => RR])(implicit + F: Applicative[F] + ): CheckedT[F, AA, NN, RR] = + CheckedT(F.map(F.product(ff.value, this.value)) { case (f, x) => x.ap(f) }) + + def flatMap[AA >: A, NN >: N, RR]( + f: R => CheckedT[F, AA, NN, RR] + )(implicit F: Monad[F]): CheckedT[F, AA, NN, RR] = + CheckedT(F.flatMap(value) { + case abort @ Abort(_, _) => F.pure(abort) + case Result(nonaborts, result) => F.map(f(result).value)(_.prependNonaborts(nonaborts)) + }) + + def biflatMap[AA, NN >: N, RR](f: A => CheckedT[F, AA, NN, RR], g: R => CheckedT[F, AA, NN, RR])( + implicit F: FlatMap[F] + ): CheckedT[F, AA, NN, RR] = + CheckedT(F.flatMap(value) { + case Abort(abort, nonaborts) => f(abort).prependNonaborts(nonaborts).value + case Result(nonaborts, result) => g(result).prependNonaborts(nonaborts).value + }) + + def abortFlatMap[AA, NN >: N, RR >: R](f: A => CheckedT[F, AA, NN, RR])(implicit + F: Monad[F] + ): CheckedT[F, AA, NN, RR] = + biflatMap(f, CheckedT.resultT(_)) + + def subflatMap[AA >: A, NN >: N, RR](f: R => Checked[AA, NN, RR])(implicit + F: Functor[F] + ): CheckedT[F, AA, NN, RR] = + CheckedT(F.map(value)(_.flatMap(f))) + + def abortSubflatMap[AA, NN >: N, RR >: R]( + f: A => Checked[AA, NN, RR] + )(implicit F: Functor[F]): CheckedT[F, AA, NN, RR] = { + CheckedT(F.map(value)(_.abortFlatMap(f))) + } + + /** Merges aborts with nonaborts, using the given `default` result if no result is contained. 
*/ + def toResult[NN, A1 >: A <: NN, N1 >: N <: NN](default: => R)(implicit + F: Functor[F] + ): CheckedT[F, Nothing, NN, R] = + CheckedT[F, Nothing, NN, R](F.map(value)(_.toResult[NN, R, A1, N1](default))) + + /** Flatmap if the Checked is successful, otherwise return the current result value. */ + def flatMapIfSuccess[RR >: R, AA >: A, NN >: N]( + f: R => CheckedT[F, AA, NN, RR] + )(implicit F: Monad[F]): CheckedT[F, AA, NN, RR] = + CheckedT(F.flatMap(value) { + case abort @ Abort(_, _) => F.pure(abort) + case r @ Result(nonaborts, result) => + if (nonaborts.isEmpty) + f(result).value + else F.pure(r) + }) + + def foreach(f: R => Unit)(implicit F: Functor[F]): F[Unit] = F.map(value)(_.foreach(f)) + def exists(pred: R => Boolean)(implicit F: Functor[F]): F[Boolean] = F.map(value)(_.exists(pred)) + def forall(pred: R => Boolean)(implicit F: Functor[F]): F[Boolean] = F.map(value)(_.forall(pred)) + + /** Discards nonaborts. */ + def toEitherT(implicit F: Functor[F]): EitherT[F, A, R] = EitherT(F.map(value)(_.toEither)) + + /** Discards results if there are nonaborts. 
*/ + def toEitherTWithNonaborts[L, A1 >: A <: L, N1 >: N <: L](implicit + F: Functor[F] + ): EitherT[F, NonEmptyChain[L], R] = + EitherT(F.map(value)(_.toEitherWithNonaborts[L, A1, N1])) + + def toOptionT(implicit F: Functor[F]): OptionT[F, R] = OptionT(F.map(value)(_.toOption)) + + @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) + def widenResult[RR >: R]: CheckedT[F, A, N, RR] = this.asInstanceOf[CheckedT[F, A, N, RR]] + + @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) + def widenAbort[AA >: A]: CheckedT[F, AA, N, R] = this.asInstanceOf[CheckedT[F, AA, N, R]] + + @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) + def widenNonabort[NN >: N]: CheckedT[F, A, NN, R] = this.asInstanceOf[CheckedT[F, A, NN, R]] +} + +object CheckedT extends CheckedTInstances { + + def abort[N, R]: AbortPartiallyApplied[N, R] = new AbortPartiallyApplied[N, R] + + /** Uses the [[http://typelevel.org/cats/guidelines.html#partially-applied-type-params Partially Applied Type Params technique]] + * for ergonomics. 
+ */ + final private[util] class AbortPartiallyApplied[N, R](private val dummy: Boolean = true) + extends AnyVal { + def apply[F[_], A](abort: F[A])(implicit F: Functor[F]): CheckedT[F, A, N, R] = + CheckedT(F.map(abort)(Checked.abort)) + } + + def abortT[F[_], N, R]: AbortTPartiallyApplied[F, N, R] = new AbortTPartiallyApplied[F, N, R] + final private[util] class AbortTPartiallyApplied[F[_], N, R](private val dummy: Boolean = true) + extends AnyVal { + def apply[A](abort: A)(implicit F: Applicative[F]): CheckedT[F, A, N, R] = + CheckedT(F.pure(Checked.abort(abort))) + } + + def result[A, N]: ResultPartiallyApplied[A, N] = new ResultPartiallyApplied[A, N] + final private[util] class ResultPartiallyApplied[A, N](private val dummy: Boolean = true) + extends AnyVal { + def apply[F[_], R](result: F[R])(implicit F: Functor[F]): CheckedT[F, A, N, R] = + CheckedT(F.map(result)(Checked.result)) + } + + def resultT[F[_], A, N]: ResultTPartiallyApplied[F, A, N] = new ResultTPartiallyApplied[F, A, N] + final private[util] class ResultTPartiallyApplied[F[_], A, N](private val dummy: Boolean = true) + extends AnyVal { + def apply[R](result: R)(implicit F: Applicative[F]): CheckedT[F, A, N, R] = + CheckedT(F.pure(Checked.result(result))) + } + def pure[F[_], A, N]: ResultTPartiallyApplied[F, A, N] = resultT + + def continueWithResultT[F[_], A]: ContinueWithResultTPartiallyApplied[F, A] = + new ContinueWithResultTPartiallyApplied[F, A] + final private[util] class ContinueWithResultTPartiallyApplied[F[_], A]( + private val dummy: Boolean = true + ) extends AnyVal { + def apply[N, R](nonabort: N, result: R)(implicit F: Applicative[F]): CheckedT[F, A, N, R] = + CheckedT(F.pure(Checked.continueWithResult(nonabort, result))) + } + + def continue[A]: ContinuePartiallyApplied[A] = new ContinuePartiallyApplied[A] + final private[util] class ContinuePartiallyApplied[A](private val dummy: Boolean = true) + extends AnyVal { + def apply[F[_], N](nonabort: F[N])(implicit F: Applicative[F]): 
CheckedT[F, A, N, Unit] = + CheckedT(F.map(nonabort)(Checked.continue)) + } + + def continueT[F[_], A]: ContinueTPartiallyApplied[F, A] = new ContinueTPartiallyApplied[F, A] + final private[util] class ContinueTPartiallyApplied[F[_], A](private val dummy: Boolean = true) + extends AnyVal { + def apply[N](nonabort: N)(implicit F: Applicative[F]): CheckedT[F, A, N, Unit] = + CheckedT(F.pure(Checked.continue(nonabort))) + } + + def fromChecked[F[_]]: FromCheckedPartiallyApplied[F] = new FromCheckedPartiallyApplied[F] + final private[util] class FromCheckedPartiallyApplied[F[_]](private val dummy: Boolean = true) + extends AnyVal { + def apply[A, N, R](checked: Checked[A, N, R])(implicit + F: Applicative[F] + ): CheckedT[F, A, N, R] = + CheckedT(F.pure(checked)) + } + + /** Treat [[scala.Left$]] as abort */ + def fromEitherT[N]: FromEitherTPartiallyApplied[N] = new FromEitherTPartiallyApplied[N] + final private[util] class FromEitherTPartiallyApplied[N](private val dummy: Boolean = true) + extends AnyVal { + def apply[F[_], A, R](eitherT: EitherT[F, A, R])(implicit F: Functor[F]): CheckedT[F, A, N, R] = + CheckedT(F.map(eitherT.value)(Checked.fromEither)) + } + + /** Treat [[scala.Left$]] as non-abort with `default` as the result */ + def fromEitherTNonabort[A]: FromEitherTNonabortPartiallyApplied[A] = + new FromEitherTNonabortPartiallyApplied[A] + final private[util] class FromEitherTNonabortPartiallyApplied[A]( + private val dummy: Boolean = true + ) extends AnyVal { + def apply[F[_], N, R](default: => R, eitherT: EitherT[F, N, R])(implicit + F: Functor[F] + ): CheckedT[F, A, N, R] = + CheckedT(F.map(eitherT.value)(Checked.fromEitherNonabort(default))) + } +} + +trait CheckedTInstances extends CheckedTInstances1 { + + implicit def cantonUtilMonadErrorForCheckedT[F[_], A, N](implicit + F0: Monad[F] + ): MonadError[CheckedT[F, A, N, *], A] = + new CheckedTMonadError[F, A, N] { + implicit val F = F0 + } + + implicit def cantonUtilParallelForCheckedT[M[_], A, N](implicit 
+ P: Parallel[M] + ): Parallel.Aux[CheckedT[M, A, N, *], Nested[P.F, Checked[A, N, *], *]] = + new Parallel[CheckedT[M, A, N, *]] { + type F[x] = Nested[P.F, Checked[A, N, *], x] + + implicit val monadEither: Monad[Checked[A, N, *]] = + Checked.cantonUtilMonadErrorForChecked + + def applicative: Applicative[Nested[P.F, Checked[A, N, *], *]] = + cats.data.Nested.catsDataApplicativeForNested(P.applicative, implicitly) + + def monad: Monad[CheckedT[M, A, N, *]] = CheckedT.cantonUtilMonadErrorForCheckedT(P.monad) + + def sequential: Nested[P.F, Checked[A, N, *], *] ~> CheckedT[M, A, N, *] = + new (Nested[P.F, Checked[A, N, *], *] ~> CheckedT[M, A, N, *]) { + def apply[R](nested: Nested[P.F, Checked[A, N, *], R]): CheckedT[M, A, N, R] = { + val mva = P.sequential(nested.value) + CheckedT(mva) + } + } + + def parallel: CheckedT[M, A, N, *] ~> Nested[P.F, Checked[A, N, *], *] = + new (CheckedT[M, A, N, *] ~> Nested[P.F, Checked[A, N, *], *]) { + def apply[R](checkedT: CheckedT[M, A, N, R]): Nested[P.F, Checked[A, N, *], R] = { + val fea = P.parallel(checkedT.value) + Nested(fea) + } + } + } +} + +trait CheckedTInstances1 extends CheckedTInstances2 { + implicit def cantonUtilApplicativeForCheckedT[F[_], A, N](implicit + F0: Applicative[F] + ): Applicative[CheckedT[F, A, N, *]] = + new CheckedTApplicative[F, A, N] { + implicit val F = F0 + } +} + +trait CheckedTInstances2 { + implicit def cantonUtilFunctorForCheckedT[F[_], A, N](implicit + F0: Functor[F] + ): Functor[CheckedT[F, A, N, *]] = + new CheckedTFunctor[F, A, N] { + implicit val F = F0 + } +} + +private[util] trait CheckedTFunctor[F[_], A, N] extends Functor[CheckedT[F, A, N, *]] { + implicit val F: Functor[F] + override def map[R, RR](checkedT: CheckedT[F, A, N, R])(f: R => RR): CheckedT[F, A, N, RR] = + checkedT.map(f) +} + +private[util] trait CheckedTApplicative[F[_], A, N] extends Applicative[CheckedT[F, A, N, *]] { + implicit val F: Applicative[F] + + override def pure[R](x: R): CheckedT[F, A, N, R] = 
CheckedT(F.pure(Checked.result(x))) + + override def ap[R, S](ff: CheckedT[F, A, N, R => S])( + fa: CheckedT[F, A, N, R] + ): CheckedT[F, A, N, S] = fa.ap(ff) +} + +private[util] trait CheckedTMonadError[F[_], A, N] + extends MonadError[CheckedT[F, A, N, *], A] + with CheckedTFunctor[F, A, N] { + implicit val F: Monad[F] + override def pure[R](result: R): CheckedT[F, A, N, R] = CheckedT.pure(result) + + override def flatMap[R, RR](x: CheckedT[F, A, N, R])( + f: R => CheckedT[F, A, N, RR] + ): CheckedT[F, A, N, RR] = + x.flatMap(f) + + override def tailRecM[S, R]( + x: S + )(f: S => CheckedT[F, A, N, Either[S, R]]): CheckedT[F, A, N, R] = { + def step: ((Chain[N], S)) => F[Either[(Chain[N], S), Checked[A, N, R]]] = { + case (nonaborts1, s) => + import Checked.{Abort, Result} + F.map(f(s).value) { + case abort @ Abort(_, _) => Right(abort.prependNonaborts(nonaborts1)) + case Result(nonaborts2, sr) => + val nonaborts = Chain.concat(nonaborts1, nonaborts2) + sr.bimap(l => (nonaborts, l), r => Result(nonaborts, r)) + } + } + + CheckedT(F.tailRecM((Chain.empty[N], x))(step)) + } + + override def raiseError[R](abort: A): CheckedT[F, A, N, R] = CheckedT.abortT(abort) + + override def handleErrorWith[R](fa: CheckedT[F, A, N, R])( + f: A => CheckedT[F, A, N, R] + ): CheckedT[F, A, N, R] = + fa.abortFlatMap(f) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/DelayUtil.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/DelayUtil.scala new file mode 100644 index 0000000000..1c79298220 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/DelayUtil.scala @@ -0,0 +1,101 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.util

import com.daml.nameof.NameOf.functionFullName
import com.digitalasset.canton.DiscardOps
import com.digitalasset.canton.concurrent.Threading
import com.digitalasset.canton.lifecycle.{
  FutureUnlessShutdown,
  OnShutdownRunner,
  PerformUnlessClosing,
  UnlessShutdown,
}
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.tracing.TraceContext

import java.util.concurrent.ScheduledExecutorService
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{Future, Promise}

/** Utility to create futures that succeed after a given delay.
  *
  * Inspired by the odelay library, but with a restricted interface to avoid hazardous effects that could be caused
  * by the use of a global executor service.
  *
  * TODO(i4245): Replace all usages by Clock.
  */
object DelayUtil extends NamedLogging {

  override protected val loggerFactory: NamedLoggerFactory =
    NamedLoggerFactory.unnamedKey("purpose", "global")

  // use a daemon thread for the executor as it doesn't get explicitly shutdown
  private val scheduledExecutorService =
    Threading.singleThreadScheduledExecutor("delay-util", noTracingLogger, daemon = true)

  /** Creates a future that succeeds after the given delay.
    * The caller must make sure that the future is used only in execution contexts that have not yet been closed.
    * Use the `delay(String, FiniteDuration, PerformUnlessClosing)` overload if this might be an issue.
    *
    * Try to use `Clock` instead!
    */
  def delay(delay: FiniteDuration): Future[Unit] =
    this.delay(scheduledExecutorService, delay, _.success(()))

  /** Creates a future that succeeds after the given delay provided that `performUnlessClosing` has not yet been closed then.
    *
    * Try to use `Clock` instead!
    */
  def delay(name: String, delay: FiniteDuration, performUnlessClosing: PerformUnlessClosing)(
      implicit traceContext: TraceContext
  ): Future[Unit] =
    this.delay(
      scheduledExecutorService,
      delay,
      { promise =>
        // If we are already closing, the promise is intentionally left incomplete.
        val _ = performUnlessClosing.performUnlessClosing(name)(promise.success(()))
      },
    )

  // Shared implementation: schedules `complete` on `executor` once `delay` has elapsed.
  private[util] def delay(
      executor: ScheduledExecutorService,
      delay: FiniteDuration,
      complete: Promise[Unit] => Unit,
  ): Future[Unit] = {
    val promise = Promise[Unit]()
    executor.schedule((() => complete(promise)): Runnable, delay.length, delay.unit)
    promise.future
  }

  /** Creates a future that succeeds after the given delay provided that `onShutdownRunner` has not yet been closed then.
    * The future completes fast with UnlessShutdown.AbortedDueToShutdown if `onShutdownRunner` is already closing.
    */
  def delayIfNotClosing(name: String, delay: FiniteDuration, onShutdownRunner: OnShutdownRunner)(
      implicit traceContext: TraceContext
  ): FutureUnlessShutdown[Unit] = {
    val promise = Promise[UnlessShutdown[Unit]]()
    val future = promise.future

    import com.digitalasset.canton.lifecycle.RunOnShutdown
    // Register a shutdown task so the returned future completes fast on shutdown
    // instead of waiting for the full delay.
    val cancelToken = onShutdownRunner.runOnShutdown(new RunOnShutdown() {
      val name = s"$functionFullName-shutdown"
      def done = promise.isCompleted
      def run(): Unit = {
        promise.trySuccess(UnlessShutdown.AbortedDueToShutdown).discard
      }
    })

    val trySuccess: Runnable = { () =>
      promise.trySuccess(UnlessShutdown.Outcome(())).discard
      // No need to complete the promise on shutdown with an AbortedDueToShutdown since we succeeded, and also
      // keeps the list of shutdown tasks from growing indefinitely with each retry
      onShutdownRunner.cancelShutdownTask(cancelToken)
    }

    // TODO(i4245): Use Clock instead
    scheduledExecutorService.schedule(trySuccess, delay.length, delay.unit)
    FutureUnlessShutdown(future)
  }
}
diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/EitherTUtil.scala
b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/EitherTUtil.scala new file mode 100644 index 0000000000..b12e1531c3 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/EitherTUtil.scala @@ -0,0 +1,182 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util + +import cats.data.EitherT +import cats.syntax.either.* +import cats.{Applicative, Functor} +import com.daml.metrics.api.MetricHandle.Timer +import com.digitalasset.canton.DiscardOps +import com.digitalasset.canton.lifecycle.UnlessShutdown.Outcome +import com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, UnlessShutdown} +import com.digitalasset.canton.logging.ErrorLoggingContext +import com.digitalasset.canton.util.Thereafter.syntax.* +import org.slf4j.event.Level + +import scala.concurrent.{ExecutionContext, Future} +import scala.util.control.NonFatal +import scala.util.{Failure, Success, Try} + +/** Utility functions for the `cats ` [[cats.data.EitherT]] monad transformer. + * https://typelevel.org/cats/datatypes/eithert.html + */ +object EitherTUtil { + + /** Similar to `finallyET` but will only call the provided handler if `fn` returns a left/error or fails. */ + def onErrorOrFailure[A, B](errorHandler: () => Unit)( + fn: => EitherT[Future, A, B] + )(implicit executionContext: ExecutionContext): EitherT[Future, A, B] = + fn.thereafter { + case Failure(_) => + errorHandler() + case Success(Left(_)) => + errorHandler() + case _ => () + } + + def onErrorOrFailureUnlessShutdown[A, B](errorHandler: () => Unit)( + fn: => EitherT[FutureUnlessShutdown, A, B] + )(implicit executionContext: ExecutionContext): EitherT[FutureUnlessShutdown, A, B] = + fn.thereafter { + case Failure(_) => + errorHandler() + case Success(UnlessShutdown.Outcome(Left(_))) => + errorHandler() + case _ => () + } + + /** Lifts an `if (cond) then ... 
else ()` into the `EitherT` applicative */ + def ifThenET[F[_], L](cond: Boolean)(`then`: => EitherT[F, L, _])(implicit + F: Applicative[F] + ): EitherT[F, L, Unit] = + if (cond) Functor[EitherT[F, L, *]].void(`then`) else EitherT.pure[F, L](()) + + def condUnitET[F[_]]: CondUnitEitherTPartiallyApplied[F] = + new CondUnitEitherTPartiallyApplied[F]() + private[util] final class CondUnitEitherTPartiallyApplied[F[_]](private val dummy: Boolean = true) + extends AnyVal { + def apply[L](condition: Boolean, fail: => L)(implicit F: Applicative[F]): EitherT[F, L, Unit] = + EitherT.cond[F](condition, (), fail) + } + + def leftSubflatMap[F[_], A, B, C, BB >: B](x: EitherT[F, A, B])(f: A => Either[C, BB])(implicit + F: Functor[F] + ): EitherT[F, C, BB] = + EitherT(F.map(x.value)(_.leftFlatMap(f))) + + /** Construct an EitherT from a possibly failed future. */ + def fromFuture[E, A](fut: Future[A], errorHandler: Throwable => E)(implicit + ec: ExecutionContext + ): EitherT[Future, E, A] = + liftFailedFuture(fut.map(Right(_)), errorHandler) + + /** Lift a failed future into a Left value. 
*/ + def liftFailedFuture[E, A](fut: Future[Either[E, A]], errorHandler: Throwable => E)(implicit + executionContext: ExecutionContext + ): EitherT[Future, E, A] = + EitherT(fut.recover[Either[E, A]] { case NonFatal(x) => + errorHandler(x).asLeft[A] + }) + + /** Log `message` if `result` fails with an exception or results in a `Left` */ + def logOnError[E, R](result: EitherT[Future, E, R], message: String, level: Level = Level.ERROR)( + implicit + executionContext: ExecutionContext, + loggingContext: ErrorLoggingContext, + ): EitherT[Future, E, R] = { + + def logError(v: Try[Either[E, R]]): Unit = + v match { + case Success(Left(err)) => LoggerUtil.logAtLevel(level, message + " " + err.toString) + case Failure(NonFatal(err)) => LoggerUtil.logThrowableAtLevel(level, message, err) + case _ => () + } + + result.thereafter(logError) + } + + /** Log `message` if `result` fails with an exception or results in a `Left` */ + def logOnErrorU[E, R]( + result: EitherT[FutureUnlessShutdown, E, R], + message: String, + level: Level = Level.ERROR, + )(implicit + executionContext: ExecutionContext, + loggingContext: ErrorLoggingContext, + ): EitherT[FutureUnlessShutdown, E, R] = { + + def logError(v: Try[UnlessShutdown[Either[E, R]]]): Unit = + v match { + case Success(Outcome(Left(err))) => + LoggerUtil.logAtLevel(level, message + " " + err.toString) + case Failure(NonFatal(err)) => LoggerUtil.logThrowableAtLevel(level, message, err) + case _ => () + } + + result.thereafter(logError) + } + + /** Discard `eitherT` and log an error if it does not result in a `Right`. + * This is useful to document that an `EitherT[Future,_,_]` is intentionally not being awaited upon. 
+ */ + def doNotAwait( + eitherT: EitherT[Future, _, _], + failureMessage: => String, + level: Level = Level.ERROR, + )(implicit executionContext: ExecutionContext, loggingContext: ErrorLoggingContext): Unit = + logOnError(eitherT, failureMessage, level = level).discard + + def doNotAwaitUS( + eitherT: EitherT[FutureUnlessShutdown, _, _], + message: => String, + failLevel: Level = Level.ERROR, + shutdownLevel: Level = Level.DEBUG, + )(implicit executionContext: ExecutionContext, loggingContext: ErrorLoggingContext): Unit = { + val failureMessage = s"$message failed" + val shutdownMessage = s"$message aborted due to shutdown" + logOnErrorU(eitherT, failureMessage, level = failLevel).value + .map(_ => ()) + .onShutdown(LoggerUtil.logAtLevel(shutdownLevel, shutdownMessage)) + .discard + } + + /** Measure time of EitherT-based calls, inspired by upstream com.daml.metrics.Timed.future */ + def timed[E, R](timerMetric: Timer)( + code: => EitherT[Future, E, R] + )(implicit executionContext: ExecutionContext): EitherT[Future, E, R] = { + val timer = timerMetric.startAsync() + code.thereafter { _ => + timer.stop() + } + } + + /** Transform an EitherT into a Future.failed on left + * + * Comes handy when having to return io.grpc.StatusRuntimeExceptions + */ + def toFuture[L <: Throwable, R](x: EitherT[Future, L, R])(implicit + executionContext: ExecutionContext + ): Future[R] = + x.foldF(Future.failed, Future.successful) + + def toFutureUnlessShutdown[L <: Throwable, R](x: EitherT[FutureUnlessShutdown, L, R])(implicit + executionContext: ExecutionContext + ): FutureUnlessShutdown[R] = + x.foldF(FutureUnlessShutdown.failed, FutureUnlessShutdown.pure) + + def unit[A]: EitherT[Future, A, Unit] = EitherT(Future.successful(().asRight[A])) + + def unitUS[A]: EitherT[FutureUnlessShutdown, A, Unit] = EitherT( + FutureUnlessShutdown.pure(().asRight[A]) + ) + + object syntax { + implicit class FunctorToEitherT[F[_]: Functor, T](f: F[T]) { + + /** Converts any F[T] into EitherT[F, A, 
T] */
      def toEitherTRight[A]: EitherT[F, A, T] =
        EitherT.right[A](f)
    }
  }
}
diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/EitherUtil.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/EitherUtil.scala
new file mode 100644
index 0000000000..41db11fa99
--- /dev/null
+++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/EitherUtil.scala
@@ -0,0 +1,53 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.util

import cats.syntax.either.*

import scala.concurrent.Future

/** Small convenience helpers and extension methods for plain [[scala.Either]] values. */
object EitherUtil {

  def unit[A]: Either[A, Unit] = ().asRight[A]

  implicit class RichEither[L, R](val either: Either[L, R]) extends AnyVal {

    /** @param f the side-effecting function to evaluate on the left value
      * @return this, after evaluation of the side effecting function f if this is a left.
      */
    def tapLeft(f: L => Unit): Either[L, R] = either match {
      case Left(value) =>
        f(value)
        either

      case Right(_) => either
    }

    /** @param f the side-effecting function to evaluate on the right value
      * @return this, after evaluation of the side effecting function f if this is a right.
      */
    def tapRight(f: R => Unit): Either[L, R] = either match {
      case Right(value) =>
        f(value)
        either

      case Left(_) => either
    }

    /** Converts to a [[scala.concurrent.Future]]; a left becomes a failed future via `f`. */
    def toFuture(f: L => Throwable): Future[R] = either match {
      case Left(value) => Future.failed(f(value))
      case Right(value) => Future.successful(value)
    }
  }

  implicit class RichEitherIterable[L, R](val eithers: Iterable[Either[L, R]]) extends AnyVal {
    // Keeps only the left values, preserving iteration order.
    def collectLeft: Iterable[L] = eithers.collect { case Left(value) => value }
    // Keeps only the right values, preserving iteration order.
    def collectRight: Iterable[R] = eithers.collect { case Right(value) => value }
  }

  /** If `condition` is satisfied, return `Right(())`, otherwise, return `Left(fail)`.
 */
  def condUnitE[L](condition: Boolean, fail: => L): Either[L, Unit] =
    Either.cond(condition, (), fail)
}
diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/ErrorUtil.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/ErrorUtil.scala
new file mode 100644
index 0000000000..260fdff60d
--- /dev/null
+++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/ErrorUtil.scala
@@ -0,0 +1,131 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.digitalasset.canton.util

import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
import com.digitalasset.canton.logging.ErrorLoggingContext

import java.io.{PrintWriter, StringWriter}
import scala.concurrent.Future
import scala.util.Failure
import scala.util.control.NonFatal

/** Helpers for logging and raising internal errors in a uniform way. */
object ErrorUtil {

  /** Yields a string representation of a throwable (including stack trace and causes).
    */
  def messageWithStacktrace(t: Throwable): String = {
    val result = new StringWriter()
    t.printStackTrace(new PrintWriter(result))
    result.toString
  }

  /** Logs and rethrows any throwable.
    * @param valueOnThrowable if defined, this value is returned instead of rethrowing a non-fatal throwable
    */
  def withThrowableLogging[T](action: => T, valueOnThrowable: Option[T] = None)(implicit
      loggingContext: ErrorLoggingContext
  ): T =
    try {
      action
    } catch {
      case t: Throwable =>
        loggingContext.logger.error("Unexpected exception", t)(loggingContext.traceContext)
        valueOnThrowable match {
          // Fatal throwables are always rethrown, even if a fallback value is given.
          case Some(value) if NonFatal(t) => value
          case Some(_) | None => throw t
        }
    }

  val internalErrorMessage: String = "An internal error has occurred."

  // Single place that formats and logs internal errors at ERROR level.
  private def logInternalError(t: Throwable)(implicit loggingContext: ErrorLoggingContext): Unit =
    loggingContext.logger.error(internalErrorMessage, t)(loggingContext.traceContext)

  /** Throws a throwable and logs it at ERROR level with proper formatting.
    */
  def internalError(t: Throwable)(implicit loggingContext: ErrorLoggingContext): Nothing = {
    logInternalError(t)
    throw t
  }

  /** Wraps a throwable in [[scala.util.Failure]] and logs it at ERROR level with proper formatting */
  def internalErrorTry(
      t: Throwable
  )(implicit loggingContext: ErrorLoggingContext): Failure[Nothing] = {
    logInternalError(t)
    Failure(t)
  }

  /** If `condition` is not satisfied, log an ERROR and throw an IllegalArgumentException
    * @throws java.lang.IllegalArgumentException
    */
  def requireArgument(condition: Boolean, message: => String)(implicit
      loggingContext: ErrorLoggingContext
  ): Unit =
    if (!condition) internalError(new IllegalArgumentException(message))

  /** If `condition` is not satisfied, log an ERROR and throw an IllegalStateException
    * @throws java.lang.IllegalStateException
    */
  def requireState(condition: Boolean, message: => String)(implicit
      loggingContext: ErrorLoggingContext
  ): Unit =
    if (!condition) invalidState(message)

  /** Indicate an illegal state by logging an ERROR and throw an IllegalStateException
    * @throws java.lang.IllegalStateException
    */
  def invalidState(message: => String)(implicit loggingContext: ErrorLoggingContext): Nothing =
    internalError(new IllegalStateException(message))

  /** Indicate an illegal state by logging an ERROR and return a IllegalStateException in a failed future.
    * @return The throwable in a failed future.
    */
  def invalidStateAsync(
      message: => String
  )(implicit loggingContext: ErrorLoggingContext): Future[Nothing] =
    internalErrorAsync(new IllegalStateException(message))

  /** Log a throwable at ERROR level with proper formatting.
    * @return The throwable in a failed future.
    */
  def internalErrorAsync(
      t: Throwable
  )(implicit loggingContext: ErrorLoggingContext): Future[Nothing] = {
    logInternalError(t)
    Future.failed(t)
  }

  /** Log a throwable at ERROR level with proper formatting.
    * @return The throwable in a failed [[com.digitalasset.canton.lifecycle.FutureUnlessShutdown]].
    */
  def internalErrorAsyncShutdown(
      t: Throwable
  )(implicit loggingContext: ErrorLoggingContext): FutureUnlessShutdown[Nothing] = {
    logInternalError(t)
    FutureUnlessShutdown.failed(t)
  }

  /** If `condition` is not satisfied, log an ERROR and return a failed future with an [[java.lang.IllegalArgumentException]]
    */
  def requireArgumentAsync(condition: Boolean, message: => String)(implicit
      loggingContext: ErrorLoggingContext
  ): Future[Unit] =
    if (condition) Future.unit else internalErrorAsync(new IllegalArgumentException(message))

  /** If `condition` is not satisfied, log an ERROR and return a failed future with an [[java.lang.IllegalStateException]]
    */
  def requireStateAsync(condition: Boolean, message: => String)(implicit
      loggingContext: ErrorLoggingContext
  ): Future[Unit] =
    if (condition) Future.unit else internalErrorAsync(new IllegalStateException(message))

  /** If `condition` is not satisfied, log an ERROR and return a failed FutureUnlessShutdown with an [[java.lang.IllegalStateException]]
    */
  def requireStateAsyncShutdown(condition: Boolean, message: => String)(implicit
      loggingContext: ErrorLoggingContext
  ): FutureUnlessShutdown[Unit] =
    if (condition) FutureUnlessShutdown.unit
    else internalErrorAsyncShutdown(new IllegalStateException(message))

}
diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/FutureInstances.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/FutureInstances.scala
new file mode 100644
index 0000000000..76cbef9b8e
--- /dev/null
+++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/FutureInstances.scala
@@ -0,0 +1,39 @@
// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util + +import cats.arrow.FunctionK +import cats.{Applicative, Monad, Parallel, ~>} + +import scala.concurrent.{ExecutionContext, Future} + +object FutureInstances { + + def parallelApplicativeFuture(implicit ec: ExecutionContext): Applicative[Future] = + new Applicative[Future] { + override def pure[A](x: A): Future[A] = Future.successful(x) + + override def ap[A, B](ff: Future[A => B])(fa: Future[A]): Future[B] = + ff.zipWith(fa)(_.apply(_)) + + override def product[A, B](fa: Future[A], fb: Future[B]): Future[(A, B)] = fa.zip(fb) + } + + implicit def parallelFuture(implicit ec: ExecutionContext): Parallel[Future] = + new Parallel[Future] { + override type F[X] = Future[X] + + def parallel: Future ~> Future = FunctionK.id + + def sequential: Future ~> Future = FunctionK.id + + // The standard applicative instance on Future that runs everything in parallel + def applicative: Applicative[Future] = parallelApplicativeFuture + + // The Cats monad instance for Future runs applicative operations sequentially since 2.7.0 + // but there are no guarantees w.r.t. the evaluation behaviour. + def monad: Monad[Future] = Monad[Future] + } + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/FutureUtil.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/FutureUtil.scala new file mode 100644 index 0000000000..a135fcc609 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/FutureUtil.scala @@ -0,0 +1,132 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util + +import com.digitalasset.canton.concurrent.DirectExecutionContext +import com.digitalasset.canton.lifecycle.{CloseContext, FutureUnlessShutdown} +import com.digitalasset.canton.logging.ErrorLoggingContext +import org.slf4j.event.Level + +import java.util.regex.Pattern +import scala.concurrent.{ExecutionContext, Future} +import scala.math.Ordered.* +import scala.util.Try +import scala.util.control.NonFatal + +object FutureUtil { + + /** If the future fails, log the associated error and re-throw. The returned future completes after logging. + */ + def logOnFailure[T]( + future: Future[T], + failureMessage: => String, + onFailure: Throwable => Unit = _ => (), + level: => Level = Level.ERROR, + closeContext: Option[CloseContext] = None, + )(implicit loggingContext: ErrorLoggingContext): Future[T] = { + implicit val ec: ExecutionContext = DirectExecutionContext(loggingContext.noTracingLogger) + future.recover { + // Catching NonFatal only, because a future cannot fail with fatal throwables. + // Also, it may be a bad idea to run a callback after an OutOfMemoryError. + case NonFatal(err) => + // if the optional close context is closing down, log at most with INFO + if (closeContext.exists(_.context.isClosing) && level > Level.INFO) { + LoggerUtil.logThrowableAtLevel( + Level.INFO, + s"Logging the following failure on INFO instead of $level due to an ongoing shutdown: $failureMessage", + err, + ) + } else { + LoggerUtil.logThrowableAtLevel(level, failureMessage, err) + } + try { + onFailure(err) + } catch { + case t: Throwable => // Catching all throwables, because we are merely logging. + // Always log at ERROR independent of `level` because we don't expect `onFailure` to throw. 
+ loggingContext.logger.error( + "An unexpected exception occurred while handling a failed future.", + t, + )(loggingContext.traceContext) + t.addSuppressed(err) + throw t + } + throw err + } + } + + /** If the future fails, log the associated error and re-throw. The returned future completes after logging. + */ + def logOnFailureUnlessShutdown[T]( + future: FutureUnlessShutdown[T], + failureMessage: => String, + onFailure: Throwable => Unit = _ => (), + level: => Level = Level.ERROR, + closeContext: Option[CloseContext] = None, + )(implicit loggingContext: ErrorLoggingContext): FutureUnlessShutdown[T] = { + FutureUnlessShutdown( + logOnFailure(future.unwrap, failureMessage, onFailure, level, closeContext) + ) + } + + /** Discard `future` and log an error if it does not complete successfully. + * This is useful to document that a `Future` is intentionally not being awaited upon. + */ + def doNotAwait( + future: Future[?], + failureMessage: => String, + onFailure: Throwable => Unit = _ => (), + level: => Level = Level.ERROR, + closeContext: Option[CloseContext] = None, + )(implicit loggingContext: ErrorLoggingContext): Unit = { + val _ = logOnFailure(future, failureMessage, onFailure, level, closeContext) + } + + /** [[doNotAwait]] but for FUS + */ + def doNotAwaitUnlessShutdown( + future: FutureUnlessShutdown[?], + failureMessage: => String, + onFailure: Throwable => Unit = _ => (), + level: => Level = Level.ERROR, + closeContext: Option[CloseContext] = None, + )(implicit loggingContext: ErrorLoggingContext): Unit = { + doNotAwait(future.unwrap, failureMessage, onFailure, level, closeContext) + } + + /** Variant of [[doNotAwait]] that also catches non-fatal errors thrown while constructing the future. 
*/ + def catchAndDoNotAwait( + future: => Future[?], + failureMessage: => String, + onFailure: Throwable => Unit = _ => (), + level: => Level = Level.ERROR, + )(implicit loggingContext: ErrorLoggingContext): Unit = { + val wrappedFuture = Future.fromTry(Try(future)).flatten + doNotAwait(wrappedFuture, failureMessage, onFailure, level) + } + + lazy val defaultStackTraceFilter: Thread => Boolean = { + // Include threads directly used by Canton (incl. tests). + // Excludes threads used by the ledger api server, grpc, ... + val patterns = Seq( + ".*-env-execution-context.*", + ".*-test-execution-context.*", + ".*-env-scheduler.*", + ".*-test-execution-context-monitor.*", + ".*-wallclock.*", + ".*-remoteclock.*", + ".*delay-util.*", + ".*-ccf-execution-context.*", + ".*-fabric-sequencer-execution-context.*", + ".*-db-execution-context.*", + "ScalaTest-run.*", + ) + + // Take the disjunction of patterns. + val isRelevant = Pattern + .compile(patterns.map(p => s"($p)").mkString("|")) + .asMatchPredicate() + thread => isRelevant.test(thread.getName) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/HasFlushFuture.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/HasFlushFuture.scala new file mode 100644 index 0000000000..196269119b --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/HasFlushFuture.scala @@ -0,0 +1,120 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util + +import com.digitalasset.canton.concurrent.DirectExecutionContext +import com.digitalasset.canton.config.NonNegativeDuration +import com.digitalasset.canton.lifecycle.SyncCloseable +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.{DiscardOps, config} + +import scala.collection.concurrent.TrieMap +import scala.concurrent.{ExecutionContext, Future, Promise} +import scala.util.Success + +/** Provides a single flush [[scala.concurrent.Future]] that runs asynchronously. Tasks can be chained onto the flush + * future, although they will not run sequentially. + */ +trait HasFlushFuture + extends + // This trait must come after the NamedLogging trait in the class linearization to avoid initialization issue + // with NamedLogging.logger. We therefore explicitly extend NamedLogging and do not declare it as a self type. + NamedLogging { + + /** Adds the task `future` to the flush future so that [[doFlush]] completes only after `future` has completed. + * Logs an error if the `future` fails with an exception. + */ + protected def addToFlushAndLogError( + name: String + )(future: Future[_])(implicit loggingContext: ErrorLoggingContext): Unit = { + addToFlushWithoutLogging(name)(FutureUtil.logOnFailure(future, s"$name failed")) + } + + /** Adds the task `future` to the flush future so that [[doFlush]] completes only after `future` has completed. + * The caller is responsible for logging any exceptions thrown inside the future. 
+ */ + protected def addToFlushWithoutLogging(name: String)(future: Future[_]): Unit = + if (future.isCompleted) () + else { + val promise = Promise[Unit]() + val newTask = new HasFlushFuture.NamedTask(name, promise.future) + tasks.put(newTask, ()).discard + // Make sure to remove the task again when the future is done. + // This runs via a direct execution context as part of the task's execution context + // so that we don't have to worry about execution contexts being closed here. + val removeF = future.transform { _ => + tasks.remove(newTask).discard + Success(()) + }(directExecutionContext) + promise.completeWith(removeF) + } + + /** Returns a future that completes after all added futures have completed. The returned future never fails. */ + protected def doFlush(): Future[Unit] = { + val snapshot = tasks.readOnlySnapshot().keys + flushFutureForSnapshot(snapshot) + } + + // Invariant: The contained futures never fail with an exception + private val tasks: TrieMap[HasFlushFuture.NamedTask, Unit] = + TrieMap.empty[HasFlushFuture.NamedTask, Unit] + + private val directExecutionContext: ExecutionContext = DirectExecutionContext(noTracingLogger) + + /** Returns the list of currently incomplete tasks. + * Use only for inspection and debugging. + */ + def snapshotIncomplete: Seq[String] = + tasks.readOnlySnapshot().keys.filterNot(_.future.isCompleted).map(_.name).toSeq + + protected def flushCloseable(name: String, timeout: NonNegativeDuration): SyncCloseable = { + implicit val traceContext: TraceContext = TraceContext.empty + val snapshot = tasks.readOnlySnapshot().keys + // It suffices to build the flush future only once, + // but for pretty-printing we want to build the description for each log message + // so that we can filter out the already completed tasks. 
+ val future = flushFutureForSnapshot(snapshot) + def mkDescription(): String = { + s"$name with tasks ${snapshot.filter(!_.future.isCompleted).mkString(", ")}" + } + SyncCloseable(name, timeout.await_(mkDescription())(future)) + } + + private def flushFutureForSnapshot(snapshot: Iterable[HasFlushFuture.NamedTask]): Future[Unit] = { + snapshot.foldLeft(Future.unit) { (acc, task) => + val future = task.future + if (future.isCompleted) acc + else { + acc.zipWith(future)((_, _) => ())(directExecutionContext) + } + } + } +} + +object HasFlushFuture { + // Not a case class so that we get by-reference equality + private class NamedTask(val name: String, val future: Future[_]) extends PrettyPrinting { + override def pretty: Pretty[NamedTask] = + prettyOfString(x => if (x.future.isCompleted) x.name + " (completed)" else x.name) + } +} + +/** Stand-alone implementation of [[HasFlushFuture]] */ +class FlushFuture(name: String, override protected val loggerFactory: NamedLoggerFactory) + extends HasFlushFuture { + + override def addToFlushAndLogError(name: String)(future: Future[_])(implicit + loggingContext: ErrorLoggingContext + ): Unit = super.addToFlushAndLogError(name)(future) + + override def addToFlushWithoutLogging(name: String)(future: Future[_]): Unit = + super.addToFlushWithoutLogging(name)(future) + + def flush(): Future[Unit] = doFlush() + + def asCloseable(timeout: config.NonNegativeDuration): SyncCloseable = + flushCloseable(name, timeout) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/HexString.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/HexString.scala new file mode 100644 index 0000000000..1fae52c3e1 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/HexString.scala @@ -0,0 +1,54 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.util
+
+import com.google.protobuf.ByteString
+
+/** Conversion functions to and from hex strings. */
+object HexString {
+
+  def toHexString(bytes: ByteString): String = toHexString(bytes.toByteArray)
+
+  /** Convert a ByteString to hex-string.
+    * The output size will be equal to the length configured if it's even, or
+    * to the length + 1 if it's odd.
+    */
+  def toHexString(bytes: ByteString, length: Int): String = {
+    // Every byte is 2 Hex characters, this is why we divide by 2
+    val maxlength = bytes.size() min Math.round(length / 2.toDouble).toInt
+    toHexString(
+      bytes.substring(0, maxlength).toByteArray
+    )
+  }
+
+  def toHexString(bytes: Array[Byte]): String = bytes.map(b => f"$b%02x").mkString("")
+
+  /** Parse a hex-string `s` to a byte array. */
+  @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf"))
+  def parse(s: String): Option[Array[Byte]] = {
+    val optBytes: Iterator[Option[Byte]] = s
+      .grouped(2)
+      .map(s2 =>
+        for {
+          _ <- if (s2.lengthCompare(2) != 0) None else Some(())
+          b1 = Character.digit(s2.charAt(0), 16)
+          b2 = Character.digit(s2.charAt(1), 16)
+          _ <- if (b1 == -1 || b2 == -1) None else Some(())
+          b = ((b1 << 4) + b2).asInstanceOf[Byte]
+        } yield b
+      )
+    optBytes
+      .foldRight(Option.apply(Seq[Byte]())) { case (bOrErr, bytesOrErr) =>
+        for {
+          b <- bOrErr
+          bytes <- bytesOrErr
+        } yield bytes.+:(b)
+      }
+      .map(_.toArray)
+  }
+
+  def parseToByteString(s: String): Option[ByteString] = {
+    parse(s).map(ByteString.copyFrom)
+  }
+}
diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/LazyValWithContext.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/LazyValWithContext.scala
new file mode 100644
index 0000000000..96501a266f
--- /dev/null
+++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/LazyValWithContext.scala
@@ -0,0 +1,56 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH 
and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util + +import scala.concurrent.blocking + +/** "Implements" a `lazy val` field whose initialization expression can refer to implicit context information of type `Context`. + * The "val" is initialized upon the first call to [[get]], using the context information supplied for this call, + * like a `lazy val`. + * + * Instead of a plain lazy val field without context + *
class C { lazy val f: T = initializer }
+ * use the following code to pass in a `Context`: + *
+  * class C {
+  *   private[this] val _f: LazyValWithContext[T, Context] = new LazyValWithContext[T, Context](context => initializer)
+  *   def f(implicit context: Context): T = _f.get
+  * }
+  * 
+ * + * This class implements the same scheme as how the Scala 2.13 compiler implements `lazy val`s, + * as explained on https://docs.scala-lang.org/sips/improved-lazy-val-initialization.html (version V1) + * along with its caveats. + * + * @see TracedLazyVal To be used when the initializer wants to log something using the logger of the surrounding class + * @see ErrorLoggingLazyVal To be used when the initializer wants to log errors using the logger of the caller + */ +final class LazyValWithContext[T, Context](initialize: Context => T) { + + @SuppressWarnings(Array("org.wartremover.warts.Var")) + @volatile private var bitmap_0: Boolean = false + + @SuppressWarnings(Array("org.wartremover.warts.Var", "org.wartremover.warts.Null")) + private var value_0: T = _ + + private def value_lzycompute(context: Context): T = { + blocking { + this.synchronized { + if (!bitmap_0) { + value_0 = initialize(context) + bitmap_0 = true + } + } + } + value_0 + } + + def get(implicit context: Context): T = + if (bitmap_0) value_0 else value_lzycompute(context) +} + +trait LazyValWithContextCompanion[Context] { + def apply[T](initialize: Context => T): LazyValWithContext[T, Context] = + new LazyValWithContext[T, Context](initialize) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/LengthLimitedByteString.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/LengthLimitedByteString.scala new file mode 100644 index 0000000000..1a5d7c5ed5 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/LengthLimitedByteString.scala @@ -0,0 +1,173 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util + +import com.digitalasset.canton.checked +import com.google.protobuf.ByteString + +/** This trait wraps a ByteString that is limited to a certain maximum length. 
+ * Classes implementing this trait expose `create` and `tryCreate` methods to safely (and non-safely) construct + * such a ByteString. + * + * The canonical use case is ensuring that we don't encrypt more data than the underlying crypto algorithm can: + * for example, Rsa2048OaepSha256 can only encrypt 190 bytes at a time. + */ +sealed trait LengthLimitedByteString extends NoCopy { + def str: ByteString + + /** Maximum number of byte characters allowed. */ + def maxLength: Int + + // optionally give a name for the type of ByteString you are attempting to validate for nicer error messages + def name: Option[String] = None + + @SuppressWarnings(Array("org.wartremover.warts.IsInstanceOf")) + def canEqual(a: Any): Boolean = + a.isInstanceOf[LengthLimitedByteString] || a.isInstanceOf[ByteString] + + override def equals(that: Any): Boolean = + that match { + case that: LengthLimitedByteString => + canEqual(this) && this.str == that.str && this.maxLength == that.maxLength + case that: ByteString => canEqual(this) && this.str == that + case _ => false + } + + override def hashCode(): Int = str.hashCode() + + require( + str.size() <= maxLength, + s"The given ${name.getOrElse("byteString")} has a maximum length of $maxLength but a ${name + .getOrElse("byteString")} of length ${str.size()} ('$str') was given", + ) + + def unwrap: ByteString = str + + override def toString: String = str.toString + + def nonEmpty: Boolean = !str.isEmpty + + def tryConcatenate(that: LengthLimitedByteString): LengthLimitedByteStringVar = + new LengthLimitedByteStringVar( + this.unwrap.concat(that.unwrap), + this.maxLength + that.maxLength, + )() + + def tryConcatenate(that: ByteString): LengthLimitedByteStringVar = + new LengthLimitedByteStringVar(this.unwrap.concat(that), this.maxLength + that.size())() + +} + +object LengthLimitedByteString { + + def errorMsg(tooLongStr: ByteString, maxLength: Int, name: Option[String] = None): String = + s"The given ${name.getOrElse("byteString")} has a 
maximum length of $maxLength but a ${name
+        .getOrElse("byteString")} of length ${tooLongStr.size()} was given"
+
+  def tryCreate(
+      str: ByteString,
+      maxLength: Int,
+      name: Option[String] = None,
+  ): LengthLimitedByteString = {
+    new LengthLimitedByteStringVar(str, maxLength)(name)
+  }
+
+  def create(
+      str: ByteString,
+      maxLength: Int,
+      name: Option[String] = None,
+  ): Either[String, LengthLimitedByteString] = {
+    Either.cond(
+      str.size() <= maxLength,
+      new LengthLimitedByteStringVar(str, maxLength)(name),
+      errorMsg(str, maxLength, name),
+    )
+  }
+
+}
+
+final case class ByteString190(str: ByteString)(override val name: Option[String] = None)
+    extends LengthLimitedByteString {
+  override def maxLength: Int = ByteString190.maxLength
+}
+
+object ByteString190 extends LengthLimitedByteStringCompanion[ByteString190] {
+  override def maxLength: Int = 190
+
+  override protected def factoryMethod(str: ByteString)(name: Option[String]): ByteString190 =
+    new ByteString190(str)(name)
+}
+
+final case class ByteString256(str: ByteString)(override val name: Option[String] = None)
+    extends LengthLimitedByteString {
+  override def maxLength: Int = ByteString256.maxLength
+}
+
+object ByteString256 extends LengthLimitedByteStringCompanion[ByteString256] {
+  override def maxLength: Int = 256
+
+  override protected def factoryMethod(str: ByteString)(name: Option[String]): ByteString256 =
+    new ByteString256(str)(name)
+}
+
+final case class ByteString4096(str: ByteString)(override val name: Option[String] = None)
+    extends LengthLimitedByteString {
+  override def maxLength: Int = ByteString4096.maxLength // fix: was ByteString6144.maxLength (copy-paste error; would allow 6144 bytes)
+}
+
+object ByteString4096 extends LengthLimitedByteStringCompanion[ByteString4096] {
+  override def maxLength: Int = 4096
+
+  override protected def factoryMethod(str: ByteString)(name: Option[String]): ByteString4096 =
+    new ByteString4096(str)(name)
+}
+
+final case class ByteString6144(str: ByteString)(override val name: Option[String] = None)
+    extends 
LengthLimitedByteString { + override def maxLength: Int = ByteString6144.maxLength +} + +object ByteString6144 extends LengthLimitedByteStringCompanion[ByteString6144] { + override def maxLength: Int = 6144 + + override protected def factoryMethod(str: ByteString)(name: Option[String]): ByteString6144 = + new ByteString6144(str)(name) +} + +final case class LengthLimitedByteStringVar(override val str: ByteString, maxLength: Int)( + override val name: Option[String] = None +) extends LengthLimitedByteString + +object LengthLimitedByteStringVar { + private[this] def apply(str: ByteString): LengthLimitedByteStringVar = + throw new UnsupportedOperationException("Use create or tryCreate methods") +} + +/** Trait that implements method commonly needed in the companion object of an [[LengthLimitedByteString]] */ +trait LengthLimitedByteStringCompanion[A <: LengthLimitedByteString] { + + val empty: A = checked(factoryMethod(ByteString.EMPTY)(None)) + + /** The maximum byteString length. Should not be overwritten with `val` to avoid initialization issues. */ + def maxLength: Int + + /** Factory method for creating a ByteString. 
+ * + * @throws java.lang.IllegalArgumentException if `str` is longer than [[maxLength]] + */ + protected def factoryMethod(str: ByteString)(name: Option[String]): A + + def create(str: ByteString, name: Option[String] = None): Either[String, A] = + Either.cond( + str.size() <= maxLength, + factoryMethod(str)(name), + LengthLimitedByteString.errorMsg(str, maxLength, name), + ) + + private[this] def apply(str: ByteString): A = + throw new UnsupportedOperationException("Use create or tryCreate methods") + + def tryCreate(str: ByteString, name: Option[String] = None): A = + factoryMethod(str)(name) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/LfTransactionUtil.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/LfTransactionUtil.scala new file mode 100644 index 0000000000..4cd54f73f4 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/LfTransactionUtil.scala @@ -0,0 +1,224 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util + +import cats.{Monad, Order} +import com.daml.lf.data.* +import com.daml.lf.transaction.TransactionVersion +import com.daml.lf.value.Value +import com.digitalasset.canton.LfPartyId +import com.digitalasset.canton.protocol.* + +import scala.annotation.nowarn + +/** Helper functions to work with `com.digitalasset.daml.lf.transaction.GenTransaction`. + * Using these helper functions is useful to provide a buffer from upstream changes. + */ +object LfTransactionUtil { + + implicit val orderTransactionVersion: Order[TransactionVersion] = + Order.by[TransactionVersion, String](_.protoValue)(Order.fromOrdering) + + /** Return the template associated to a node. + * Note: unlike [[nodeTemplates]] below, it does not return the interface package + * for exercise by interface nodes. 
+ */ + def nodeTemplate(node: LfActionNode): LfTemplateId = node match { + case n: LfNodeCreate => n.coinst.template + case n: LfNodeFetch => n.templateId + case n: LfNodeExercises => n.templateId + case n: LfNodeLookupByKey => n.templateId + } + + /** Return the templates associated to a node. */ + def nodeTemplates(node: LfActionNode): Seq[LfTemplateId] = node match { + case n: LfNodeCreate => Seq(n.coinst.template) + case n: LfNodeFetch => Seq(n.templateId) + case n: LfNodeExercises => n.templateId +: n.interfaceId.toList + case n: LfNodeLookupByKey => Seq(n.templateId) + } + + def consumedContractId(node: LfActionNode): Option[LfContractId] = node match { + case _: LfNodeCreate => None + case _: LfNodeFetch => None + case nx: LfNodeExercises if nx.consuming => Some(nx.targetCoid) + case _: LfNodeExercises => None + case _: LfNodeLookupByKey => None + } + + def contractId(node: LfActionNode): Option[LfContractId] = node match { + case n: LfNodeCreate => Some(n.coid) + case n: LfNodeFetch => Some(n.coid) + case n: LfNodeExercises => Some(n.targetCoid) + case n: LfNodeLookupByKey => n.result + } + + def usedContractId(node: LfActionNode): Option[LfContractId] = node match { + case n: LfNodeCreate => None + case n: LfNodeFetch => Some(n.coid) + case n: LfNodeExercises => Some(n.targetCoid) + case n: LfNodeLookupByKey => n.result + } + + /** All contract IDs referenced with a Daml `com.daml.lf.value.Value` */ + def referencedContractIds(value: Value): Set[LfContractId] = value.cids + + /** Whether or not a node has a random seed */ + def nodeHasSeed(node: LfNode): Boolean = node match { + case _: LfNodeCreate => true + case _: LfNodeExercises => true + case _: LfNodeFetch => false + case _: LfNodeLookupByKey => false + case _: LfNodeRollback => false + } + + private[this] def suffixForDiscriminator( + unicumOfDiscriminator: LfHash => Option[Unicum], + cantonContractId: CantonContractIdVersion, + )(discriminator: LfHash): Bytes = { + /* If we can't find the 
discriminator we leave it unchanged, + * because this could refer to an input contract of the transaction. + * The well-formedness checks ensure that unsuffixed discriminators of created contracts are fresh, + * i.e., we suffix a discriminator either everywhere in the transaction or nowhere + * even though the map from discriminators to unicum is built up in post-order of the nodes. + */ + unicumOfDiscriminator(discriminator).fold(Bytes.Empty)(_.toContractIdSuffix(cantonContractId)) + } + + def suffixContractInst( + unicumOfDiscriminator: LfHash => Option[Unicum], + cantonContractId: CantonContractIdVersion, + )(contractInst: LfContractInst): Either[String, LfContractInst] = { + contractInst.unversioned + .suffixCid(suffixForDiscriminator(unicumOfDiscriminator, cantonContractId)) + .map(unversionedContractInst => // traverse being added in daml-lf + contractInst.map(_ => unversionedContractInst) + ) + } + + def suffixNode( + unicumOfDiscriminator: LfHash => Option[Unicum], + cantonContractId: CantonContractIdVersion, + )(node: LfActionNode): Either[String, LfActionNode] = { + node.suffixCid(suffixForDiscriminator(unicumOfDiscriminator, cantonContractId)) + } + + /** Monadic visit to all nodes of the transaction in execution order. + * Exercise nodes are visited twice: when execution reaches them and when execution leaves their body. 
+ * Crashes on malformed transactions (see `com.daml.lf.transaction.GenTransaction.isWellFormed`) + */ + @nowarn("msg=match may not be exhaustive") + def foldExecutionOrderM[F[_], A](tx: LfTransaction, initial: A)( + exerciseBegin: (LfNodeId, LfNodeExercises, A) => F[A] + )( + leaf: (LfNodeId, LfLeafOnlyActionNode, A) => F[A] + )(exerciseEnd: (LfNodeId, LfNodeExercises, A) => F[A])( + rollbackBegin: (LfNodeId, LfNodeRollback, A) => F[A] + )(rollbackEnd: (LfNodeId, LfNodeRollback, A) => F[A])(implicit F: Monad[F]): F[A] = { + + F.tailRecM(FrontStack.from(tx.roots.map(_ -> false)) -> initial) { + case (FrontStack(), x) => F.pure(Right(x)) + case (FrontStackCons((nodeId, upwards), toVisit), x) => + tx.nodes(nodeId) match { + case ne: LfNodeExercises => + if (upwards) F.map(exerciseEnd(nodeId, ne, x))(y => Left(toVisit -> y)) + else + F.map(exerciseBegin(nodeId, ne, x))(y => + Left((ne.children.map(_ -> false) ++: (nodeId -> true) +: toVisit) -> y) + ) + case nl: LfLeafOnlyActionNode => F.map(leaf(nodeId, nl, x))(y => Left(toVisit -> y)) + case nr: LfNodeRollback => + if (upwards) F.map(rollbackEnd(nodeId, nr, x))(y => Left(toVisit -> y)) + else + F.map(rollbackBegin(nodeId, nr, x))(y => + Left((nr.children.map(_ -> false) ++: (nodeId -> true) +: toVisit) -> y) + ) + } + } + } + + /** Given internally consistent transactions, compute their consumed contract ids. */ + def consumedContractIds( + transactions: Iterable[LfVersionedTransaction] + ): Set[LfContractId] = + transactions.foldLeft(Set.empty[LfContractId]) { case (consumed, tx) => + consumed | tx.consumedContracts + } + + /** Yields the signatories of the node's contract, or key maintainers for nodes without signatories. 
+    */
+  val signatoriesOrMaintainers: LfActionNode => Set[LfPartyId] = {
+    case n: LfNodeCreate => n.signatories
+    case n: LfNodeFetch => n.signatories
+    case n: LfNodeExercises => n.signatories
+    case n: LfNodeLookupByKey => n.keyMaintainers
+  }
+
+  def stateKnownTo(node: LfActionNode): Set[LfPartyId] = node match {
+    case n: LfNodeCreate => n.keyOpt.fold(n.stakeholders)(_.maintainers)
+    case n: LfNodeFetch => n.stakeholders
+    case n: LfNodeExercises => n.stakeholders
+    case n: LfNodeLookupByKey =>
+      n.result match {
+        case None => n.keyMaintainers
+        // TODO(#3013) use signatories or stakeholders
+        case Some(_) => n.keyMaintainers
+      }
+  }
+
+  /** Yields the acting parties of the node, if applicable
+    *
+    * @throws java.lang.IllegalArgumentException if a Fetch node does not contain the acting parties.
+    */
+  val actingParties: LfActionNode => Set[LfPartyId] = {
+    case _: LfNodeCreate => Set.empty
+
+    case node @ LfNodeFetch(_, _, noActors, _, _, _, _, _) if noActors.isEmpty =>
+      throw new IllegalArgumentException(s"Fetch node $node without acting parties.")
+    case LfNodeFetch(_, _, actors, _, _, _, _, _) => actors
+
+    case n: LfNodeExercises => n.actingParties
+
+    case nl: LfNodeLookupByKey => nl.keyMaintainers
+  }
+
+  /** Compute the informees of a transaction based on the ledger model definition.
+    *
+    * Refer to https://docs.daml.com/concepts/ledger-model/ledger-privacy.html#projections
+    */
+  def informees(transaction: LfVersionedTransaction): Set[LfPartyId] = {
+    val nodes: Set[LfActionNode] = transaction.nodes.values.collect { case an: LfActionNode =>
+      an
+    }.toSet
+    nodes.flatMap(_.informeesOfNode)
+  }
+
+  val children: LfNode => Seq[LfNodeId] = {
+    case ex: LfNodeExercises => ex.children.toSeq
+    case _ => Seq.empty
+  }
+
+  /** Yields the light-weight version (i.e. without exercise children and result) of this node.
+    *
+    * @throws java.lang.UnsupportedOperationException if `node` is a rollback.
+ */ + def lightWeight(node: LfActionNode): LfActionNode = { + node match { + case n: LfNodeCreate => n + case n: LfNodeFetch => n + case n: LfNodeExercises => n.copy(children = ImmArray.empty) + case n: LfNodeLookupByKey => n + } + } + + def metadataFromExercise(node: LfNodeExercises): ContractMetadata = + ContractMetadata.tryCreate(node.signatories, node.stakeholders, node.versionedKeyOpt) + + def metadataFromCreate(node: LfNodeCreate): ContractMetadata = + ContractMetadata.tryCreate(node.signatories, node.stakeholders, node.versionedKeyOpt) + + def metadataFromFetch(node: LfNodeFetch): ContractMetadata = + ContractMetadata.tryCreate(node.signatories, node.stakeholders, node.versionedKeyOpt) + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/LoggerUtil.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/LoggerUtil.scala new file mode 100644 index 0000000000..1df347e0d6 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/LoggerUtil.scala @@ -0,0 +1,114 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util + +import com.digitalasset.canton.logging.ErrorLoggingContext +import com.digitalasset.canton.tracing.TraceContext +import org.slf4j.event.Level + +import java.util.concurrent.TimeUnit +import scala.concurrent.duration.Duration +import scala.util.control.NonFatal + +object LoggerUtil { + + /** Log a `message` at a given `level`. + * + * @param message The message to be logged. Call-by-name so that the message is computed only if the message is really logged. 
+    */
+  def logAtLevel(level: Level, message: => String)(implicit
+      loggingContext: ErrorLoggingContext
+  ): Unit = {
+    val logger = loggingContext.logger
+    implicit val traceContext: TraceContext = loggingContext.traceContext
+    level match {
+      case Level.TRACE => logger.trace(message)
+      case Level.DEBUG => logger.debug(message)
+      case Level.INFO => logger.info(message)
+      case Level.WARN => logger.warn(message)
+      case Level.ERROR => logger.error(message)
+    }
+  }
+
+  /** Log a `message` with a `throwable` at a given `level`. */
+  def logThrowableAtLevel(level: Level, message: => String, throwable: => Throwable)(implicit
+      loggingContext: ErrorLoggingContext
+  ): Unit = {
+    val logger = loggingContext.logger
+    implicit val traceContext: TraceContext = loggingContext.traceContext
+    level match {
+      case Level.TRACE => logger.trace(message, throwable)
+      case Level.DEBUG => logger.debug(message, throwable)
+      case Level.INFO => logger.info(message, throwable)
+      case Level.WARN => logger.warn(message, throwable)
+      case Level.ERROR => logger.error(message, throwable)
+    }
+  }
+
+  /** Log the time taken by a task `run` */
+  def clue[T](message: => String)(run: => T)(implicit loggingContext: ErrorLoggingContext): T = {
+    val logger = loggingContext.logger
+    implicit val traceContext: TraceContext = loggingContext.traceContext
+    logger.debug(s"Starting $message")
+    val st = System.nanoTime()
+    val ret = run
+    val end = roundDurationForHumans(Duration(System.nanoTime() - st, TimeUnit.NANOSECONDS))
+    logger.debug(s"Finished $message after $end")
+    ret
+  }
+
+  /** Round a duration so that humans can grasp the numbers more easily
+    *
+    * Duration offers a method .toCoarsest that will figure out the coarsest
+    * time unit. However, this method doesn't really do anything if we have nanoseconds
+    * as it only truncates 0.
+    *
+    * Therefore, this method allows setting lower digits to 0 and only keeping the leading digits as nonzeros.
+ */ + def roundDurationForHumans(duration: Duration, keep: Int = 2): Duration = { + if (duration.isFinite && duration.length != 0) { + val length = Math.abs(duration.length) + val adjusted = length - length % math + .pow(10, Math.max(0, math.floor(math.log10(length.toDouble)) - keep)) + Duration(if (duration.length > 0) adjusted else -adjusted, duration.unit).toCoarsest + } else duration + } + + def logOnThrow[T](task: => T)(implicit loggingContext: ErrorLoggingContext): T = { + try { + task + } catch { + case NonFatal(e) => + loggingContext.error("Unhandled exception thrown!", e) + throw e + } + } + + def logOnThrow_(task: => Unit)(implicit loggingContext: ErrorLoggingContext): Unit = { + try { + task + } catch { + case NonFatal(e) => + loggingContext.logger.error("Unhandled exception thrown!", e)(loggingContext.traceContext) + } + } + + /** truncates a string + * + * @param maxLines truncate after observing the given number of newline characters + * @param maxSize truncate after observing the given number of characters + */ + def truncateString(maxLines: Int, maxSize: Int)(str: String): String = { + val builder = new StringBuilder() + val (lines, length) = str.foldLeft((0, 0)) { + case ((lines, length), elem) if lines < maxLines && length < maxSize => + builder.append(elem) + (lines + (if (elem == '\n') 1 else 0), length + 1) + case (acc, _) => acc + } + val append = if (lines == maxLines || length == maxSize) " ..." else "" + builder.toString + append + } + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/MapsUtil.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/MapsUtil.scala new file mode 100644 index 0000000000..5816f1d5b7 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/MapsUtil.scala @@ -0,0 +1,243 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util + +import cats.FlatMap +import cats.data.Chain +import cats.kernel.Semigroup +import cats.syntax.either.* +import cats.syntax.foldable.* +import com.digitalasset.canton.DiscardOps +import com.digitalasset.canton.logging.ErrorLoggingContext + +import scala.annotation.tailrec +import scala.collection.{concurrent, mutable} + +object MapsUtil { + + /** Merges two maps where the values are sets of V + * + * @param big the likely larger of the two maps (so we can merge the small into the big, + * rather than the other way around) + * @param small the likely smaller of the maps. + */ + def mergeMapsOfSets[K, V](big: Map[K, Set[V]], small: Map[K, Set[V]]): Map[K, Set[V]] = { + small.foldLeft(big) { case (acc, (k, v)) => + acc.updated(k, acc.getOrElse(k, Set()).union(v)) + } + } + + /** Atomically modifies the given map at the given key. + * `notFound` may be evaluated even if the key is present at the atomic update; + * in this case, its monadic effect propagates to the result. + * `f` may be evaluated several times on several of the previous values associated to the key, + * even if no value is associated with the key immediately before the atomic update happens. + * The monadic effect of all these evaluations propagates to the result. + * + * @param map The map that is updated + * @param notFound The value to be used if the key was not present in the map. + * [[scala.None$]] denotes that the map should not be modified. + * @param f The function used to transform the value found in the map. + * If the returned value is [[scala.None$]], the key is removed from the map. + * @return The value associated with the key before the update. 
+ */ + def modifyWithConcurrentlyM[F[_], K, V]( + map: concurrent.Map[K, V], + key: K, + notFound: => F[Option[V]], + f: V => F[Option[V]], + )(implicit monad: FlatMap[F]): F[Option[V]] = { + // Make sure that we evaluate `notFound` at most once + lazy val notFoundV = notFound + + def step(): F[Either[Unit, Option[V]]] = map.get(key) match { + case None => + monad.map(notFoundV) { + case None => Either.right[Unit, Option[V]](None) + case Some(newValue) => + map + .putIfAbsent(key, newValue) + .fold(Either.right[Unit, Option[V]](None))(_ => Either.left[Unit, Option[V]](())) + } + case Some(oldValue) => + monad.map(f(oldValue)) { + case None => Either.cond(map.remove(key, oldValue), Some(oldValue), ()) + case Some(newValue) => + Either.cond(map.replace(key, oldValue, newValue), Some(oldValue), ()) + } + } + + monad.tailRecM(())((_: Unit) => step()) + } + + /** Atomically updates the given map at the given key. In comparison to [[modifyWithConcurrentlyM]], + * this method supports only inserting and updating elements. It does not support removing elements or preventing the + * insertion of elements when they are not originally in the map. 
+ */ + def updateWithConcurrentlyM[F[_], K, V]( + map: concurrent.Map[K, V], + key: K, + notFound: => F[V], + f: V => F[V], + )(implicit monad: FlatMap[F]): F[Option[V]] = + modifyWithConcurrentlyM( + map, + key, + monad.map(notFound)(Some(_)), + (v: V) => monad.map(f(v))(Some(_)), + ) + + def modifyWithConcurrentlyM_[F[_], K, V]( + map: concurrent.Map[K, V], + key: K, + notFound: => F[Option[V]], + f: V => F[Option[V]], + )(implicit monad: FlatMap[F]): F[Unit] = + monad.void(modifyWithConcurrentlyM(map, key, notFound, f)) + + def updateWithConcurrentlyM_[F[_], K, V]( + map: concurrent.Map[K, V], + key: K, + notFound: => F[V], + f: V => F[V], + )(implicit monad: FlatMap[F]): F[Unit] = + monad.void(updateWithConcurrentlyM(map, key, notFound, f)) + + /** Specializes [[updateWithConcurrentlyM_]] to the [[Checked]] monad, + * where the non-aborts are only kept from the invocation of `notFound` or `f` + * that causes the first [[Checked.Abort]] or updates the map. + */ + def modifyWithConcurrentlyChecked_[A, N, K, V]( + map: concurrent.Map[K, V], + key: K, + notFound: => Checked[A, N, Option[V]], + f: V => Checked[A, N, Option[V]], + ): Checked[A, N, Unit] = { + /* The update function `f` may execute several times for different values, + * In that case, we may get several non-aborts reported + * for the same contract update. + * + * To keep only those from the last update, we group the problems in `Chain`s and only keep the last one. 
+ */ + def lift(chain: Chain[N]): Chain[Chain[N]] = Chain(chain) + + MapsUtil + .modifyWithConcurrentlyM_( + map, + key, + notFound.mapNonaborts(lift), + (x: V) => f(x).mapNonaborts(lift), + ) + .mapNonaborts { chainsOfNonaborts => + ChainUtil.lastOption(chainsOfNonaborts).getOrElse(Chain.empty) + } + } + + def updateWithConcurrentlyChecked_[A, N, K, V]( + map: concurrent.Map[K, V], + key: K, + notFound: => Checked[A, N, V], + f: V => Checked[A, N, V], + ): Checked[A, N, Unit] = + modifyWithConcurrentlyChecked_(map, key, notFound.map(Some(_)), (v: V) => f(v).map(Some.apply)) + + /** @param m An input map + * @param f A function to map each of the input keys to a new set of keys ks + * + * Generates a new map m' with the following property: + * If { (k,v) in m and k' in f(k) } then v is in the set m'[k'] + * + * See `MapsUtilTest` for an example. + */ + def groupByMultipleM[M[_], K, K2, V]( + m: Map[K, V] + )(f: K => M[Set[K2]])(implicit M: cats.Monad[M]): M[Map[K2, Set[V]]] = { + m.toList.foldM(Map.empty[K2, Set[V]]) { case (m, (k, v)) => + M.map(f(k)) { + _.toList.foldLeft(m) { (m_, k2) => + val newVal = m_.getOrElse(k2, Set.empty[V]) + v + m_ + (k2 -> newVal) + } + } + } + } + + /** Updates the key of the current map if present. + * The update function `f` may be evaluated multiple times. + * if `f` throws an exception, the exception propagates and the map is not updated. + * + * @return Whether the map changed due to this update + */ + def updateWithConcurrently[K, V <: AnyRef](map: concurrent.Map[K, V], key: K)( + f: V => V + ): Boolean = { + @tailrec def go(): Boolean = map.get(key) match { + case None => false + case Some(current) => + val next = f(current) + if (current eq next) + false // Do not modify the map if the transformation doesn't change anything + else if (map.replace(key, current, next)) true + else go() // concurrent modification, so let's retry + } + go() + } + + /** Insert the pair (key, value) to `map` if not already present. 
+   * Assert that any existing element for `key` is equal to `value`.
+   * @throws java.lang.IllegalStateException if the assertion fails
+   */
+  def tryPutIdempotent[K, V](map: concurrent.Map[K, V], key: K, value: V)(implicit
+      loggingContext: ErrorLoggingContext
+  ): Unit =
+    map.putIfAbsent(key, value).foreach { oldValue =>
+      ErrorUtil.requireState(
+        oldValue == value,
+        s"Map key $key already has value $oldValue assigned to it. Cannot insert $value.",
+      )
+    }
+
+  def mergeWith[K, V](map1: Map[K, V], map2: Map[K, V])(f: (V, V) => V): Map[K, V] = {
+    // We don't need `f`'s associativity when we merge maps
+    implicit val semigroupV = new Semigroup[V] {
+      override def combine(x: V, y: V): V = f(x, y)
+    }
+    Semigroup[Map[K, V]].combine(map1, map2)
+  }
+
+  def extendMapWith[K, V](m: mutable.Map[K, V], extendWith: IterableOnce[(K, V)])(
+      merge: (V, V) => V
+  ): Unit = {
+    extendWith.iterator.foreach { case (k, v) =>
+      m.updateWith(k) {
+        case None => Some(v)
+        case Some(mv) => Some(merge(mv, v))
+      }.discard[Option[V]]
+    }
+  }
+
+  def extendedMapWith[K, V](m: Map[K, V], extendWith: IterableOnce[(K, V)])(
+      merge: (V, V) => V
+  ): Map[K, V] =
+    extendWith.iterator.foldLeft(m) { case (acc, (k, v)) =>
+      acc.updatedWith(k) {
+        case None => Some(v)
+        case Some(mv) => Some(merge(mv, v))
+      }
+    }
+
+  /** Return all key-value pairs in minuend that are different / missing in subtrahend
+    *
+    * @throws java.lang.IllegalArgumentException if the minuend is not defined for all keys of the subtrahend.
+    */
+  def mapDiff[K, V](minuend: Map[K, V], subtrahend: collection.Map[K, V])(implicit
+      loggingContext: ErrorLoggingContext
+  ): Map[K, V] = {
+    ErrorUtil.requireArgument(
+      // Fix: the original checked `subtrahend.keySet.subsetOf(subtrahend.keySet)`, which is
+      // trivially true. The documented precondition (and the message below) require that the
+      // minuend is defined wherever the subtrahend is, i.e. subtrahend's keys ⊆ minuend's keys.
+      subtrahend.keySet.subsetOf(minuend.keySet),
+      s"Cannot compute map difference if minuend is not defined whenever subtrahend is defined. 
Missing keys: ${subtrahend.keySet diff minuend.keySet}", + ) + minuend.filter { case (k, v) => !subtrahend.get(k).contains(v) } + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/MessageRecorder.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/MessageRecorder.scala new file mode 100644 index 0000000000..43116a0e1e --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/MessageRecorder.scala @@ -0,0 +1,126 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util + +import com.digitalasset.canton.DiscardOps +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.lifecycle.FlagCloseable +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging, TracedLogger} +import com.digitalasset.canton.tracing.TraceContext + +import java.io.* +import java.nio.file.Path +import java.util.concurrent.atomic.AtomicReference +import scala.annotation.tailrec +import scala.concurrent.blocking +import scala.reflect.ClassTag +import scala.util.{Failure, Success, Try} + +/** Persists data for replay tests. + */ +class MessageRecorder( + override protected val timeouts: ProcessingTimeout, + override val loggerFactory: NamedLoggerFactory, +) extends FlagCloseable + with NamedLogging { + + val streamRef: AtomicReference[Option[ObjectOutputStream]] = new AtomicReference(None) + + def startRecording(destination: Path)(implicit traceContext: TraceContext): Unit = { + logger.debug(s"Start recording to $destination") + + sys.addShutdownHook { + // This is important to not lose messages, if SIGTERM is sent to Canton. 
+ if (streamRef.get().isDefined) { + stopRecording() + } + }.discard + + val stream = new ObjectOutputStream( + new BufferedOutputStream(new FileOutputStream(destination.toFile)) + ) + + val started = streamRef.compareAndSet(None, Some(stream)) + + if (started) { + logger.debug(s"Started recording to $destination") + } else { + stream.close() + ErrorUtil.internalError(new IllegalStateException("Already recording.")) + } + } + + /** Serializes and saves the provided message to the output stream. + * This method is synchronized as the write operations on the underlying [[java.io.ObjectOutputStream]] are not thread safe. + */ + def record(message: Serializable): Unit = blocking(synchronized { + streamRef.get().foreach(_.writeObject(message)) + }) + + def stopRecording()(implicit traceContext: TraceContext): Unit = { + logger.debug("Stopping recording...") + streamRef.getAndSet(None) match { + case Some(stream) => + blocking(synchronized { stream.close() }) + case None => + logger.info("Recorder has not been recording.") + } + } + + override def onClosed(): Unit = stopRecording()(TraceContext.empty) +} + +object MessageRecorder { + + private val loader: ClassLoader = Thread.currentThread().getContextClassLoader + + /** Yields a list containing all messages stored at `source`. + * Be aware that the method loads all messages into memory. This is tailored to performance testing, + * because it allows for loading messages before starting performance measurements. + * + * @throws java.lang.ClassCastException if a message is not of type `T` + */ + def load[T <: Serializable](source: Path, logger: TracedLogger)(implicit + classTag: ClassTag[T], + traceContext: TraceContext, + ): List[T] = { + ResourceUtil.withResource(new BufferedInputStream(new FileInputStream(source.toFile))) { + rawStream => + ResourceUtil.withResource(new ObjectInputStream(rawStream) { + // Workaround for known bugs in the deserialization framework. 
+ // https://github.com/scala/bug/issues/9777 + // https://bugs.openjdk.java.net/browse/JDK-8024931 + override def resolveClass(desc: ObjectStreamClass): Class[_] = + Class.forName(desc.getName, false, loader) + }) { stream => + val builder = List.newBuilder[T] + + @SuppressWarnings(Array("org.wartremover.warts.Return")) + @tailrec def go(): Unit = + if (rawStream.available() > 0) { + Try(stream.readObject()) match { + case Success(classTag(message)) => builder += message + case Success(_) => + throw new ClassCastException(s"Unable to cast message to type $classTag.") + case Failure(_: EOFException) => + // This can occur if Canton is killed during recording. Just stop reading. + logger.info( + s"Unexpected EOF while reading messages from $source. Discarding rest of input..." + ) + return + case Failure(e) => + logger.error(s"An unexpected exception occurred while reading from $source", e) + val numSuccessful = builder.result().size + logger.error(s"Number of successfully read messages is $numSuccessful.") + throw e + } + go() + } else {} + go() + + builder.result() + } + } + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/MonadUtil.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/MonadUtil.scala new file mode 100644 index 0000000000..b59823a82c --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/MonadUtil.scala @@ -0,0 +1,125 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util + +import cats.syntax.parallel.* +import cats.{Monad, Monoid, Parallel} +import com.digitalasset.canton.config.RequireTypes.PositiveInt + +import scala.annotation.tailrec +import scala.collection.immutable + +object MonadUtil { + + /** The caller must ensure that the underlying data structure of the iterator is immutable */ + def foldLeftM[M[_], S, A](initialState: S, iter: Iterator[A])( + step: (S, A) => M[S] + )(implicit monad: Monad[M]): M[S] = + monad.tailRecM[S, S](initialState) { state => + if (iter.hasNext) { + monad.map(step(state, iter.next()))(newState => Left(newState)) + } else monad.pure(Right(state)) + } + + def foldLeftM[M[_], S, A](initialState: S, xs: immutable.Iterable[A])(step: (S, A) => M[S])( + implicit monad: Monad[M] + ): M[S] = + foldLeftM(initialState, xs.iterator)(step) + + /** The implementation of `traverse` in `cats` is parallel, so this provides a sequential alternative. + * The caller must ensure that the Iterable is immutable + * + * Do not use Cats' .traverse_ methods as Cats does not specify whether the `step` runs sequentially or in parallel + * for future-like monads. In fact, this behaviour differs for different versions of Cats. + */ + def sequentialTraverse_[M[_], A](xs: Iterable[A])(step: A => M[_])(implicit + monad: Monad[M] + ): M[Unit] = + sequentialTraverse_(xs.iterator)(step) + + /** The caller must ensure that the underlying data structure of the iterator is immutable + * + * Do not use Cats' .traverse_ methods as Cats does not specify whether the `step` runs sequentially or in parallel + * for future-like monads. In fact, this behaviour differs for different versions of Cats. + */ + def sequentialTraverse_[M[_], A](xs: Iterator[A])(step: A => M[_])(implicit + monad: Monad[M] + ): M[Unit] = + foldLeftM((), xs)((_, x) => monad.void(step(x))) + + /** Repeatedly apply the same function to a monadic value `m`. 
This can be used to retry until the + * limit `counter` is reached or the monad `m` aborts. + */ + @tailrec + def repeatFlatmap[M[_], A](m: M[A], f: A => M[A], counter: Int)(implicit + monad: Monad[M] + ): M[A] = { + counter match { + case 0 => m + case n => + require(n > 0, s"Trying to repeat with negative counter: $n") + val next = monad.flatMap(m)(f) + repeatFlatmap(next, f, counter - 1) + } + } + + def sequentialTraverse[X, M[_], S]( + xs: Seq[X] + )(f: X => M[S])(implicit monad: Monad[M]): M[Seq[S]] = { + val result = foldLeftM(Seq.empty: Seq[S], xs)((ys, x) => monad.map(f(x))(y => y +: ys)) + monad.map(result)(seq => seq.reverse) + } + + /** Batched version of sequential traverse + * + * Can be used to avoid overloading the database queue. Use e.g. maxDbConnections * 2 + * as parameter for parallelism to not overload the database queue but to make sufficient use + * of the existing resources. + */ + def batchedSequentialTraverse[X, M[_], S](parallelism: PositiveInt, chunkSize: PositiveInt)( + xs: Seq[X] + )(processChunk: Seq[X] => M[Seq[S]])(implicit M: Parallel[M]): M[Seq[S]] = + M.monad.map( + sequentialTraverse(xs.grouped(chunkSize.value).grouped(parallelism.value).toSeq)( + _.parFlatTraverse(processChunk) + )(M.monad) + )(_.flatten) + + /** Parallel traverse with limited parallelism + */ + def parTraverseWithLimit[X, M[_], S](parallelism: Int)( + xs: Seq[X] + )(processElement: X => M[S])(implicit M: Parallel[M]): M[Seq[S]] = + M.monad.map( + sequentialTraverse(xs.grouped(parallelism).toSeq)( + _.parTraverse(processElement) + )(M.monad) + )(_.flatten) + + def parTraverseWithLimit_[X, M[_], S](parallelism: Int)( + xs: Seq[X] + )(processElement: X => M[S])(implicit M: Parallel[M]): M[Unit] = + M.monad.void( + sequentialTraverse(xs.grouped(parallelism).toSeq)( + _.parTraverse(processElement) + )(M.monad) + ) + + def batchedSequentialTraverse_[X, M[_]](parallelism: PositiveInt, chunkSize: PositiveInt)( + xs: Seq[X] + )(processChunk: Seq[X] => M[Unit])(implicit 
M: Parallel[M]): M[Unit] = { + sequentialTraverse_(xs.grouped(chunkSize.value).grouped(parallelism.value))(chunk => + chunk.toSeq.parTraverse_(processChunk) + )(M.monad) + } + + /** Conceptually equivalent to `sequentialTraverse(xs)(step).map(monoid.combineAll)`. + */ + def sequentialTraverseMonoid[M[_], A, B]( + xs: immutable.Iterable[A] + )(step: A => M[B])(implicit monad: Monad[M], monoid: Monoid[B]): M[B] = + foldLeftM[M, B, A](monoid.empty, xs) { (acc, x) => + monad.map(step(x))(monoid.combine(acc, _)) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/NoCopy.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/NoCopy.scala new file mode 100644 index 0000000000..2d3b143287 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/NoCopy.scala @@ -0,0 +1,11 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util + +/** Prevents auto-generation of the copy method in a case class. + * Case classes with private constructors typically shouldn't have a copy method. + */ +trait NoCopy { + protected def copy(nothing: Nothing): Nothing = nothing +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/OptionUtil.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/OptionUtil.scala new file mode 100644 index 0000000000..92ce1dfd5f --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/OptionUtil.scala @@ -0,0 +1,79 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util + +import cats.instances.option.* +import cats.syntax.flatMap.* +import cats.syntax.functor.* +import cats.syntax.parallel.* +import cats.{Monad, Parallel} +import com.digitalasset.canton.config.CantonRequireTypes.LengthLimitedString + +import scala.annotation.nowarn + +object OptionUtil { + + /** If left is non empty, zip its content with the lazily evaluated result of right (if right returns a non empty result as well), + * and apply f to the pair (l, r) + */ + def zipWithF[F[_]: Monad: Parallel, A, B, C](left: Option[A], right: => F[Option[B]])( + f: (A, B) => F[C] + ): F[Option[C]] = { + left.parFlatTraverse(l => right.map(r => Some(l).zip(r))).flatMap { + case Some(ab) => Function.tupled(f).andThen(_.map(Option(_)))(ab) + case None => implicitly[Monad[F]].pure(None) + } + } + + /** [[zipWithF]] but returns a default value if either of left or right are empty + */ + def zipWithFDefaultValue[F[_]: Monad: Parallel, A, B, C]( + left: Option[A], + right: => F[Option[B]], + empty: => C, + )(f: (A, B) => F[C]): F[C] = { + zipWithF(left, right)(f).map(_.getOrElse(empty)) + } + + def mergeWithO[A](left: Option[A], right: Option[A])(f: (A, A) => Option[A]): Option[Option[A]] = + (left, right) match { + case (None, _) => Some(right) + case (_, None) => Some(left) + case (Some(x), Some(y)) => f(x, y).map(Some(_)) + } + + /** Return None iff both `left` and `right` are defined and not equal. 
+ * + * Otherwise, return + * - Some(left), if only left is defined + * - Some(right), if right is defined + */ + def mergeEqual[A](left: Option[A], right: Option[A]): Option[Option[A]] = { + if (left eq right) Some(left) + else + mergeWithO(left, right) { (x, y) => + if (x == y) left else None + } + } + + def mergeWith[A](left: Option[A], right: Option[A])(f: (A, A) => A): Option[A] = { + @nowarn("msg=match may not be exhaustive") // mergeWithO is always defined + val Some(result) = mergeWithO(left, right)((l, r) => Some(f(l, r))) + result + } + + def zipWith[A, B, C](left: Option[A], right: Option[B])(f: (A, B) => C): Option[C] = + for { + l <- left + r <- right + } yield f(l, r) + + def emptyStringAsNone(str: String): Option[String] = if (str.isEmpty) None else Some(str) + def emptyStringAsNone[S <: LengthLimitedString](str: S): Option[S] = + if (str.unwrap.isEmpty) None else Some(str) + def noneAsEmptyString(strO: Option[String]): String = strO.getOrElse("") + + def zeroAsNone(n: Int): Option[Int] = if (n == 0) None else Some(n) + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/OrderedBucketMergeHub.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/OrderedBucketMergeHub.scala new file mode 100644 index 0000000000..c54a665198 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/OrderedBucketMergeHub.scala @@ -0,0 +1,1174 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util + +import cats.syntax.functor.* +import com.daml.nameof.NameOf.qualifiedNameOfCurrentFunc +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.DiscardOps +import com.digitalasset.canton.concurrent.DirectExecutionContext +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.logging.pretty.Pretty +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.tracing.{HasTraceContext, TraceContext} +import com.digitalasset.canton.util.OrderedBucketMergeHub.OutputElement +import com.digitalasset.canton.util.PekkoUtil.{LoggingInHandler, LoggingOutHandler} +import com.digitalasset.canton.util.ShowUtil.* +import org.apache.pekko.Done +import org.apache.pekko.stream.scaladsl.Source +import org.apache.pekko.stream.stage.{ + AsyncCallback, + GraphStageLogic, + GraphStageWithMaterializedValue, + InHandler, + OutHandler, +} +import org.apache.pekko.stream.{Attributes, FlowShape, Inlet, KillSwitch, Outlet} + +import java.util.concurrent.atomic.AtomicInteger +import scala.collection.mutable +import scala.concurrent.{Future, Promise} + +/** A custom Pekko [[org.apache.pekko.stream.stage.GraphStage]] that merges several ordered source streams into one + * based on those sources reaching a threshold for equivalent elements. + * + * The ordered sources produce elements with totally ordered offsets. + * For a given threshold `t`, whenever `t` different sources have produced equivalent elements for an offset + * that is higher than the previous offset, the [[OrderedBucketMergeHub]] emits the map of all these equivalent elements + * as the next [[com.digitalasset.canton.util.OrderedBucketMergeHub.OutputElement]] to downstream. + * Elements from the other ordered sources with lower or equal offset that have not yet reached the threshold are dropped. 
+ * + * Every correct ordered source should produce the same sequence of offsets. + * Faulty sources can produce any sequence of elements as they like. + * The threshold should be set to `F+1` where at most `F` sources are assumed to be faulty, + * and at least `2F+1` ordered sources should be configured. + * This ensures that the `F` faulty ordered sources cannot corrupt the stream nor block it. + * + * If this assumption is violated, the [[OrderedBucketMergeHub]] may deadlock, + * as it only looks at the next element of each ordered source + * (this avoids unbounded buffering and therefore ensures that downstream backpressure reaches the ordered sources). + * For example, given a threshold of 2 with three ordered sources, two of which are faulty, + * the first elements of the sources have offsets 1, 2, 3. + * Suppose that the first ordered source's second element had offset 3 and is equivalent to the third ordered source's first element. + * Then, by the above definition of merging, the stage could emit the elements with offset 3 and discard those with 1 and 2. + * However, this is not yet implemented; the stream just does not emit anything. + * Neither are such deadlocks detected right now. + * This is because in an asynchronous system, there typically are ordered sources that have not yet delivered their next element, + * and possibly may never will within useful time, say because they have crashed (which is not considered a fault). + * In the above example, suppose that the second ordered source had not emitted the element with offset 2. + * Then it is unknown whether the element with offset 1 should be emitted or not, + * because we do not know which ordered sources are correct. 
+ * Suppose we had decided that we drop the elements with offset 1 from a correct ordered source
+ * and emit the ones with offset 3 instead.
+ * Then the second (delayed, but correct) ordered source can still send an equivalent element with 1,
+ * and so the decision of dropping 1 was wrong in hindsight.
+ *
+ * The [[OrderedBucketMergeHub]] manages the ordered sources.
+ * Their configurations and the threshold come through the [[OrderedBucketMergeHub]]'s input stream as a [[OrderedBucketMergeConfig]].
+ * As soon as a new [[OrderedBucketMergeConfig]] is available,
+ * the [[OrderedBucketMergeHub]] changes the ordered sources as necessary:
+ *
+ * - Ordered sources are identified by their `Name`.
+ * - Existing ordered sources whose name does not appear in the new configuration are stopped.
+ * - If a new configuration contains a new name for an ordered source, a new ordered source is created using `ops`.
+ * - If the configuration of an ordered source changes, the previous source is stopped and a new one with the new configuration is created.
+ *
+ * The [[OrderedBucketMergeHub]] emits [[com.digitalasset.canton.util.OrderedBucketMergeHub.ControlOutput]] events to downstream:
+ *
+ * - [[com.digitalasset.canton.util.OrderedBucketMergeHub.NewConfiguration]] signals the new configuration in place.
+ * - [[com.digitalasset.canton.util.OrderedBucketMergeHub.ActiveSourceTerminated]] signals
+ *   that an ordered source has completed or aborted with an error before it was stopped.
+ *
+ * Since configuration changes are consumed eagerly, the [[OrderedBucketMergeHub]] buffers
+ * these [[com.digitalasset.canton.util.OrderedBucketMergeHub.ControlOutput]] events
+ * if downstream is not consuming them fast enough.
+ * The stream of configuration changes should therefore be slower than downstream;
+ * otherwise, the buffer will grow unboundedly and lead to [[java.lang.OutOfMemoryError]]s eventually.
+ * + * When the configuration stream completes or aborts, all ordered sources are stopped + * and the output stream completes. + * + * An ordered source is stopped by pulling its [[org.apache.pekko.stream.KillSwitch]] + * and dropping all elements until the source completes or aborts. + * In particular, the ordered source is not just simply cancelled upon a configuration change + * or when the configuration stream completes. + * This allows for properly synchronizing the completion of the [[OrderedBucketMergeHub]] with + * the internal computations happening in the ordered sources. + * To that end, the [[OrderedBucketMergeHub]] materializes to a [[scala.concurrent.Future]] + * that completes when the corresponding futures from all created ordered sources have completed + * as well as the ordered sources themselves. + * + * If downstream cancels, the [[OrderedBucketMergeHub]] cancels all sources and the input port, + * without draining them. Therefore, the materialized [[scala.concurrent.Future]] may or may not complete, + * depending on the shape of the ordered sources. For example, if the ordered sources' futures are + * created with a plain [[org.apache.pekko.stream.scaladsl.FlowOpsMat.watchTermination]], it will complete because + * [[org.apache.pekko.stream.scaladsl.FlowOpsMat.watchTermination]] completes immediately when it sees a cancellation. + * Therefore, it is better to avoid downstream cancellations altogether. + * + * Rationale for the merging logic: + * + * This graph stage is meant to merge the streams of sequenced events from several sequencers on a client node. + * The operator configures `N` sequencer connections and specifies a threshold `T`. + * Suppose the operator assumes that at most `F` nodes out of `N` are faulty. + * So we need `F < T` for safety. + * For liveness, the operator wants to tolerate as many crashes of correct sequencer nodes as feasible. + * Let `C` be the number of tolerated crashes. 
+ * Then `T <= N - C - F` because faulty sequencers may not deliver any messages.
+ * For a fixed `F`, `T = F + 1` is optimal as we can then tolerate `C = N - 2F - 1` crashed sequencer nodes.
+ *
+ * In other words, if the operator wants to tolerate up to `F` faults and up to `C` crashes,
+ * then it should set `T = F + 1` and configure `N = 2F + C + 1` different sequencer connections.
+ *
+ * If more than `C` sequencers have crashed, then the faulty sequencers can make the client deadlock.
+ * The client cannot detect this under the asynchrony assumption.
+ *
+ * Moreover, the client cannot distinguish
+ * whether a sequencer node is actively malicious or just accidentally faulty.
+ * In particular, if several sequencer nodes deliver inequivalent events,
+ * we currently silently drop them.
+ * TODO(#14365) Design and implement an alert mechanism
+ *
+ * @param ops The operations for the abstracted-away parameters.
+ *            In particular, the equivalence relation between elements is expressed as the pre-image of
+ *            the `equals` relation under the [[OrderedBucketMergeHubOps.bucketOf]] function, i.e.,
+ *            two elements are equivalent if they end up in the same bucket.
+ * @param enableInvariantCheck If true, invariants of the [[OrderedBucketMergeHub]] implementation are checked at run-time.
+ *                             Invariant violations are then logged as [[java.lang.IllegalStateException]] and abort the stage with an error.
+ *                             Do not enable these checks in production.
+ */ +class OrderedBucketMergeHub[Name: Pretty, A, Config, Offset: Pretty, M]( + private val ops: OrderedBucketMergeHubOps[Name, A, Config, Offset, M], + override protected val loggerFactory: NamedLoggerFactory, + enableInvariantCheck: Boolean, +) extends GraphStageWithMaterializedValue[ + FlowShape[ + OrderedBucketMergeConfig[Name, Config], + OrderedBucketMergeHub.Output[Name, (Config, Option[M]), A, Offset], + ], + Future[Done], + ] + with NamedLogging { + import OrderedBucketMergeHub.* + + type ConfigAndMat = (Config, Option[M]) + + private[this] val out: Outlet[Output[Name, ConfigAndMat, A, Offset]] = + Outlet("OrderedBucketMergeHub.out") + private[this] val in: Inlet[OrderedBucketMergeConfig[Name, Config]] = + Inlet("OrderedBucketMergeHub.in") + override def shape: FlowShape[ + OrderedBucketMergeConfig[Name, Config], + OrderedBucketMergeHub.Output[Name, ConfigAndMat, A, Offset], + ] = FlowShape(in, out) + + @SuppressWarnings(Array("org.wartremover.warts.Var")) + private class BucketingLogic(enclosingAttributes: Attributes) extends GraphStageLogic(shape) { + // This contains the mutable state of the graph state logic. + // Since Pekko streams runs all the handlers sequentially, + // we can use plain vars and mutable data structures. + // We do not need to worry about inter-thread synchronization + // provided that all accesses are only from within the handlers. + + /** The [[OrderedSource]]s that have been created and not yet fully stopped + * An [[OrderedSource]] gets added to this map when it is created in [[createActiveSource]]. + * It gets removed when it has completed and all its elements have been emitted or evicted. + */ + private[this] val orderedSources: mutable.Map[OrderedSourceId, OrderedSource] = + mutable.Map.empty[OrderedSourceId, OrderedSource] + + /** The data associated with an ordered source and its state. + * + * The state evolves according to the following diagram where each box represents one or multiple states. 
+ * The letters `A`, `B`, and `C` stand for [[OrderedSource.isActive]], [[OrderedSource.isInBucket]], + * and [[OrderedSource.hasCompleted]], respectively. If they are present in a box, + * the states represented by the box have the corresponding predicate evaluate to true. + * If the box prefixes them with `!`, the predicate must evaluate to false in this state. + * The letter `P` represents whether the [[OrderedSource.inlet]] has been pulled. + * It is omitted when the pulling state is irrelevant. + * + * The `remove` arrows going nowhere indicate that the ordered source ends its lifecycle + * by being removed from [[orderedSources]]. + * + *
+      *                                              │start
+      *            ┌─────┐         stop           ┌──▼──┐                 ┌─────┐
+      *        ┌───┤     ◄────────────────────────┤     │                 │     │
+      *        │   │ !A  │                        │  A  │    complete     │  A  │ emit ActiveSourceTermination
+      *    next│   │ !B  │                        │ !B  ├─────────────────► !B  ├─────────────────────────────►
+      * element│   │ !C  │                        │ !C  │                 │  C  │ remove
+      *        │   │  P  │                        │  P  │                 │     │
+      *        └───►     ◄────────┐          ┌────►     ◄────────┐        │     │
+      *            └──┬──┘        │      emit│    └──┬─┬┘        │        └──▲──┘
+      *               │           │    bucket│       │ │         │           │
+      *               │           │    if not│       │ └─────────┘           │
+      *               │           │  rejected│       │   next element        │
+      *               │           │          │       │   with smaller        │
+      *               │           │          │       │   offset              │
+      *               │           │       ┌──┴──┐    │                       │
+      *               │           │       │     │    │                       │
+      *               │           │       │  A  │    │next                   │emit bucket
+      *               │complete   │       │ !B  │    │element                │if not rejected
+      *               │           │       │ !C  │    │with                   │
+      *               │           │       │ !P  │    │higher                 │
+      *               │           │       │     │    │offset                 │
+      *               │           │       └──▲──┘    │                       │
+      *               │           │          │       │                       │
+      *               │           │          │       │                       │
+      *               │           │    remove│       │                       │
+      *               │           │    bucket│       │                       │
+      *            ┌──▼──┐        │          │    ┌──▼──┐                 ┌──┴──┐        ┌─────┐
+      *            │     │        │          └────┤     │                 │     │        │     │
+      *            │ !A  │        │               │  A  │                 │  A  │        │ !A  │
+      *            │ !B  │        │    stop       │  B  │    complete     │  B  │  stop  │ !B  │
+      *    ◄───────┤  C  │        └───────────────┤ !C  ├─────────────────►  C  ├────────►  C  ├───────►
+      *     remove │     │                        │ !P  │                 │ !P  │        │ !P  │ remove
+      *            │     │                        │     │                 │     │        │     │
+      *            └─────┘                        └─────┘                 └─────┘        └─────┘
+      * 
+ * + * @param inlet The inlet through which the source's elements are passed. + * We pull the inlet immediately upon creation and then whenever the source's last element + * is emitted downstream or its bucket is rejected. + */ + private final class OrderedSource( + val name: Name, + val config: Config, + val inlet: SubSinkInlet[A], + val killSwitchCell: SingleUseCell[KillSwitch], + ) { + import TraceContext.Implicits.Empty.* + + /** Whether the [[OrderedSource]] is active, i.e., it has not yet been stopped + * due to a configuration change or completion of the configuration stream. + */ + private var active: Boolean = true + def isActive: Boolean = active + def stop(): Unit = { + if (enableInvariantCheck) { + ErrorUtil.requireState(isActive, s"Cannot stop the source $name twice.") + } + active = false + } + + /** The bucket, if any, that contains the last element that was pulled from this ordered source. */ + private var lastBucket: Option[ops.Bucket] = None + def isInBucket: Boolean = lastBucket.nonEmpty + def addToBucket(bucket: ops.Bucket): Unit = { + if (enableInvariantCheck) { + implicit val prettyBucket: Pretty[ops.Bucket] = ops.prettyBucket + ErrorUtil.requireState( + lastBucket.isEmpty, + show"Cannot add source $name to another bucket $bucket. It is already in $lastBucket", + ) + } + lastBucket = Some(bucket) + } + def removeFromBucket(): Unit = { lastBucket = None } + def getBucket: Option[ops.Bucket] = lastBucket + + /** [[scala.None$]] as long as the [[com.digitalasset.canton.util.OrderedBucketMergeHub.OrderedSourceSignal.Completed]] + * signal from the ordered source has not yet been processed. + * When the [[com.digitalasset.canton.util.OrderedBucketMergeHub.OrderedSourceSignal.Completed]] signal is processed + * then this is set to contain the completion reason. 
+ */ + private var completedWith: Option[Option[Throwable]] = None + def getCompletion: Option[Option[Throwable]] = completedWith + def hasCompleted: Boolean = completedWith.isDefined + def completeWith(cause: Option[Throwable]) = { + if (enableInvariantCheck) { + ErrorUtil.requireState( + !hasCompleted, + show"Cannot complete source $name twice with cause $cause. It has previously been completed with $getCompletion.", + ) + } + completedWith = Some(cause) + } + + def checkInvariant(context: String): Unit = { + import TraceContext.Implicits.Empty.* + ErrorUtil.requireState( + active || !isInBucket, + s"[$context] Stopped source $name must have an empty lastBucket", + ) + } + } + + /** We use our own internal IDs [[com.digitalasset.canton.util.OrderedBucketMergeHub.OrderedSourceId]]. + * `Name`s are not good enough: a reconfiguration may stop the previous ordered source + * and create a new one with the same name; so we would not be able to distinguish between the old and the new one. + */ + private[this] val orderedSourceIdGenerator = new AtomicInteger() + private[this] def nextOrderedSourceId: OrderedSourceId = + orderedSourceIdGenerator.getAndIncrement() + + /** The currently configured threshold */ + private[this] var currentThreshold: Int = 0 + + /** Exclusive lower bound for the next offset to emit. */ + private[this] var lowerBoundNextOffsetExclusive: Offset = ops.exclusiveLowerBoundForBegin + + /** Caches the last value that was queued for emission, if any. + * If so, its offset is at most [[lowerBoundNextOffsetExclusive]]. + */ + private[this] var lastBucketQueuedForEmission: Option[OutputElement[Name, A]] = None + + /** Contains the equivalence classes of the inspected elements from the ordered sources so far. + * + * This is somewhat inefficient: whenever we emit an element, + * we need to clean up the outdated buckets with lower or equal offset, + * which looks at all buckets. 
We could avoid this by keeping the buckets in a map + * ordered by offset, but this seemed overkill so far given that the expected number of buckets is small + * (<< 100). + */ + private[this] val buckets + : mutable.Map[ops.Bucket, NonEmpty[Seq[BucketElement[OrderedSource, A]]]] = + mutable.Map.empty[ops.Bucket, NonEmpty[Seq[BucketElement[OrderedSource, A]]]] + + /** Whether upstream has completed and we're now just winding down by stopping all the remaining ordered sources */ + private[this] var upstreamCompleted: Option[Option[Throwable]] = None + + /** A promise for the materialized future. + * To be completed when the stage has completed and all ordered sources have finished. + */ + private[this] val completionPromise = Promise[Done]() + def completionFuture: Future[Done] = completionPromise.future + + /** Collects the completion futures from all ordered sources + * so that they can be passed into the [[completionFuture]]. + */ + private[this] val flushFutureForOrderedSources = + new FlushFuture("OrderedBucketMergeHub", loggerFactory) + + // The invariant should hold at the start. 
+ checkInvariantIfEnabled("Constructor") + + private[this] val outHandler: OutHandler = new OutHandler { + override def onPull(): Unit = () + + override def onDownstreamFinish(cause: Throwable): Unit = { + noTracingLogger.debug("Downstream cancelled: stopping immediately") + checkInvariantIfEnabled(s"$qualifiedNameOfCurrentFunc begin") + // Propagate the cancellation upstream + cancel(in, cause) + stopAllActiveSources() + completeStage(None, force = true) + } + } + setHandler(out, LoggingOutHandler(noTracingLogger, "OrderedBucketMergeHub.out")(outHandler)) + + private[this] val configChangeHandler: InHandler = new InHandler { + override def onPush(): Unit = { + checkInvariantIfEnabled(s"in-handler $qualifiedNameOfCurrentFunc begin") + + val nextConfig = grab(in) + noTracingLogger.debug(s"Next config $nextConfig") + + val sourcesToStopB = Seq.newBuilder[(OrderedSourceId, OrderedSource)] + val namesToKeepB = Set.newBuilder[Name] + orderedSources.foreach { case (id, orderedSource) => + val keep = nextConfig.sources.get(orderedSource.name).contains(orderedSource.config) + if (keep) namesToKeepB += orderedSource.name + else if (orderedSource.isActive) sourcesToStopB += (id -> orderedSource) + } + val sourcesToStop = sourcesToStopB.result() + val namesToKeep = namesToKeepB.result() + + val sourcesToCreate = nextConfig.sources.view.filterKeys(!namesToKeep.contains(_)) + + sourcesToStop.foreach { case (id, source) => stopActiveSource(id, source) } + val materializedValues = sourcesToCreate.map { case (name, config) => + val materializedValue = createActiveSource(name, config) + name -> materializedValue + }.toMap + val loweredThreshold = currentThreshold > nextConfig.threshold.value + currentThreshold = nextConfig.threshold.value + + val newConfigAndMat = + nextConfig.map((name, config) => (config, materializedValues.get(name))) + emit(out, NewConfiguration(newConfigAndMat, lowerBoundNextOffsetExclusive)) + + // Immediately signal demand for the next config change + 
pull(in) + + // If the threshold has been lowered, check whether some buckets now reach the threshold. + // If so, emit the elements in offset order. + // If several buckets of the same offset reach the threshold, pick one of then non-deterministically. + // Then clean up the remaining buckets. + // + // The non-determinism happens only if the assumption about the number of faulty nodes is violated. + // In practice, the non-determinism can lead to a ledger fork and is therefore security sensitive. + // + // TODO(#14365) Decide what to do if we detect non-determinism + if (loweredThreshold) { + implicit val orderingOffset: Ordering[Offset] = ops.orderingOffset + val fullBucketsByOffset = buckets.toSeq + .filter { case (_, elems) => elems.sizeIs >= currentThreshold } + .sortBy { case (bucket, _) => ops.offsetOfBucket(bucket) } + fullBucketsByOffset.foreach { case (bucket, elems) => + emitOrEvictBucket(bucket, elems) + } + fullBucketsByOffset.lastOption.foreach { case (bucket, _) => + evictBucketsUpToIncluding(ops.offsetOfBucket(bucket)) + } + } + + checkInvariantIfEnabled(s"in-handler $qualifiedNameOfCurrentFunc end") + } + + override def onUpstreamFinish(): Unit = { + noTracingLogger.debug("Config source has completed. Draining all sources...") + terminateStage(None) + } + + override def onUpstreamFailure(ex: Throwable): Unit = { + noTracingLogger.debug("Config source has aborted. Draining all sources...", ex) + terminateStage(Some(ex)) + } + } + setHandler( + in, + LoggingInHandler(noTracingLogger, "OrderedBucketMergeHub.in")(configChangeHandler), + ) + + private[this] def terminateStage(cause: Option[Throwable]): Unit = { + checkInvariantIfEnabled(s"$qualifiedNameOfCurrentFunc begin") + // Do not immediately complete or fail the stage + // because this would immediately cancel all current ordered sources. + // Instead, we want to stop and drain them and only afterwards complete or fail the stage. 
+ upstreamCompleted = Some(cause) + stopAllActiveSources() + completeStageIfDone() + checkInvariantIfEnabled(s"$qualifiedNameOfCurrentFunc end") + } + + private[this] def stopAllActiveSources(): Unit = { + orderedSources.foreach { case (id, source) => + if (source.isActive) stopActiveSource(id, source) + } + } + + override def preStart(): Unit = { + checkInvariantIfEnabled("preStart") + // Ask for the first config right at the beginning and make sure that we always pull. + pull(in) + } + + override def postStop(): Unit = { + // Remove references to avoid memory leak + lastBucketQueuedForEmission = None + // Under unforeseen circumstances (e.g., handlers failing due to exceptions), + // make sure that we complete the completion future. + completeCompletionFuture() + } + + /** Callback used to receive signals from the ordered sources. + * Avoids that we have to access the mutable states from the ordered source's handlers. + */ + private[this] val orderedSourceCallback = + getAsyncCallback[OrderedSourceSignal[Name, A]]( + PekkoUtil.loggingAsyncCallback( + noTracingLogger, + "OrderedBucketMergeHub.orderedSourceCallback", + )( + processOrderedSourceSignal + ) + ) + + private[this] def processOrderedSourceSignal(signal: OrderedSourceSignal[Name, A]): Unit = + signal match { + case OrderedSourceSignal.NextElement(id, name, elem) => + processNextElement(id, name, elem) + + case OrderedSourceSignal.Completed(id, name, cause) => + processCompletion(id, name, cause) + } + + /** Process the next element from the given source, irrespective of whether it is being stopped. 
+ */ + private[this] def processNextElement(id: OrderedSourceId, name: Name, elem: A): Unit = { + checkInvariantIfEnabled( + s"$qualifiedNameOfCurrentFunc begin: id=$id, name=$name, offset: ${ops.offsetOf(elem)}" + ) + + implicit val traceContext: TraceContext = ops.traceContextOf(elem) + orderedSources.get(id) match { + case None => + // This should only happen if the completion message of a source overtakes the signal for the next element. + // It is not clear from the specification of getAsyncCallback whether this can happen. + // But if it happens, it does not really matter; we simply drop the element. + logger.debug( + s"Dropping element at offset ${ops.offsetOf(elem)} from inactive source $name (id $id)" + ) + case Some(source) => + // Since we do not keep track of the signals going through the AsyncCallback, + // we cannot really phrase the following conditions as an invariant check. + if (enableInvariantCheck) { + ErrorUtil.requireState( + source.name == name, + s"Name of the ordered source ${source.name} (id $id) differed from declared name $name in next-element signal", + ) + ErrorUtil.requireState( + !source.isInBucket, + s"Received a next element from ordered source $name although there is already one waiting in a bucket", + ) + } + + if (source.isActive) { + val bucket = ops.bucketOf(elem) + val offset = ops.offsetOfBucket(bucket) + if (ops.orderingOffset.compare(offset, lowerBoundNextOffsetExclusive) <= 0) { + logger.debug( + show"Dropping next element from source $name with offset $offset because it is not above the lower offset bound of $lowerBoundNextOffsetExclusive" + ) + pullIfNotCompleted(id) + } else { + implicit val prettyBucket: Pretty[ops.Bucket] = ops.prettyBucket + logger.debug( + show"Adding element with offset $offset from source $name (id: $id) to bucket $bucket" + ) + val updatedBucketO = buckets.updateWith(bucket) { + case None => Some(NonEmpty(Seq, BucketElement(id, source, elem))) + case Some(elems) => Some(BucketElement(id, source, 
elem) +: elems) + } + val elems = updatedBucketO.getOrElse( + ErrorUtil.internalError( + new IllegalStateException( + "updateWith returned None even though the update function always returns Some" + ) + ) + ) + source.addToBucket(bucket) + + // Have we have found the next element to emit? + if (elems.sizeIs >= currentThreshold) { + emitBucket(bucket, offset, elems) + evictBucketsUpToIncluding(offset) + } + } + } else { + // Since the source has been stopped, we do not need to emit a termination signal here, + // so we can simply pull to drain it. + logger.debug( + s"Drained one element at offset ${ops.offsetOf(elem)} from source $name (id $id)" + ) + pullIgnoringClosed(source.inlet) + } + } + + checkInvariantIfEnabled( + s"$qualifiedNameOfCurrentFunc end: id=$id, name=$name, offset: ${ops.offsetOf(elem)}" + ) + } + + private[this] def emitBucket( + bucket: ops.Bucket, + offset: Offset, + elems: NonEmpty[Seq[BucketElement[OrderedSource, A]]], + )(implicit traceContext: TraceContext): Unit = { + // Unless this is caused by a configuration change, + // we log this with the trace context that causes the bucket to reach the threshold. + // This makes sense because in a sequencer client, all the equivalent sequenced events will have the same trace context. + // So overall we get better tracing than if we used an empty trace context. + logger.debug( + s"Bucket $bucket for offset $offset has reached the threshold of $currentThreshold. Emitting next element." + ) + buckets.remove(bucket).discard[Option[NonEmpty[Seq[BucketElement[OrderedSource, A]]]]] + removeFromBuckets(elems) + val merged = elems.map { case BucketElement(_id, source, elem) => source.name -> elem }.toMap + val output = OutputElement(merged) + lowerBoundNextOffsetExclusive = offset + lastBucketQueuedForEmission = Some(output) + emit( + out, + output, + // Crucially, pull only after the emission. 
+ // This ensures that the emission buffer for OutputElements remains bounded + () => pullMultipleIfNotCompleted(elems), + ) + } + + /** Removes all buckets up to and including `offset` and pulls from the sources whose elements were rejected */ + private[this] def evictBucketsUpToIncluding(offset: Offset): Unit = + buckets.filterInPlace { (bucket, elems) => + val bucketOffset = ops.offsetOfBucket(bucket) + val retain = ops.orderingOffset.compare(bucketOffset, offset) > 0 + if (!retain) { + // TODO(#14365) This indicates some faulty nodes. Decide whether we should alert + removeBucketElements(bucket, bucketOffset, elems) + } + retain + } + + /** Emit (and pull thereafter) or evict (and pull immediately) the given bucket, + * depending on whether its offset is above the [[lowerBoundNextOffsetExclusive]]. + */ + private[this] def emitOrEvictBucket( + bucket: ops.Bucket, + elems: NonEmpty[Seq[BucketElement[OrderedSource, A]]], + ): Unit = { + import TraceContext.Implicits.Empty.* + val offset = ops.offsetOfBucket(bucket) + if (ops.orderingOffset.compare(offset, lowerBoundNextOffsetExclusive) > 0) { + emitBucket(bucket, offset, elems) + } else { + buckets.remove(bucket).discard[Option[NonEmpty[Seq[BucketElement[OrderedSource, A]]]]] + removeBucketElements(bucket, offset, elems) + } + } + + /** Updates the ordered sources after their elements have been rejected from the bucket and pulls them */ + private[this] def removeBucketElements( + bucket: ops.Bucket, + offset: Offset, + elems: NonEmpty[Seq[BucketElement[OrderedSource, A]]], + ): Unit = { + if (logger.underlying.isDebugEnabled) { + implicit val prettyBucket: Pretty[ops.Bucket] = ops.prettyBucket + val droppedSources = elems.map { case BucketElement(id, source, elem) => + s"${source.name} (id: $id)".unquoted + } + noTracingLogger.debug( + show"Dropping bucket $bucket for offset $offset with ${elems.size} sources: $droppedSources" + ) + } + removeFromBuckets(elems) + pullMultipleIfNotCompleted(elems) + } + + /** 
Process the signal that an ordered source has completed. + * If the ordered source still has a last element in a bucket, + * we wait until its fate is decided before we output the termination signal. + */ + private[this] def processCompletion( + id: OrderedSourceId, + name: Name, + cause: Option[Throwable], + ): Unit = { + checkInvariantIfEnabled(s"$qualifiedNameOfCurrentFunc begin: $name (id $id)") + orderedSources.get(id) match { + case None => + noTracingLogger.error( + s"Processed completion signal from inactive source $name (id $id)" + ) + completeStageIfDone() + case Some(source) => + noTracingLogger.debug(s"Processing completion signal from source $name (id $id)") + source.completeWith(cause) + if (!source.isInBucket) { + orderedSources.remove(id).discard[Option[OrderedSource]] + if (source.isActive) { + // It is safe to call emit here because if the source's last element is still waiting for emission, + // then this emission will be queued afterwards and thus come after the source's last element. + emit( + out, + ActiveSourceTerminated(source.name, cause), + () => completeStageIfDone(), + ) + } else { + // No need to emit a termination signal as we're draining the source + completeStageIfDone() + } + } else { + // Nothing to do here. + // The source is active by the invariant because lastBucket is non-empty. + // The source's lastBucket element will eventually be emitted or rejected, + // and then we will call `pullIfNotCompleted`. + // At that point, we will emit the termination event + // unless the source has been stopped in between. 
+ } + } + checkInvariantIfEnabled(s"$qualifiedNameOfCurrentFunc end: $name (id $id)") + } + + private[this] def completeStageIfDone(): Unit = + upstreamCompleted.foreach { cause => completeStage(cause, force = false) } + + private[this] def completeStage(cause: Option[Throwable], force: Boolean): Unit = { + lazy val outstanding = orderedSources.map { case (id, source) => + s"${source.name} (id $id)".unquoted + }.toSeq + if (orderedSources.isEmpty || force) { + noTracingLogger.debug("Completing the OrderedBucketMergeHub stage") + if (orderedSources.nonEmpty) { + noTracingLogger.info( + show"Forcefully cancelling the remaining ordered sources: $outstanding" + ) + orderedSources.foreach { case (_id, source) => source.inlet.cancel() } + } + cause.fold(completeStage())(failStage) + completeCompletionFuture() + } else + noTracingLogger.debug( + show"Cannot complete OrderedBucketMergeHub stage due to remaining ordered sources: $outstanding" + ) + } + + private[this] def completeCompletionFuture(): Unit = { + val directExecutionContext = DirectExecutionContext(noTracingLogger) + completionPromise.completeWith( + flushFutureForOrderedSources.flush().map(_ => Done)(directExecutionContext) + ) + } + + private[this] def removeFromBuckets(elems: Seq[BucketElement[OrderedSource, A]]): Unit = + elems.foreach { case BucketElement(_id, source, _elem) => source.removeFromBucket() } + + private[this] def pullMultipleIfNotCompleted( + toPull: Seq[BucketElement[OrderedSource, A]] + ): Unit = + toPull.foreach { case BucketElement(id, _, _) => pullIfNotCompleted(id) } + + private[this] def pullIfNotCompleted(id: OrderedSourceId): Unit = { + orderedSources.get(id).foreach { source => + source.getCompletion match { + case None => pullIgnoringClosed(source.inlet) + case Some(cause) => + noTracingLogger.debug(s"Removing completed source ${source.name} (id $id)") + // The ordered source has completed beforehand. Let's send the termination signal! 
+ orderedSources.remove(id).discard[Option[OrderedSource]] + emit(out, ActiveSourceTerminated(source.name, cause), () => completeStageIfDone()) + } + } + } + + private[this] def pullIgnoringClosed(inlet: SubSinkInlet[A]): Unit = { + // Use exception handling instead of checking .isClosed + // because it is unclear whether closing can happen concurrently. + try { + inlet.pull() + } catch { + case e: IllegalArgumentException if e.getMessage.contains("cannot pull closed port") => + // If the ordered source has already been closed, then there has been or will be a Completed callback + // that produces the ActiveSourceTerminated output if necessary and remove the source + } + } + + private[this] def createActiveSource(name: Name, config: Config): M = { + val id = nextOrderedSourceId + val newSource = ops.makeSource( + name, + config, + lowerBoundNextOffsetExclusive, + lastBucketQueuedForEmission.map(ops.toPriorElement).orElse(ops.priorElement), + ) + val subsink = new SubSinkInlet[A](s"OrderedMergeHub.sink($name-$id)") + val inHandler = + new ActiveSourceInHandler(id, name, () => subsink.grab(), orderedSourceCallback) + subsink.setHandler( + LoggingInHandler(noTracingLogger, s"OrderedMergeHub.sink($name-$id).in")(inHandler) + ) + + val killSwitchCell = new SingleUseCell[KillSwitch] + val source = new OrderedSource(name, config, subsink, killSwitchCell) + orderedSources.put(id, source).discard[Option[OrderedSource]] + + val graph = newSource.to(subsink.sink) + val (killSwitch, doneF, materializedValue) = subFusingMaterializer.materialize( + graph, + defaultAttributes = enclosingAttributes, + ) + + killSwitchCell.putIfAbsent(killSwitch).discard[Option[KillSwitch]] + flushFutureForOrderedSources.addToFlushWithoutLogging(s"source $name (id $id)")(doneF) + + subsink.pull() + materializedValue + } + + private[this] def stopActiveSource(id: OrderedSourceId, source: OrderedSource): Unit = { + val killSwitch = source.killSwitchCell.getOrElse { + import 
TraceContext.Implicits.Empty.* + ErrorUtil.invalidState(s"Kill switch for source ${source.name} (id: $id) is not available") + } + noTracingLogger.debug(s"Stopping source ${source.name} (id $id) by pulling its kill switch") + killSwitch.shutdown() + source.stop() + + // Remove the source's elements from its bucket. + source.getBucket match { + case Some(bucket) => + noTracingLogger.debug( + s"Removing ${source.name} (id $id)'s last element from bucket $bucket" + ) + buckets + .updateWith(bucket) { + case Some(elems) => NonEmpty.from(elems.filterNot(_.id == id)) + case None => + import TraceContext.Implicits.Empty.* + ErrorUtil.invalidState( + s"Invariant violation: Ordered source ${source.name} (id $id) is not present in its lastBucket" + ) + } + .discard[Option[NonEmpty[Seq[BucketElement[OrderedSource, A]]]]] + source.removeFromBucket() + if (!source.hasCompleted) { + pullIgnoringClosed(source.inlet) + } else { + orderedSources.remove(id).discard[Option[OrderedSource]] + } + case None => + // Nothing to do here + // If the source has been pulled, then it will eventually send a NextElement or Completion signal + // and this signal will either pull again or remove the source as a whole. + // Otherwise, an element of the source is in the emission buffer, + // so we will pull the source again after it has been emitted. + } + } + + private[this] def checkInvariantIfEnabled(context: => String): Unit = + if (enableInvariantCheck) invariant(context) else () + + private[this] def invariant(context: String): Unit = { + import TraceContext.Implicits.Empty.* + implicit val prettyBucket: Pretty[ops.Bucket] = ops.prettyBucket + // Must only be called from the main graph stage logic and not asynchronously! 
+ + def sourcesInBucketsConsistent(): Unit = { + buckets.foreach { case (bucket, elems) => + elems.foreach { elem => + val source = orderedSources.getOrElse( + elem.id, + ErrorUtil.internalError( + new IllegalStateException( + s"[$context] Bucket $bucket refers to nonexistent source ${elem.source.name} (id: ${elem.id})" + ) + ), + ) + ErrorUtil.requireState( + source eq elem.source, + s"[$context] Bucket $bucket's source id ${elem.id} refers to a different source than orderedSources: ${elem.source} vs. $source", + ) + } + } + } + + def uniqueBucketedSources(): Unit = { + val namesInMoreThanOneBucket = + buckets.toSeq + .flatMap { case (bucket, elems) => elems.map(_.source.name -> bucket) } + .groupBy(_._1) + .filter { case (_, buckets) => buckets.sizeIs > 1 } + ErrorUtil.requireState( + namesInMoreThanOneBucket.isEmpty, + show"[$context] Source names appear multiple times in the buckets: $namesInMoreThanOneBucket", + ) + // Since we check that orderedSources is consistent with buckets, we can deduce that each ID appears at most once. 
+ } + + def lastBucketExists(): Unit = { + val lastBuckets = orderedSources.flatMap { case (id, source) => + source.getBucket.map(_ -> id -> source.name).toList + } + lastBuckets.foreach { case ((bucket, id), name) => + val elems = buckets.getOrElse( + bucket, + ErrorUtil.internalError( + new IllegalStateException( + s"[$context] Source $name (id $id)'s lastBucket refers to non-existent bucket $bucket" + ) + ), + ) + ErrorUtil.requireState( + elems.exists(_.id == id), + s"[$context] Source $id's lastBucket $bucket does not contain an element from the source", + ) + } + } + + def lastBucketComplete(): Unit = { + buckets.foreach { case (bucket, elems) => + elems.foreach { elem => + val lastBucket = elem.source.getBucket + ErrorUtil.requireState( + lastBucket.contains(bucket), + s"[$context] Source ${elem.source.name} (id: ${elem.id}) does not contain the bucket its element is in: $lastBucket", + ) + } + } + } + + def bucketsBelowThreshold(): Unit = { + buckets.foreach { case (bucket, elems) => + ErrorUtil.requireState( + elems.sizeIs < currentThreshold, + s"[$context] Bucket $bucket has more (${elems.size}) elements than the current threshold $currentThreshold", + ) + } + } + + def orderedSourceInvariant(): Unit = { + orderedSources.foreach { case (id, source) => + source.checkInvariant(s"$context/source ${source.name} (id $id)") + } + } + + def lastBucketQueuedForEmissionInvariant(): Unit = { + lastBucketQueuedForEmission.foreach { case OutputElement(elems) => + val buckets = elems.values.map(ops.bucketOf).toSeq + ErrorUtil.requireState( + buckets.distinct.sizeIs == 1, + s"[$context] Last bucket queued for emission contains elements from different buckets: $buckets", + ) + + val (_, elem) = elems.head1 + val offset = ops.offsetOf(elem) + ErrorUtil.requireState( + ops.orderingOffset.compare(offset, lowerBoundNextOffsetExclusive) <= 0, + s"[$context] Last bucket queued for emission with offset $offset must at most be the lower bound at $lowerBoundNextOffsetExclusive", 
+ ) + } + } + + sourcesInBucketsConsistent() + uniqueBucketedSources() + lastBucketExists() + lastBucketComplete() + bucketsBelowThreshold() + orderedSourceInvariant() + lastBucketQueuedForEmissionInvariant() + } + } + + /** This handler receives the elements from an ordered source. + * It belongs to a different materialized graph than the [[BucketingLogic]], + * so it must not access its mutable state. To enforce this, + * this class is lexicographically outside of the [[BucketingLogic]]. + * Instead, we go through the provided [[org.apache.pekko.stream.stage.AsyncCallback]] + * to signal the arrival and completion thread-safely. + */ + private[this] class ActiveSourceInHandler( + id: OrderedSourceId, + name: Name, + grab: () => A, + callback: AsyncCallback[OrderedSourceSignal[Name, A]], + ) extends InHandler { + override def onPush(): Unit = { + val elem = grab() + if (logger.underlying.isDebugEnabled) { + implicit val traceContext: TraceContext = ops.traceContextOf(elem) + logger.debug( + s"Signalling element with offset ${ops.offsetOf(elem)} from source $name (id $id)" + ) + } + callback.invoke( + OrderedBucketMergeHub.OrderedSourceSignal.NextElement(id, name, elem) + ) + } + + override def onUpstreamFinish(): Unit = { + noTracingLogger.debug(s"Signalling completion of source $name (id $id)") + callback.invoke( + OrderedBucketMergeHub.OrderedSourceSignal.Completed(id, name, None) + ) + } + + override def onUpstreamFailure(ex: Throwable): Unit = { + noTracingLogger.debug(s"Signalling abortion of source $name (id $id)", ex) + callback.invoke( + OrderedBucketMergeHub.OrderedSourceSignal.Completed(id, name, Some(ex)) + ) + } + } + + override def createLogicAndMaterializedValue( + inheritedAttributes: Attributes + ): (GraphStageLogic, Future[Done]) = { + val logic = new BucketingLogic(inheritedAttributes) + logic -> logic.completionFuture + } +} + +object OrderedBucketMergeHub { + + /** Outputs of the [[OrderedBucketMergeHub]], combines actual data with control 
messages */ + sealed trait Output[Name, +ConfigAndMat, +A, +Offset] extends Product with Serializable { + def map[ConfigAndMat2, A2, Offset2]( + fConfigAndMat: (Name, ConfigAndMat) => ConfigAndMat2, + fA: (Name, A) => A2, + fOffset: Offset => Offset2, + ): Output[Name, ConfigAndMat2, A2, Offset2] + } + + /** Actual data output */ + final case class OutputElement[Name, +A](elem: NonEmpty[Map[Name, A]]) + extends Output[Name, Nothing, A, Nothing] { + def map[A2](fA: (Name, A) => A2): OutputElement[Name, A2] = OutputElement( + elem.map { case (name, a) => name -> fA(name, a) }.toMap + ) + override def map[ConfigAndMat2, A2, Offset2]( + fConfigAndMat: (Name, Nothing) => ConfigAndMat2, + fA: (Name, A) => A2, + fOffset: Nothing => Offset2, + ): OutputElement[Name, A2] = map(fA) + } + + sealed trait ControlOutput[Name, +ConfigAndMat, +Offset] + extends Output[Name, ConfigAndMat, Nothing, Offset] { + def map[Config2, Offset2]( + fConfigAndMat: (Name, ConfigAndMat) => Config2, + fOffset: Offset => Offset2, + ): ControlOutput[Name, Config2, Offset2] + + override def map[Config2, A2, Offset2]( + fConfigAndMat: (Name, ConfigAndMat) => Config2, + fA: (Name, Nothing) => A2, + fOffset: Offset => Offset2, + ): ControlOutput[Name, Config2, Offset2] = map(fConfigAndMat, fOffset) + } + + /** Signals the new configuration that is active for all subsequent elements until the next [[NewConfiguration]] + * and the materialized values for the newly created sources. 
+ */ + final case class NewConfiguration[Name, +ConfigAndMat, +Offset]( + newConfig: OrderedBucketMergeConfig[Name, ConfigAndMat], + startingOffset: Offset, + ) extends ControlOutput[Name, ConfigAndMat, Offset] { + override def map[ConfigAndMat2, Offset2]( + fConfigAndMat: (Name, ConfigAndMat) => ConfigAndMat2, + fOffset: Offset => Offset2, + ): NewConfiguration[Name, ConfigAndMat2, Offset2] = NewConfiguration( + newConfig.map(fConfigAndMat), + fOffset(startingOffset), + ) + } + + /** Signals that the source has terminated with the given cause. + * Downstream is responsible for reacting to the termination signal + * and changing the configuration if necessary. + */ + final case class ActiveSourceTerminated[Name](name: Name, cause: Option[Throwable]) + extends ControlOutput[Name, Nothing, Nothing] { + override def map[Config2, Offset2]( + fConfigAndMat: (Name, Nothing) => Config2, + fOffset: Nothing => Offset2, + ): ActiveSourceTerminated[Name] = this + } + + /** The internal type of IDs for ordered sources */ + private type OrderedSourceId = Int + + /** Internal signal between the ordered sources and the graph stage logic of the [[OrderedBucketMergeHub]] */ + private sealed trait OrderedSourceSignal[Name, +A] extends Product with Serializable { + def id: OrderedSourceId + + def name: Name + } + + private object OrderedSourceSignal { + final case class NextElement[Name, +A]( + override val id: OrderedSourceId, + override val name: Name, + elem: A, + ) extends OrderedSourceSignal[Name, A] + + final case class Completed[Name]( + override val id: OrderedSourceId, + override val name: Name, + cause: Option[Throwable], + ) extends OrderedSourceSignal[Name, Nothing] + } + + private final case class BucketElement[S, +A]( + id: OrderedSourceId, + source: S, + elem: A, + ) +} + +/** @param threshold The threshold of equivalent elements to reach before it can be emitted. 
+ * @param sources The configurations to be used with [[OrderedBucketMergeHubOps.makeSource]] to create a source. + */ +final case class OrderedBucketMergeConfig[Name, +Config]( + threshold: PositiveInt, + sources: NonEmpty[Map[Name, Config]], +) { + def map[Config2](f: (Name, Config) => Config2): OrderedBucketMergeConfig[Name, Config2] = + OrderedBucketMergeConfig( + threshold, + sources.map { case (name, config) => name -> f(name, config) }.toMap, + ) +} + +trait OrderedBucketMergeHubOps[Name, A, Config, Offset, +M] { + + /** The type of equivalence classes for the merged elements */ + type Bucket + def prettyBucket: Pretty[Bucket] + + /** Defines an equivalence relation on `A` */ + def bucketOf(x: A): Bucket + + /** The ordering for the offsets. + * This defines a total preorder (AKA total quasi-order) on buckets + * and elements via the projections [[offsetOfBucket]] and [[bucketOf]] + */ + def orderingOffset: Ordering[Offset] + + def offsetOfBucket(bucket: Bucket): Offset + + // Make sure that the offset assignment respects bucketing + final def offsetOf(x: A): Offset = offsetOfBucket(bucketOf(x)) + + /** The initial offset to start from */ + def exclusiveLowerBoundForBegin: Offset + + /** The type of prior elements that is passed to [[makeSource]]. + * [[toPriorElement]] defines an abstraction function from + * [[com.digitalasset.canton.util.OrderedBucketMergeHub.OutputElement]]s. + */ + type PriorElement + + /** The prior element to be passed to [[makeSource]] at the start */ + def priorElement: Option[PriorElement] + + /** An abstraction function from [[com.digitalasset.canton.util.OrderedBucketMergeHub.OutputElement]] to [[PriorElement]] + */ + def toPriorElement(output: OutputElement[Name, A]): PriorElement + + def traceContextOf(x: A): TraceContext + + /** Creates a new source upon a config change. + * The returned source is materialized at most once. 
+ * To close the source, the materialized [[org.apache.pekko.stream.KillSwitch]] is pulled + * and the source is drained until it completes. + * The materialized [[scala.concurrent.Future]] should complete when all internal computations have stopped. + * The [[OrderedBucketMergeHub]]'s materialized [[scala.concurrent.Future]] completes only after + * these materialized futures of all created ordered sources have completed. + * + * @param priorElement The prior element that last reached the threshold or [[priorElement]] if there was none. + */ + def makeSource( + name: Name, + config: Config, + exclusiveStart: Offset, + priorElement: Option[PriorElement], + ): Source[A, (KillSwitch, Future[Done], M)] +} + +object OrderedBucketMergeHubOps { + def apply[Name, A <: HasTraceContext, Config, Offset: Ordering, B: Pretty, M]( + initialOffset: Offset + )(toBucket: A => B, toOffset: B => Offset)( + mkSource: (Name, Config, Offset, Option[A]) => Source[A, (KillSwitch, Future[Done], M)] + ): OrderedBucketMergeHubOps[Name, A, Config, Offset, M] = + new OrderedBucketMergeHubOps[Name, A, Config, Offset, M] { + override type PriorElement = A + override type Bucket = B + override def prettyBucket: Pretty[Bucket] = implicitly + override def bucketOf(x: A): Bucket = toBucket(x) + override def orderingOffset: Ordering[Offset] = implicitly + override def offsetOfBucket(bucket: Bucket): Offset = toOffset(bucket) + override def exclusiveLowerBoundForBegin: Offset = initialOffset + override def traceContextOf(x: A): TraceContext = x.traceContext + override def makeSource( + name: Name, + config: Config, + exclusiveStart: Offset, + priorElement: Option[PriorElement], + ): Source[A, (KillSwitch, Future[Done], M)] = + mkSource(name, config, exclusiveStart, priorElement) + override def priorElement: Option[A] = None + override def toPriorElement(output: OutputElement[Name, A]): A = output.elem.head1._2 + } +} diff --git 
a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/PekkoUtil.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/PekkoUtil.scala new file mode 100644 index 0000000000..dea1e8610a --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/PekkoUtil.scala @@ -0,0 +1,776 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util + +import cats.{Applicative, Eval, Functor, Traverse} +import com.daml.grpc.adapter.{ExecutionSequencerFactory, PekkoExecutionSequencerPool} +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.DiscardOps +import com.digitalasset.canton.concurrent.{DirectExecutionContext, Threading} +import com.digitalasset.canton.config.RequireTypes.NonNegativeInt +import com.digitalasset.canton.lifecycle.UnlessShutdown.{AbortedDueToShutdown, Outcome} +import com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, UnlessShutdown} +import com.digitalasset.canton.logging.pretty.Pretty +import com.digitalasset.canton.logging.{HasLoggerName, NamedLoggingContext} +import com.digitalasset.canton.util.ShowUtil.* +import com.digitalasset.canton.util.Thereafter.syntax.* +import com.digitalasset.canton.util.TryUtil.* +import com.typesafe.config.ConfigFactory +import com.typesafe.scalalogging.Logger +import org.apache.pekko.actor.ActorSystem +import org.apache.pekko.stream.scaladsl.{Flow, FlowOps, FlowOpsMat, Keep, RunnableGraph, Source} +import org.apache.pekko.stream.stage.{ + GraphStageLogic, + GraphStageWithMaterializedValue, + InHandler, + OutHandler, +} +import org.apache.pekko.stream.{ + ActorAttributes, + Attributes, + FlowShape, + Inlet, + KillSwitch, + KillSwitches, + Materializer, + Outlet, + QueueCompletionResult, + QueueOfferResult, + Supervision, + UniqueKillSwitch, +} +import org.apache.pekko.{Done, NotUsed} + +import 
java.util.concurrent.atomic.{AtomicBoolean, AtomicReference} +import scala.collection.concurrent.TrieMap +import scala.concurrent.duration.FiniteDuration +import scala.concurrent.{ExecutionContext, Future, Promise} +import scala.language.implicitConversions +import scala.util.control.NonFatal +import scala.util.{Failure, Success, Try} + +object PekkoUtil extends HasLoggerName { + + /** Utility function to run the graph supervised and stop on an unhandled exception. + * + * By default, an Pekko flow will discard exceptions. Use this method to avoid discarding exceptions. + */ + def runSupervised[T]( + reporter: Throwable => Unit, + graph: RunnableGraph[T], + debugLogging: Boolean = false, + )(implicit + mat: Materializer + ): T = { + val tmp = graph + .addAttributes(ActorAttributes.withSupervisionStrategy { ex => + reporter(ex) + Supervision.Stop + }) + (if (debugLogging) + tmp.addAttributes(ActorAttributes.debugLogging(true)) + else tmp) + .run() + } + + /** Create an Actor system using the existing execution context `ec` + */ + def createActorSystem(namePrefix: String)(implicit ec: ExecutionContext): ActorSystem = + ActorSystem( + namePrefix + "-actor-system", + defaultExecutionContext = Some(ec), + config = Some(ConfigFactory.load), + ) + + /** Create a new execution sequencer factory (mainly used to create a ledger client) with the existing actor system `actorSystem` + */ + def createExecutionSequencerFactory(namePrefix: String, logger: Logger)(implicit + actorSystem: ActorSystem + ): ExecutionSequencerFactory = + new PekkoExecutionSequencerPool( + namePrefix + "-execution-sequencer", + actorCount = Threading.detectNumberOfThreads(logger), + ) + + /** Remembers the last `memory` many elements that have already been emitted previously. + * Passes those remembered elements downstream with each new element. + * The current element is the [[com.daml.nonempty.NonEmptyCollInstances.NEPreservingOps.last1]] + * of the sequence. 
+ * + * [[remember]] differs from [[org.apache.pekko.stream.scaladsl.FlowOps.sliding]] in + * that [[remember]] emits elements immediately when the given source emits, + * whereas [[org.apache.pekko.stream.scaladsl.FlowOps.sliding]] only after the source has emitted enough elements to fill the window. + */ + def remember[A, Mat]( + graph: FlowOps[A, Mat], + memory: NonNegativeInt, + ): graph.Repr[NonEmpty[Seq[A]]] = { + // Prepend window many None to the given source + // so that sliding starts emitting upon the first element received + graph + .map(Some(_)) + .prepend(Source(Seq.fill(memory.value)(None))) + .sliding(memory.value + 1) + .mapConcat { noneOrElems => + // dropWhile is enough because None can only appear in the prefix + val elems = noneOrElems + .dropWhile(_.isEmpty) + .map(_.getOrElse(throw new NoSuchElementException("Some did not contain a value"))) + // Do not emit anything if `noneOrElems` is all Nones, + // because then the source completed before emitting any elements + NonEmpty.from(elems) + } + } + + /** A version of [[org.apache.pekko.stream.scaladsl.FlowOps.mapAsync]] that additionally allows to pass state of type `S` between + * every subsequent element. Unlike [[org.apache.pekko.stream.scaladsl.FlowOps.statefulMapConcat]], the state is passed explicitly. 
+ * Must not be run with supervision strategies [[org.apache.pekko.stream.Supervision.Restart]] nor [[org.apache.pekko.stream.Supervision.Resume]] + */ + def statefulMapAsync[Out, Mat, S, T](graph: FlowOps[Out, Mat], initial: S)( + f: (S, Out) => Future[(S, T)] + )(implicit loggingContext: NamedLoggingContext): graph.Repr[T] = { + val directExecutionContext = DirectExecutionContext(loggingContext.tracedLogger) + graph + .scanAsync((initial, Option.empty[T])) { case ((state, _), next) => + f(state, next) + .map { case (newState, out) => (newState, Some(out)) }(directExecutionContext) + } + .drop(1) // The first element is `(initial, empty)`, which we want to drop + .map( + _._2.getOrElse( + ErrorUtil.internalError(new NoSuchElementException("scanAsync did not return an element")) + ) + ) + } + + /** Version of [[org.apache.pekko.stream.scaladsl.FlowOps.mapAsync]] for a [[com.digitalasset.canton.lifecycle.FutureUnlessShutdown]]. + * If `f` returns [[com.digitalasset.canton.lifecycle.UnlessShutdown.AbortedDueToShutdown]] on one element of + * `source`, then the returned source returns [[com.digitalasset.canton.lifecycle.UnlessShutdown.AbortedDueToShutdown]] + * for all subsequent elements as well. + * + * If `parallelism` is one, ensures that `f` is called sequentially for each element of `source` + * and that `f` is not invoked on later stream elements if `f` returns + * [[com.digitalasset.canton.lifecycle.UnlessShutdown.AbortedDueToShutdown]] for an earlier element. + * If `parallelism` is greater than one, `f` may be invoked on later stream elements + * even though an earlier invocation results in `f` returning + * [[com.digitalasset.canton.lifecycle.UnlessShutdown.AbortedDueToShutdown]]. 
+ * + * '''Emits when''' the Future returned by the provided function finishes for the next element in sequence + * + * '''Backpressures when''' the number of futures reaches the configured parallelism and the downstream + * backpressures or the first future is not completed + * + * '''Completes when''' upstream completes and all futures have been completed and all elements have been emitted, + * including those for which the future did not run due to earlier [[com.digitalasset.canton.lifecycle.UnlessShutdown.AbortedDueToShutdown]]s. + * + * '''Cancels when''' downstream cancels + * + * @param parallelism The parallelism level. Must be at least 1. + * @throws java.lang.IllegalArgumentException if `parallelism` is not positive. + */ + def mapAsyncUS[A, Mat, B](graph: FlowOps[A, Mat], parallelism: Int)( + f: A => FutureUnlessShutdown[B] + )(implicit loggingContext: NamedLoggingContext): graph.Repr[UnlessShutdown[B]] = { + require(parallelism > 0, "Parallelism must be positive") + // If parallelism is 1, then the caller expects that the futures run in sequential order, + // so if one of them aborts due to shutdown we must not run the subsequent ones. + // For parallelism > 1, we do not have to stop immediately, as there is always a possible execution + // where the future may have been started before the first one aborted. + // So we just need to throw away the results of the futures and convert them into aborts. + if (parallelism == 1) { + val directExecutionContext = DirectExecutionContext(loggingContext.tracedLogger) + statefulMapAsync(graph, initial = false) { (aborted, next) => + if (aborted) Future.successful(true -> AbortedDueToShutdown) + else f(next).unwrap.map(us => !us.isOutcome -> us)(directExecutionContext) + } + } else { + val discardedInitial: UnlessShutdown[B] = AbortedDueToShutdown + // Mutable reference to short-circuit once we've observed the first aborted due to shutdown. 
+ val abortedFlag = new AtomicBoolean(false) + graph + .mapAsync(parallelism)(elem => + if (abortedFlag.get()) Future.successful(AbortedDueToShutdown) + else f(elem).unwrap + ) + .scan((false, discardedInitial)) { case ((aborted, _), next) => + if (aborted) (true, AbortedDueToShutdown) + else { + val abort = !next.isOutcome + if (abort) abortedFlag.set(true) + (abort, next) + } + } + .drop(1) // The first element is `(false, discardedInitial)`, which we want to drop + .map(_._2) + } + } + + /** Version of [[mapAsyncUS]] that discards the [[com.digitalasset.canton.lifecycle.UnlessShutdown.AbortedDueToShutdown]]s. + * + * '''Completes when''' upstream completes and all futures have been completed and all elements have been emitted. + */ + def mapAsyncAndDrainUS[A, Mat, B](graph: FlowOps[A, Mat], parallelism: Int)( + f: A => FutureUnlessShutdown[B] + )(implicit loggingContext: NamedLoggingContext): graph.Repr[B] = { + mapAsyncUS(graph, parallelism)(f) + // Important to use `collect` instead of `takeWhile` here + // so that the return source completes only after all `source`'s elements have been consumed. + // TODO(#13789) Should we cancel/pull a kill switch to signal upstream that no more elements are needed? + .collect { case Outcome(x) => x } + } + + /** Combines [[mapAsyncUS]] with [[statefulMapAsync]]. 
*/ + def statefulMapAsyncUS[Out, Mat, S, T](graph: FlowOps[Out, Mat], initial: S)( + f: (S, Out) => FutureUnlessShutdown[(S, T)] + )(implicit loggingContext: NamedLoggingContext): graph.Repr[UnlessShutdown[T]] = { + implicit val directExecutionContext: ExecutionContext = + DirectExecutionContext(loggingContext.tracedLogger) + statefulMapAsync(graph, initial = Option(initial)) { + case (Some(s), next) => + f(s, next).unwrap.map { + case AbortedDueToShutdown => None -> AbortedDueToShutdown + case Outcome((nextS, t)) => Some(nextS) -> Outcome(t) + } + case (None, _next) => + Future.successful(None -> AbortedDueToShutdown) + } + } + + /** Combines two kill switches into one */ + class CombinedKillSwitch(private val killSwitch1: KillSwitch, private val killSwitch2: KillSwitch) + extends KillSwitch { + override def shutdown(): Unit = { + killSwitch1.shutdown() + killSwitch2.shutdown() + } + + override def abort(ex: Throwable): Unit = { + killSwitch1.abort(ex) + killSwitch2.abort(ex) + } + } + + /** Defines the policy when [[restartSource]] should restart the source, and the state from which the source should be restarted from. */ + trait RetrySourcePolicy[S, -A] { + + /** Determines whether the source should be restarted, and if so (([[scala.Some$]])), + * the backoff duration and the new state to restart from. + * Called after the current source has terminated normally or with an error. + * + * @param lastState The state that was used to create the current source + * @param lastEmittedElement The last element emitted by the current source and passed downstream. + * Downstream obviously need not yet have fully processed the element though. + * [[scala.None$]] if the current source did not emit anything, + * even if previous sources have emitted elements. + * @param lastFailure The error the current source failed with, if any. 
+ */ + def shouldRetry( + lastState: S, + lastEmittedElement: Option[A], + lastFailure: Option[Throwable], + ): Option[(FiniteDuration, S)] + } + + object RetrySourcePolicy { + private val NEVER: RetrySourcePolicy[Any, Any] = new RetrySourcePolicy[Any, Any] { + override def shouldRetry( + lastState: Any, + lastEmittedElement: Option[Any], + lastFailure: Option[Throwable], + ): Option[Nothing] = None + } + @SuppressWarnings(Array("org.wartremover.wart.AsInstanceOf")) + def never[S, A]: RetrySourcePolicy[S, A] = NEVER.asInstanceOf[RetrySourcePolicy[S, A]] + } + + /** Creates a source from `mkSource` from the `initial` state. + * Whenever this source terminates, `policy` determines whether another source shall be constructed (after a given delay) from a possibly new state. + * The returned source concatenates the output of all the constructed sources in order. + * At most one constructed source is active at any given point in time. + * + * Failures in the constructed sources are passed to the `policy`, but do not make it downstream. + * The `policy` is responsible for properly logging these errors if necessary. + * + * @return The concatenation of all constructed sources. + * This source is NOT a blueprint and MUST therefore be materialized at most once. + * Its materialized value provides a kill switch to stop retrying. + * Only the [[org.apache.pekko.stream.KillSwitch.shutdown]] method should be used; + * The switch does not short-circuit the already constructed sources though. + * synchronization may not work correctly with [[org.apache.pekko.stream.KillSwitch.abort]]. + * Downstream should not cancel; use the kill switch instead. + * + * The materialized [[scala.concurrent.Future]] can be used to synchronize on the computations for restarts: + * if the source is stopped with the kill switch, the future completes after the computations have finished. 
+ */ + def restartSource[S: Pretty, A]( + name: String, + initial: S, + mkSource: S => Source[A, (KillSwitch, Future[Done])], + policy: RetrySourcePolicy[S, A], + )(implicit + loggingContext: NamedLoggingContext, + materializer: Materializer, + ): Source[WithKillSwitch[A], (KillSwitch, Future[Done])] = { + val directExecutionContext = DirectExecutionContext(loggingContext.tracedLogger) + + // Use immediate acknowledgements and buffer size 1 to minimize the risk that + // several materializations of the returned source concurrently restart stuff. + val (boundedSourceQueue, source) = Source.queue[S](bufferSize = 1).preMaterialize() + val flushFuture = new FlushFuture(s"RestartSource $name", loggingContext.loggerFactory) + + def idempotentComplete(): Unit = + try { + boundedSourceQueue.complete() + } catch { + case _: IllegalStateException => + } + + trait KillSwitchForRestartSource extends KillSwitch { + type Handle + + /** Register a function to be executed when the kill switch is pulled. + * + * @return A handle with which the function can be removed again using [[removeOnClose]]. 
+ */ + def runOnClose(f: () => Unit): Handle + def removeOnClose(handle: Handle): Unit + } + + class KillSwitchForRestartSourceImpl extends KillSwitchForRestartSource { + override type Handle = AnyRef + + private val isClosing = new AtomicBoolean(false) + + private val completeOnClosing: scala.collection.concurrent.Map[Any, () => Unit] = + TrieMap.empty[Any, () => Unit] + + private def onClose(): Unit = { + isClosing.set(true) + completeOnClosing.foreach { case (_, f) => f() } + } + + def runOnClose(f: () => Unit): Handle = { + val handle = new Object() + completeOnClosing.put(handle, f).discard[Option[() => Unit]] + if (isClosing.get()) f() + handle + } + + def removeOnClose(handle: Handle): Unit = + completeOnClosing.remove(handle).discard[Option[() => Unit]] + + override def shutdown(): Unit = { + onClose() + idempotentComplete() + } + + override def abort(ex: Throwable): Unit = { + onClose() + try { + boundedSourceQueue.fail(ex) + } catch { + case _: IllegalStateException => + } + } + } + val killSwitchForSourceQueue: KillSwitchForRestartSource = new KillSwitchForRestartSourceImpl + + def restartFrom(nextState: S): Unit = { + loggingContext.debug(show"(Re)Starting the source $name from state $nextState") + boundedSourceQueue.offer(nextState) match { + case QueueOfferResult.Enqueued => + loggingContext.debug(s"Restarted the source $name with state $nextState") + case QueueOfferResult.Dropped => + // This should not happen + ErrorUtil.internalError( + new IllegalStateException( + s"Could not restart the source $name because the state queue is full. Has the returned source been materialized multiple times?" 
+ ) + ) + case _: QueueCompletionResult => + loggingContext.debug( + s"Not restarting $name because the restart source has already been completed" + ) + } + } + + // Kick it off with the initial state + restartFrom(initial) + + source + .flatMapConcat { state => + val lastObservedElem: AtomicReference[Option[A]] = new AtomicReference[Option[A]](None) + val lastObservedError: AtomicReference[Option[Throwable]] = + new AtomicReference[Option[Throwable]](None) + + def observeSuccess(elem: Try[A]): Try[A] = { + elem.foreach(x => lastObservedElem.set(Some(x))) + elem + } + val observeError: Throwable PartialFunction Try[A] = { case NonFatal(ex) => + lastObservedError.set(Some(ex)) + Failure(ex) + } + + // flatMapConcat swallows the materialized value of the inner sources + // So we make them accessible to the retry directly. + def uponTermination(handleKillSwitch: killSwitchForSourceQueue.Handle, doneF: Future[Done]) + : NotUsed = { + val afterTerminationF = doneF + .thereafter { outcome => + ErrorUtil.requireArgument( + outcome.isSuccess, + s"RestartSource $name: recover did not catch the error $outcome", + ) + // Deregister the inner streams kill switch upon termination to prevent memory leaks + killSwitchForSourceQueue.removeOnClose(handleKillSwitch) + policy.shouldRetry(state, lastObservedElem.get, lastObservedError.get) match { + case Some((backoff, nextState)) => + implicit val ec: ExecutionContext = directExecutionContext + + val delayedPromise = Promise[UnlessShutdown[Unit]]() + val handleDelayedPromise = killSwitchForSourceQueue.runOnClose { () => + delayedPromise.trySuccess(AbortedDueToShutdown).discard[Boolean] + } + val delayedF = DelayUtil.delay(backoff).thereafter { _ => + killSwitchForSourceQueue.removeOnClose(handleDelayedPromise) + delayedPromise.trySuccess(Outcome(())).discard[Boolean] + } + FutureUtil.doNotAwait( + delayedF, + s"DelayUtil.delay for RestartSource $name failed", + ) + + val restartF = delayedPromise.future.map { + case 
AbortedDueToShutdown => + loggingContext.debug(s"Not restarting $name due to shutdown") + case Outcome(()) => restartFrom(nextState) + } + FutureUtil.doNotAwait( + restartF, + s"Restart future for RestartSource $name failed", + ) + case None => + loggingContext.debug(s"Not retrying $name any more. Completing the source.") + idempotentComplete() + } + }(materializer.executionContext) + .thereafter(_.forFailed { ex => + loggingContext.error( + s"The retry policy for RestartSource $name failed with an error. Stop retrying.", + ex, + ) + idempotentComplete() + })(materializer.executionContext) + flushFuture.addToFlushAndLogError(show"RestartSource ${name.unquoted} at state $state")( + afterTerminationF + ) + NotUsed + } + + mkSource(state) + // Register the kill switch of the new source with the kill switch of the restart source + .mapMaterializedValue { case (killSwitch, doneF) => + val handle = killSwitchForSourceQueue.runOnClose(() => killSwitch.shutdown()) + // The completion future terminates with an exception when the source itself aborts with the same exception + // Since it is the responsibility of the policy to triage such exceptions, we do not log it here. + flushFuture.addToFlushWithoutLogging( + show"RestartSource ${name.unquoted}: completion future of $state" + )(doneF) + handle + } + .map(Success.apply) + // Grab any upstream errors of the current source + // before they escape to the concatenated source and bypass the restart logic + .recover(observeError) + // Observe elements only after recovering from errors so that the error cannot jump over the map. + .map(observeSuccess) + // Do not use the `doneF` future from the source to initiate the retry + // because it is unclear how long `doneF` will take to complete after the source has terminated. 
+ // Instead, decide on a retry eagerly as soon as we know that the last element of the source has been emitted + .watchTermination()(uponTermination) + } + // Filter out the exceptions from the recover + .mapConcat(_.toOption.map(WithKillSwitch(_)(killSwitchForSourceQueue))) + .watchTermination() { case (NotUsed, doneF) => + val everythingTerminatedF = + doneF.thereafterF { _ => + // Complete the queue of elements again, to make sure that + // downstream cancellations do not race with a restart. + idempotentComplete() + flushFuture.flush() + }( + // The direct execution context ensures that this runs as soon as the future's promise is completed, + // i.e., a downstream cancellation signal cannot propagate upstream while this is running. + directExecutionContext + ) + killSwitchForSourceQueue -> everythingTerminatedF + } + } + + /** Adds a [[org.apache.pekko.stream.KillSwitches.single]] into the stream after the given source + * and injects the created kill switch into the stream + */ + def withUniqueKillSwitch[A, Mat, Mat2]( + graph: FlowOpsMat[A, Mat] + )(mat: (Mat, UniqueKillSwitch) => Mat2): graph.ReprMat[WithKillSwitch[A], Mat2] = { + withMaterializedValueMat(new AtomicReference[UniqueKillSwitch])(graph)(Keep.both) + .viaMat(KillSwitches.single) { case ((m, ref), killSwitch) => + ref.set(killSwitch) + mat(m, killSwitch) + } + .map { case (a, ref) => WithKillSwitch(a)(ref.get()) } + } + + def injectKillSwitch[A, Mat]( + graph: FlowOpsMat[A, Mat] + )(killSwitch: Mat => KillSwitch): graph.ReprMat[WithKillSwitch[A], Mat] = { + withMaterializedValueMat(new AtomicReference[KillSwitch])(graph)(Keep.both) + .mapMaterializedValue { case (mat, ref) => + ref.set(killSwitch(mat)) + mat + } + .map { case (a, ref) => WithKillSwitch(a)(ref.get()) } + } + + private[util] def withMaterializedValueMat[M, A, Mat, Mat2](create: => M)( + graph: FlowOpsMat[A, Mat] + )(combine: (Mat, M) => Mat2): graph.ReprMat[(A, M), Mat2] = + graph.viaMat(new WithMaterializedValue[M, A](() => 
create))(combine) + + private def logOnThrow[A](logger: Logger, name: => String)(action: => A): A = + try { action } + catch { + case NonFatal(e) => + logger.error(s"$name failed", e) + throw e + } + + /** Pekko by default swallows exceptions thrown in [[org.apache.pekko.stream.stage.OutHandler]]s. + * This wrapper makes sure that they are logged. + */ + class LoggingOutHandler(delegate: OutHandler, logger: Logger, name: String) extends OutHandler { + override def onPull(): Unit = + logOnThrow(logger, s"$name onPull")(delegate.onPull()) + + override def onDownstreamFinish(cause: Throwable): Unit = + logOnThrow(logger, s"$name onDownstreamFinish")(delegate.onDownstreamFinish(cause)) + } + object LoggingOutHandler { + def apply(logger: Logger, name: String)(delegate: OutHandler): OutHandler = + new LoggingOutHandler(delegate, logger, name) + } + + /** Pekko by default swallows exceptions thrown in [[org.apache.pekko.stream.stage.InHandler]]s. + * This wrapper makes sure that they are logged. + */ + class LoggingInHandler(delegate: InHandler, logger: Logger, name: String) extends InHandler { + override def onPush(): Unit = + logOnThrow(logger, s"$name onPush")(delegate.onPush()) + + override def onUpstreamFinish(): Unit = + logOnThrow(logger, s"$name onUpstreamFinish")(delegate.onUpstreamFinish()) + + override def onUpstreamFailure(ex: Throwable): Unit = + logOnThrow(logger, s"$name onUpstreamFailure")(delegate.onUpstreamFailure(ex)) + } + object LoggingInHandler { + def apply(logger: Logger, name: String)(delegate: InHandler): InHandler = + new LoggingInHandler(delegate, logger, name) + } + + /** Pekko by default swallows exceptions thrown in async callbacks. + * This wrapper makes sure that they are logged. + */ + def loggingAsyncCallback[A](logger: Logger, name: String)(asyncCallback: A => Unit): A => Unit = + x => logOnThrow(logger, name)(asyncCallback(x)) + + /** Creates a value upon materialization that is added to every element of the stream. 
+ * + * WARNING: This flow breaks the synchronization abstraction of Pekko streams, + * as the created value is accessible from within the stream and from the outside through the materialized value. + * Users of this flow must make sure that accessing the value is thread-safe! + */ + private class WithMaterializedValue[M, A](create: () => M) + extends GraphStageWithMaterializedValue[FlowShape[A, (A, M)], M] { + private val in: Inlet[A] = Inlet[A]("withMaterializedValue.in") + private val out: Outlet[(A, M)] = Outlet[(A, M)]("withMaterializedValue.out") + override val shape: FlowShape[A, (A, M)] = FlowShape(in, out) + + override def initialAttributes: Attributes = Attributes.name("withMaterializedValue") + + override def createLogicAndMaterializedValue( + inheritedAttributes: Attributes + ): (GraphStageLogic, M) = { + val m: M = create() + val logic = new GraphStageLogic(shape) with InHandler with OutHandler { + override def onPush(): Unit = push(out, grab(in) -> m) + + override def onPull(): Unit = pull(in) + + setHandlers(in, out, this) + } + (logic, m) + } + } + + /** Container class for adding a [[org.apache.pekko.stream.KillSwitch]] to a single value. + * Two containers are equal if their contained values are equal. + * + * (Equality ignores the [[org.apache.pekko.stream.KillSwitch]]es because it is usually not very meaningful. + * The [[org.apache.pekko.stream.KillSwitch]] is therefore in the second argument list.) 
+ */ + final case class WithKillSwitch[+A](private val value: A)(val killSwitch: KillSwitch) { + def unwrap: A = value + def map[B](f: A => B): WithKillSwitch[B] = copy(f(value)) + def traverse[F[_], B](f: A => F[B])(implicit F: Functor[F]): F[WithKillSwitch[B]] = + F.map(f(value))(copy) + def copy[B](value: B = this.value): WithKillSwitch[B] = WithKillSwitch(value)(killSwitch) + } + object WithKillSwitch { + implicit val traverseWithKillSwitch: Traverse[WithKillSwitch] = new Traverse[WithKillSwitch] { + override def traverse[F[_], A, B](fa: WithKillSwitch[A])(f: A => F[B])(implicit + F: Applicative[F] + ): F[WithKillSwitch[B]] = fa.traverse(f) + + override def foldLeft[A, B](fa: WithKillSwitch[A], b: B)(f: (B, A) => B): B = f(b, fa.unwrap) + + override def foldRight[A, B](fa: WithKillSwitch[A], lb: Eval[B])( + f: (A, Eval[B]) => Eval[B] + ): Eval[B] = f(fa.unwrap, lb) + } + } + + /** Passes through all elements of the source until and including the first element that satisfies the condition. + * Thereafter pulls the kill switch of the first such element and drops all remaining elements of the source. + * + * '''Emits when''' upstream emits and all previously emitted elements do not meet the condition. + * + * '''Backpressures when''' downstream backpressures + * + * '''Completes when upstream''' completes + * + * '''Cancels when''' downstream cancels + */ + def takeUntilThenDrain[A, Mat]( + graph: FlowOps[WithKillSwitch[A], Mat], + condition: A => Boolean, + ): graph.Repr[WithKillSwitch[A]] = + graph.statefulMapConcat(() => { + @SuppressWarnings(Array("org.wartremover.warts.Var")) + var draining = false + elem => { + if (draining) Iterable.empty[WithKillSwitch[A]] + else { + if (condition(elem.unwrap)) { + draining = true + elem.killSwitch.shutdown() + } + Iterable.single(elem) + } + } + }) + + object syntax { + + /** Defines extension methods for [[org.apache.pekko.stream.scaladsl.FlowOpsMat]] that map to the methods defined in this class. 
+ * + * The construction with type parameter `U` follows + * Stephen's blog post about relatable variables + * to ensure that we can uniformly abstract over [[org.apache.pekko.stream.scaladsl.Source]]s and [[org.apache.pekko.stream.scaladsl.Flow]]s. + * In particular, we cannot use an implicit class here. Unlike in the blog post, the implicit conversion [[pekkoUtilSyntaxForFlowOps]] + * does not extract [[org.apache.pekko.stream.scaladsl.FlowOpsMat]] into a separate type parameter because this would confuse + * type inference. + */ + private[util] class PekkoUtilSyntaxForFlowOps[A, Mat, U <: FlowOps[A, Mat]]( + private val graph: U + ) extends AnyVal { + def remember(window: NonNegativeInt): U#Repr[NonEmpty[Seq[A]]] = + PekkoUtil.remember(graph, window) + + def statefulMapAsync[S, T](initial: S)( + f: (S, A) => Future[(S, T)] + )(implicit loggingContext: NamedLoggingContext): U#Repr[T] = + PekkoUtil.statefulMapAsync(graph, initial)(f) + + def statefulMapAsyncUS[S, T](initial: S)( + f: (S, A) => FutureUnlessShutdown[(S, T)] + )(implicit loggingContext: NamedLoggingContext): U#Repr[UnlessShutdown[T]] = + PekkoUtil.statefulMapAsyncUS(graph, initial)(f) + + def mapAsyncUS[B](parallelism: Int)(f: A => FutureUnlessShutdown[B])(implicit + loggingContext: NamedLoggingContext + ): U#Repr[UnlessShutdown[B]] = + PekkoUtil.mapAsyncUS(graph, parallelism)(f) + + def mapAsyncAndDrainUS[B](parallelism: Int)( + f: A => FutureUnlessShutdown[B] + )(implicit loggingContext: NamedLoggingContext): U#Repr[B] = + PekkoUtil.mapAsyncAndDrainUS(graph, parallelism)(f) + } + // Use separate implicit conversions for Sources and Flows to help IntelliJ + // Otherwise IntelliJ gets very resource hungry. 
+ implicit def pekkoUtilSyntaxForFlowOpsSource[A, Mat]( + graph: Source[A, Mat] + ): PekkoUtilSyntaxForFlowOps[A, Mat, graph.type] = + new PekkoUtilSyntaxForFlowOps(graph) + implicit def pekkoUtilSyntaxForFlowOpsFlow[A, B, Mat]( + graph: Flow[A, B, Mat] + ): PekkoUtilSyntaxForFlowOps[B, Mat, graph.type] = + new PekkoUtilSyntaxForFlowOps(graph) + + /** Defines extension methods for [[org.apache.pekko.stream.scaladsl.FlowOps]] with a [[org.apache.pekko.stream.KillSwitch]]. + * @see PekkoUtilSyntaxForFlowOps for an explanation of the type parameter U + */ + private[util] class PekkoUtilSyntaxForFLowOpsWithKillSwitch[ + A, + Mat, + U <: FlowOps[WithKillSwitch[A], Mat], + ](private val graph: U) + extends AnyVal { + def takeUntilThenDrain(condition: A => Boolean): U#Repr[WithKillSwitch[A]] = + PekkoUtil.takeUntilThenDrain(graph, condition) + } + // Use separate implicit conversions for Sources and Flows to help IntelliJ + // Otherwise IntelliJ gets very resource hungry. + implicit def pekkoUtilSyntaxForFlowOpsWithKillSwitchSource[A, Mat]( + graph: Source[WithKillSwitch[A], Mat] + ): PekkoUtilSyntaxForFLowOpsWithKillSwitch[A, Mat, graph.type] = + new PekkoUtilSyntaxForFLowOpsWithKillSwitch(graph) + implicit def pekkoUtilSyntaxForFlowOpsWithKillSwitchFlow[A, B, Mat]( + graph: Flow[A, WithKillSwitch[B], Mat] + ): PekkoUtilSyntaxForFLowOpsWithKillSwitch[B, Mat, graph.type] = + new PekkoUtilSyntaxForFLowOpsWithKillSwitch(graph) + + /** Defines extension methods for [[org.apache.pekko.stream.scaladsl.FlowOpsMat]] that map to the methods defined in this class. 
+ * @see PekkoUtilSyntaxForFlowOps for an explanation of the type parameter U + */ + private[util] class PekkoUtilSyntaxForFlowOpsMat[A, Mat, U <: FlowOpsMat[A, Mat]]( + private val graph: U + ) extends AnyVal { + + private[util] def withMaterializedValueMat[M, Mat2](create: => M)( + mat: (Mat, M) => Mat2 + ): U#ReprMat[(A, M), Mat2] = + PekkoUtil.withMaterializedValueMat(create)(graph)(mat) + + def withUniqueKillSwitchMat[Mat2]( + )(mat: (Mat, UniqueKillSwitch) => Mat2): U#ReprMat[WithKillSwitch[A], Mat2] = + PekkoUtil.withUniqueKillSwitch(graph)(mat) + + def injectKillSwitch(killSwitch: Mat => KillSwitch): U#ReprMat[WithKillSwitch[A], Mat] = + PekkoUtil.injectKillSwitch(graph)(killSwitch) + } + // Use separate implicit conversions for Sources and Flows to help IntelliJ + // Otherwise IntelliJ gets very resource hungry. + implicit def pekkoUtilSyntaxForFlowOpsMatSource[A, Mat]( + graph: Source[A, Mat] + ): PekkoUtilSyntaxForFlowOpsMat[A, Mat, graph.type] = + new PekkoUtilSyntaxForFlowOpsMat(graph) + implicit def pekkoUtilSyntaxForFlowOpsMat[A, B, Mat]( + graph: Flow[A, B, Mat] + ): PekkoUtilSyntaxForFlowOpsMat[B, Mat, graph.type] = + new PekkoUtilSyntaxForFlowOpsMat(graph) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/PriorityBlockingQueueUtil.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/PriorityBlockingQueueUtil.scala new file mode 100644 index 0000000000..dea23b3fee --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/PriorityBlockingQueueUtil.scala @@ -0,0 +1,10 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util + +object PriorityBlockingQueueUtil { + + /** Default initial capacity of [[java.util.concurrent.PriorityBlockingQueue]] as defined in its `DEFAULT_INITIAL_CAPACITY` */ + val DefaultInitialCapacity: Int = 11 +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/ResourceUtil.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/ResourceUtil.scala new file mode 100644 index 0000000000..9045ef751e --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/ResourceUtil.scala @@ -0,0 +1,92 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util + +import cats.MonadThrow +import cats.data.EitherT + +import scala.concurrent.{ExecutionContext, Future} +import scala.util.Try +import scala.util.control.NonFatal + +/** Utility code for doing proper resource management. + * A lot of it is based on https://medium.com/@dkomanov/scala-try-with-resources-735baad0fd7d + */ +object ResourceUtil { + + /** Does resource management the same way as [[withResource]], but returns an Either instead of throwing exceptions. + * + * @param r resource that will be used to derive some value and will be closed automatically in the end + * @param f function that will be applied to the resource and can possibly throw exceptions + * @return Either object that contains a Right with the mapped value or a Left with the thrown exception from either + * the function or the call to the resource's close method. + */ + def withResourceEither[T <: AutoCloseable, V](r: => T)(f: T => V): Either[Throwable, V] = + try { + Right(withResource(r)(f)) + } catch { + case NonFatal(e) => + Left(e) + } + + /** The given function is applied to the resource and returned. 
+ * Resource closing is done automatically after the function is applied. + * This will rethrow any exception thrown by the given function or the call to the resource's close method, + * but the resource will be attempted to close no matter what. + * + * @param r resource that will be used to derive some value and will be closed automatically in the end + * @param f function that will be applied to the resource and can possibly throw exceptions + * @return the result of the given function applied to the resource + */ + @SuppressWarnings(Array("org.wartremover.warts.Var")) + def withResource[T <: AutoCloseable, V](r: => T)(f: T => V): V = { + val resource: T = r + var exception: Option[Throwable] = None + try { + f(resource) + } catch { + case NonFatal(e) => + exception = Some(e) + throw e + } finally { + closeAndAddSuppressed(exception, resource) + } + } + + final private[util] class ResourceMonadApplied[M[_]]( + private val dummy: Boolean = true + ) extends AnyVal { + def apply[T <: AutoCloseable, V](r: => T)( + f: T => M[V] + )(implicit M: MonadThrow[M], TM: Thereafter[M], executionContext: ExecutionContext): M[V] = { + import Thereafter.syntax.* + import cats.syntax.flatMap.* + MonadThrow[M].fromTry(Try(f(r))).flatten.thereafter(_ => r.close()) + } + } + + def withResourceM[M[_]]: ResourceMonadApplied[M] = new ResourceMonadApplied[M] + + def withResourceEitherT[T <: AutoCloseable, E, V, F[_]](r: => T)(f: T => EitherT[Future, E, V])( + implicit ec: ExecutionContext + ): EitherT[Future, E, V] = { + withResourceM(r)(f) + } + + def withResourceFuture[T <: AutoCloseable, V](r: => T)(f: T => Future[V])(implicit + ec: ExecutionContext + ): Future[V] = { + withResourceM(r)(f) + } + + def closeAndAddSuppressed(e: Option[Throwable], resource: AutoCloseable): Unit = + e.fold(resource.close()) { exception => + try { + resource.close() + } catch { + case NonFatal(suppressed) => + exception.addSuppressed(suppressed) + } + } +} diff --git 
a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/ShowUtil.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/ShowUtil.scala new file mode 100644 index 0000000000..a9ae5b6b8d --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/ShowUtil.scala @@ -0,0 +1,153 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util + +import cats.Show +import cats.Show.Shown +import com.digitalasset.canton.config.CantonRequireTypes.{ + LengthLimitedString, + LengthLimitedStringWrapper, +} +import com.digitalasset.canton.logging.pretty.Pretty + +import scala.annotation.tailrec + +/** Utility class for clients who want to '''make use''' of pretty printing. + * Import this as follows: + *
+  * import com.digitalasset.canton.util.ShowUtil._
+  * 
+ * In some cases, an import at the top of the file will not make the `show` interpolator available. + * To work around this, you need to import this INSIDE of the using class. + * + * To enforce pretty printing, the `show` interpolator should be used for creating strings. + * That is, `show"s\$myComplexObject"` will result in a compile error, + * if pretty printing is not implemented for `myComplexObject`. + * In contrast, `s"\$myComplexObject"` will fall back to the default (non-pretty) toString implementation, + * if pretty printing is not implemented for `myComplexObject`. + * Even if pretty printing is implemented for the type `T` of `myComplexObject`, + * `s"\$myComplexObject"` will not use it, if the compiler fails to infer `T: Pretty`. + */ +object ShowUtil extends ShowUtil { + val NL: Shown = Shown(System.lineSeparator()) + val HashLength = 12 +} + +trait ShowUtil extends cats.syntax.ShowSyntax { + import ShowUtil.* + + /** Enables the syntax `show"This object is pretty: \$myPrettyType"`. + */ + implicit def showPretty[T: Pretty]: Show[T] = { + import Pretty.PrettyOps + Show.show(_.toPrettyString()) + } + + /** Enables syntax like + * `show"This is a string: \${myString.doubleQuoted}"` + * and + * `show"This is a hash: \${myHash.readableHash}"`. + */ + abstract class StringOperators(s: String) { + + /** Use this to quote names. (E.g. Domain 'myDomain') + */ + def singleQuoted: Shown = Shown("'" + s + "'") + + /** Use this to quote string constants, to separate them from the embedding sentence. + * (E.g. the request failed with "index out of bounds".) 
+ */ + def doubleQuoted: Shown = Shown("\"" + s + "\"") + + def unquoted: Shown = Shown(s) + + def readableHash: Shown = + Shown(if (s.length <= HashLength) s else s.take(HashLength) + "...") + + def limit(maxLength: Int): Shown = + Shown(if (s.length <= maxLength) s else s.take(maxLength) + "...") + + def readableLoggerName(maxLength: Int): Shown = { + @tailrec + def go(result: String): String = { + if (result.length <= maxLength) { + result + } else { + val newResult = result.replaceFirst("^(([a-z]\\.)*[a-z])[a-zA-Z0-9-]*", "$1") + if (newResult == result) { + result + } else { + go(newResult) + } + } + } + + Shown(go(s)) + } + } + + implicit class ShowStringSyntax(s: String) extends StringOperators(s) + implicit class ShowLengthLimitedStringSyntax(s: LengthLimitedString) + extends StringOperators(s.str) + implicit class ShowLengthLimitedStringWrapperSyntax(s: LengthLimitedStringWrapper) + extends StringOperators(s.unwrap) + + /** Enables the syntax `show"\${myEither.showMerged}"`. + */ + implicit class ShowEitherSyntax[L: Show, R: Show](e: Either[L, R]) { + + /** Prints the (left or right) value of an either without indicating whether it is left or right. + */ + def showMerged: Shown = Shown(e.fold(_.show, _.show)) + } + + implicit class ShowOptionSyntax[T: Show](o: Option[T]) { + def showValueOrNone: Shown = + Shown(o match { + case Some(value) => value.show + case None => "None" + }) + + def showValue: Shown = + Shown(o match { + case Some(value) => value.show + case None => "" + }) + } + + /** Enables syntax like `show"Found several elements: \${myCollection.mkShow()}"`. + */ + implicit class ShowTraversableSyntax[T: Show](trav: Iterable[T]) { + + /** Like `IterableOnce.mkString(String)` with the difference that + * individual elements are mapped to strings using `show` instead of `toString`.
+ */ + def mkShow(sep: String = ", "): Shown = Shown(trav.map(_.show).mkString(sep)) + + /** Like `TraversableOnce.mkString(String, String, String)` with the difference that + * individual elements are mapped to strings using `show` instead of `toString`. + */ + def mkShow(start: String, sep: String, end: String): Shown = Shown( + trav.map(_.show).mkString(start, sep, end) + ) + + def limit(n: Int): Iterable[Shown] = { + val (prefix, remainder) = trav.splitAt(n) + val ellipsis = if (remainder.isEmpty) Seq.empty else Seq(Shown("...")) + prefix.map(e => e: Shown) ++ ellipsis + } + } + + @SuppressWarnings(Array("org.wartremover.warts.Product")) + implicit class ShowProductSyntax(any: Product) { + def showWithAdHocPrettyInstance: Shown = { + implicit val prettyAny: Pretty[Product] = Pretty.adHocPrettyInstance + Shown(any.show) + } + } + + implicit class ShowAnyRefSyntax(anyRef: AnyRef) { + def showType: Shown = Shown(anyRef.getClass.getSimpleName) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/SimpleExecutionQueue.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/SimpleExecutionQueue.scala new file mode 100644 index 0000000000..889ccc43d9 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/SimpleExecutionQueue.scala @@ -0,0 +1,347 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util + +import cats.data.EitherT +import com.digitalasset.canton.concurrent.{DirectExecutionContext, FutureSupervisor} +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.lifecycle.UnlessShutdown.AbortedDueToShutdown +import com.digitalasset.canton.lifecycle.* +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.ShowUtil.* +import com.digitalasset.canton.util.SimpleExecutionQueue.TaskCell +import com.digitalasset.canton.util.Thereafter.syntax.* +import com.digitalasset.canton.util.TryUtil.* + +import java.util.concurrent.atomic.AtomicReference +import scala.annotation.tailrec +import scala.concurrent.Future +import scala.concurrent.duration.Duration +import scala.util.{Failure, Success, Try} + +/** Functions executed with this class will only run when all previous calls have completed executing. + * This can be used when async code should not be run concurrently. + * + * The default semantics is that a task is only executed if the previous tasks have completed successfully, i.e., + * they did not fail nor was the task aborted due to shutdown. + * + * If the queue is shutdown, the tasks' execution is aborted due to shutdown too. 
+ */ +class SimpleExecutionQueue( + name: String, + futureSupervisor: FutureSupervisor, + override val timeouts: ProcessingTimeout, + override val loggerFactory: NamedLoggerFactory, + logTaskTiming: Boolean = false, +) extends PrettyPrinting + with NamedLogging + with FlagCloseableAsync { + + protected val directExecutionContext: DirectExecutionContext = + DirectExecutionContext(noTracingLogger) + + /** Will execute the given function after all previous executions have completed successfully and return the + * future with the result of this execution. + */ + def execute[A](execution: => Future[A], description: String)(implicit + loggingContext: ErrorLoggingContext + ): FutureUnlessShutdown[A] = + genExecute( + runIfFailed = false, + FutureUnlessShutdown.outcomeF(execution)(directExecutionContext), + description, + ) + + def executeE[A, B]( + execution: => EitherT[Future, A, B], + description: String, + )(implicit loggingContext: ErrorLoggingContext): EitherT[FutureUnlessShutdown, A, B] = + EitherT(execute(execution.value, description)) + + def executeEUS[A, B]( + execution: => EitherT[FutureUnlessShutdown, A, B], + description: String, + )(implicit loggingContext: ErrorLoggingContext): EitherT[FutureUnlessShutdown, A, B] = + EitherT(executeUS(execution.value, description)) + + def executeUS[A]( + execution: => FutureUnlessShutdown[A], + description: String, + runWhenUnderFailures: => Unit = (), + )(implicit + loggingContext: ErrorLoggingContext + ): FutureUnlessShutdown[A] = + genExecute(runIfFailed = false, execution, description, runWhenUnderFailures) + + def executeUnderFailuresUS[A](execution: => FutureUnlessShutdown[A], description: String)(implicit + loggingContext: ErrorLoggingContext + ): FutureUnlessShutdown[A] = + genExecute(runIfFailed = true, execution, description) + + private def genExecute[A]( + runIfFailed: Boolean, + execution: => FutureUnlessShutdown[A], + description: String, + runWhenUnderFailures: => Unit = (), + )(implicit loggingContext: 
ErrorLoggingContext): FutureUnlessShutdown[A] = { + val next = new TaskCell(description, logTaskTiming, futureSupervisor, directExecutionContext) + val oldHead = queueHead.getAndSet(next) // linearization point + next.chain( + oldHead, + runIfFailed, + // Only run the task when the queue is not shut down + performUnlessClosingUSF(s"queued task: $description")(execution)( + directExecutionContext, + loggingContext.traceContext, + ), + runWhenUnderFailures, + ) + } + + /** Returns a future that completes when all scheduled tasks up to now have completed or after a shutdown has been initiated. Never fails. */ + def flush(): Future[Unit] = + queueHead + .get() + .future + .onShutdown(())(directExecutionContext) + .recover { exception => + logger.debug(s"Flush has failed, however returning success", exception)( + TraceContext.empty + ) + }(directExecutionContext) + + private val queueHead: AtomicReference[TaskCell] = + new AtomicReference[TaskCell](TaskCell.sentinel(directExecutionContext)) + + /** Slow and inefficient queue size, to be used for inspection */ + def queueSize: Int = { + @tailrec + def go(cell: TaskCell, count: Int): Int = cell.predecessor match { + case None => count + case Some(predCell) => go(predCell, count + 1) + } + go(queueHead.get(), 0) + } + + /** Returns a sequence of tasks' descriptions in this execution queue. + * The first entry refers to the last known completed task, + * the others are running or queued.
+ */ + def queued: Seq[String] = { + @tailrec + def go(cell: TaskCell, descriptions: List[String]): List[String] = { + cell.predecessor match { + case None => s"${cell.description} (completed)" :: descriptions + case Some(predCell) => go(predCell, cell.description :: descriptions) + } + } + go(queueHead.get(), List.empty[String]) + } + + override def pretty: Pretty[SimpleExecutionQueue] = prettyOfClass( + param("queued tasks", _.queued.map(_.unquoted)) + ) + + private def forceShutdownTasks(): Unit = { + @tailrec + def go(cell: TaskCell, nextTaskAfterRunningOne: Option[TaskCell]): Option[TaskCell] = { + // If the predecessor of the cell is completed, then it is the running task, in which case we stop the recursion. + // Indeed the predecessor of the running task is only set to None when the task has completed, so we need to + // access the predecessor and check if it's done. There is a potential race because by the time we reach the supposed + // first task after the running one and shut it down, it might already have started if the running task finished in the meantime. + // This is fine though because tasks are wrapped in a performUnlessShutdown so the task will be `AbortedDueToShutdown` anyway + // instead of actually starting, so the race is benign. + if (cell.predecessor.exists(_.future.unwrap.isCompleted)) { + errorLoggingContext(TraceContext.empty).debug( + s"${cell.description} is still running. It will be left running but all subsequent tasks will be aborted."
+ ) + nextTaskAfterRunningOne + } else { + cell.predecessor match { + case Some(predCell) => go(predCell, Some(cell)) + case _ => None + } + } + } + + // Find the first task queued after the currently running one and shut it down, this will trigger a cascade and + // `AbortDueToShutdown` all subsequent tasks + go(queueHead.get(), None).foreach(_.shutdown()) + } + + override protected def closeAsync(): Seq[AsyncOrSyncCloseable] = { + import TraceContext.Implicits.Empty.* + Seq( + AsyncCloseable( + s"simple-exec-queue: $name", + flush(), + timeouts.shutdownProcessing, + // In the event where the flush does not complete within the allocated timeout, + // forcibly shutdown the remaining queued tasks, except the currently running one + onTimeout = _ => forceShutdownTasks(), + ) + ) + } +} + +object SimpleExecutionQueue { + + /** Implements the chaining of tasks and their descriptions. */ + private class TaskCell( + val description: String, + logTaskTiming: Boolean, + futureSupervisor: FutureSupervisor, + directExecutionContext: DirectExecutionContext, + )(implicit errorLoggingContext: ErrorLoggingContext) { + + /** Completes after all earlier tasks and this task have completed. + * Fails with the exception of the first task that failed, if any. + */ + private val completionPromise: PromiseUnlessShutdown[Unit] = + new PromiseUnlessShutdown[Unit](description, futureSupervisor)( + errorLoggingContext, + directExecutionContext, + ) + + /** `null` if no predecessor has been chained. + * [[scala.Some$]]`(cell)` if the predecessor task is `cell` and this task is queued or running. + * [[scala.None$]] if this task has been completed. + */ + private val predecessorCell: AtomicReference[Option[TaskCell]] = + new AtomicReference[Option[TaskCell]]() + + private val taskCreationTime: Long = if (logTaskTiming) System.nanoTime() else 0L + + /** Chains this task cell after its predecessor `pred`. 
*/ + /* The linearization point in the caller `genExecute` has already determined the sequencing of tasks + * if they are enqueued concurrently. So it now suffices to make sure that this task's future executes after + * `pred` (unless the previous task's future failed and `runIfFailed` is false) and that + * we cut the chain to the predecessor thereafter. + */ + @SuppressWarnings(Array("org.wartremover.warts.Null")) + def chain[A]( + pred: TaskCell, + runIfFailed: Boolean, + execution: => FutureUnlessShutdown[A], + runWhenUnderFailures: => Unit, + )(implicit + loggingContext: ErrorLoggingContext + ): FutureUnlessShutdown[A] = { + val succeed = predecessorCell.compareAndSet(null, Some(pred)) + ErrorUtil.requireState(succeed, s"Attempt to chain task $description several times.")( + loggingContext + ) + + def runTask( + propagatedException: Option[Throwable] + ): FutureUnlessShutdown[(Option[Throwable], A)] = { + if (logTaskTiming && loggingContext.logger.underlying.isDebugEnabled) { + val startTime = System.nanoTime() + val waitingDelay = Duration.fromNanos(startTime - taskCreationTime) + loggingContext.debug( + show"Running task ${description.singleQuoted} after waiting for $waitingDelay" + ) + execution.transform { result => + val finishTime = System.nanoTime() + val runningDuration = Duration.fromNanos(finishTime - startTime) + val resultStr = result match { + case Failure(_exception) => "failed" + case Success(UnlessShutdown.Outcome(_result)) => "completed" + case Success(UnlessShutdown.AbortedDueToShutdown) => "aborted" + } + loggingContext.debug( + show"Task ${description.singleQuoted} finished as $resultStr after $runningDuration running time and $waitingDelay waiting time" + ) + result.map(r => r.map(a => (propagatedException, a))) + }(directExecutionContext) + } else { + execution.map(a => (propagatedException, a))(directExecutionContext) + } + } + + val chained = pred.future.transformWith { + case Success(UnlessShutdown.Outcome(_result)) => + runTask(None) 
+ case Success(UnlessShutdown.AbortedDueToShutdown) => + FutureUnlessShutdown.abortedDueToShutdown + case Failure(ex) => + // Propagate the exception `ex` from an earlier task + if (runIfFailed) runTask(Some(ex)) + else { + if (logTaskTiming && loggingContext.logger.underlying.isDebugEnabled) { + val startTime = System.nanoTime() + val waitingDelay = Duration.fromNanos(startTime - taskCreationTime) + loggingContext.logger.debug( + s"Not running task ${description.singleQuoted} due to exception after waiting for $waitingDelay" + )(loggingContext.traceContext) + } + Try(runWhenUnderFailures).forFailed(e => + loggingContext.logger.debug( + s"Failed to run 'runWhenUnderFailures' function for ${description.singleQuoted}", + e, + )(loggingContext.traceContext) + ) + FutureUnlessShutdown.failed(ex) + } + }(directExecutionContext) + val completed = chained.thereafter { _ => + // Cut the predecessor as we're now done. + predecessorCell.set(None) + }(directExecutionContext) + val propagatedException = completed.flatMap { case (earlierExceptionO, _) => + earlierExceptionO.fold(FutureUnlessShutdown.unit)(FutureUnlessShutdown.failed) + }(directExecutionContext) + completionPromise.completeWith(propagatedException) + + // In order to be able to manually shutdown a task using its completionPromise, we semantically "check" that + // completionPromise hasn't already been completed with AbortedDueToShutdown, and if not we return the computation + // result. Note that we need to be sure that completionPromise will be fulfilled when + // 'completed' is, which is done just above + completionPromise.futureUS.transformWith { + case Success(AbortedDueToShutdown) => FutureUnlessShutdown.abortedDueToShutdown + case _ => completed.map(_._2)(directExecutionContext) + }(directExecutionContext) + } + + /** The returned future completes after this task has completed or a shutdown has occurred. 
+ * If the task is not supposed to run if an earlier task has failed or was shutdown, + * then this task completes when all earlier tasks have completed without being actually run. + */ + def future: FutureUnlessShutdown[Unit] = completionPromise.futureUS + + /** Returns the predecessor task's cell or [[scala.None$]] if this task has already been completed. */ + def predecessor: Option[TaskCell] = { + // Wait until the predecessor cell has been set. + @SuppressWarnings(Array("org.wartremover.warts.Null")) + @tailrec def go(): Option[TaskCell] = { + val pred = predecessorCell.get() + if (pred eq null) go() else pred + } + go() + } + + def shutdown(): Unit = { + errorLoggingContext.warn(s"Forcibly completing $description with AbortedDueToShutdown") + completionPromise.shutdown() + } + } + + private object TaskCell { + + /** Sentinel task cell that is already completed. */ + def sentinel(directExecutionContext: DirectExecutionContext): TaskCell = { + // We don't care about the logging context here because the promise is already completed + import TraceContext.Implicits.Empty.* + val errorLoggingContext = ErrorLoggingContext.fromTracedLogger(NamedLogging.noopLogger) + val cell = new TaskCell("sentinel", false, FutureSupervisor.Noop, directExecutionContext)( + errorLoggingContext + ) + cell.predecessorCell.set(None) + cell.completionPromise.outcome(()) + cell + } + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/SingleUseCell.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/SingleUseCell.scala new file mode 100644 index 0000000000..470edfcfc1 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/SingleUseCell.scala @@ -0,0 +1,36 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util + +import java.util.concurrent.atomic.AtomicReference + +/** This class provides a mutable container for a single value of type `A`. + * The value may be put at most once. A [[SingleUseCell]] therefore provides the following immutability guarantee: + * The value of a cell cannot change; once it has been put there, it will remain in the cell. + */ +class SingleUseCell[A] { + private val content: AtomicReference[Option[A]] = new AtomicReference[Option[A]](None) + + /** Returns whether the value has not yet been set */ + def isEmpty: Boolean = content.get.isEmpty + + /** Inserts the given value into the cell if it was empty before. + * Otherwise returns the content. + * + * @return The previous value or [[scala.None$]] if the cell was empty. + */ + def putIfAbsent(x: A): Option[A] = + if (content.compareAndSet(None, Some(x))) None else content.get() + + /** Returns the contents of the cell, if any. */ + def get: Option[A] = content.get + + /** Shorthand for `get.getOrElse` */ + def getOrElse[B >: A](x: => B): B = content.get.getOrElse(x) + + override def toString: String = content.get match { + case None => "Cell()" + case Some(x) => "Cell(" + x.toString + ")" + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/StackTraceUtil.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/StackTraceUtil.scala new file mode 100644 index 0000000000..e4c44e11ed --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/StackTraceUtil.scala @@ -0,0 +1,32 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util + +object StackTraceUtil { + + def formatStackTrace(filter: Thread => Boolean = _ => true): String = { + import scala.jdk.CollectionConverters.* + Thread.getAllStackTraces.asScala.toMap + .filter { case (thread, _) => filter(thread) } + .map { case (thread, stackTrace) => + formatThread(thread) + formatStackTrace(stackTrace) + } + .mkString("\n") + } + + def formatThread(thread: Thread): String = + s" ${thread.toString} is-daemon=${thread.isDaemon} state=${thread.getState.toString}" + + def formatStackTrace(stackTrace: Array[StackTraceElement]): String = if (stackTrace.isEmpty) "" + else stackTrace.mkString("\n ", "\n ", "\n") + + def caller(offset: Int = 1): String = { + val stack = Thread.currentThread().getStackTrace + if (stack.lengthCompare(offset) > 0) { + val cal = stack(offset) + s"${cal.getFileName}:${cal.getLineNumber}" + } else "unknown" + } + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/Thereafter.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/Thereafter.scala new file mode 100644 index 0000000000..f0904dca51 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/Thereafter.scala @@ -0,0 +1,166 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util + +import cats.data.{EitherT, OptionT} +import com.digitalasset.canton.util.TryUtil.* + +import scala.concurrent.{ExecutionContext, Future} +import scala.util.{Failure, Success, Try} + +/** Typeclass for computations with an operation that can run a side effect after the computation has finished. + * + * The typeclass abstracts the following patterns so that it can be used for types other than [[scala.concurrent.Future]]. 
+ * {{{ + * future.transform { result => val () = body(result); result } // synchronous body + * future.transform { result => body(result).transform(_ => result) } // asynchronous body + * }}} + * + * Usage: + *
+  * import com.digitalasset.canton.util.Thereafter.syntax.*
+  *
+  * myAsyncComputation.thereafter(result => ...)
+  * 
+ * + * @tparam F The computation's type functor. + */ +trait Thereafter[F[_]] { + + /** The container type for computation result. */ + type Content[_] + + /** Runs `body` after the computation `f` has completed. + * @return The computation that results from chaining `f` before `body`. Completes only after `body` has run. + * If `body` completes normally, the result of the computation is the same as `f`'s result. + * If `body` throws, the result includes the thrown exception. + */ + def thereafter[A](f: F[A])(body: Content[A] => Unit)(implicit ec: ExecutionContext): F[A] + + /** runs `body` after the computation `f` has completed + * + * @return The computation that results from chaining `f` before `body`. Completes only after `body` has run. + * If `body` completes normally, the result of the computation is the same as `f`'s result. + * If `body` throws, the result includes the thrown exception. + * If `body` produces a failed computation, the result includes the thrown exception. + */ + def thereafterF[A](f: F[A])(body: Content[A] => Future[Unit])(implicit ec: ExecutionContext): F[A] +} + +object Thereafter { + + /** Make the dependent Content type explicit as a type argument to help with type inference. + * + * The construction is the same as in shapeless. + * [The Type Astronaut's Guide to Shapeless Book](https://underscore.io/books/shapeless-guide/) + * explains the idea in Chapter 4.2. + */ + type Aux[F[_], C[_]] = Thereafter[F] { type Content[A] = C[A] } + + def apply[F[_]](implicit F: Thereafter[F]): Thereafter.Aux[F, F.Content] = F + + /** [[Thereafter]] instance for [[scala.concurrent.Future]]s */ + object FutureThereafter extends Thereafter[Future] { + override type Content[A] = Try[A] + + override def thereafter[A]( + f: Future[A] + )(body: Try[A] => Unit)(implicit ec: ExecutionContext): Future[A] = + f.transform { + case result: Success[?] 
=> + body(result) + result + case result @ Failure(resultEx) => + val bodyT = Try(body(result)) + // If the body throws an exception, add it as a suppressed exception to the result exception + bodyT.forFailed { bodyEx => + // Avoid an IllegalArgumentException if it's the same exception, + if (!(resultEx eq bodyEx)) resultEx.addSuppressed(bodyEx) + } + result + } + + override def thereafterF[A](f: Future[A])(body: Try[A] => Future[Unit])(implicit + ec: ExecutionContext + ): Future[A] = { + f.transformWith { + case result @ Success(success) => + body(result).map { (_: Unit) => success } + case result @ Failure(resultEx) => + Future.fromTry(Try(body(result))).flatten.transform { + case Success(_) => result + case Failure(bodyEx) => + if (!(resultEx eq bodyEx)) resultEx.addSuppressed(bodyEx) + result + } + } + } + } + implicit val futureThereafter: Thereafter.Aux[Future, Try] = FutureThereafter + + /** Use a type synonym instead of a type lambda so that the Scala compiler does not get confused during implicit resolution, + * at least for simple cases. + */ + type EitherTThereafterContent[Content[_], E, A] = Content[Either[E, A]] + + /** [[Thereafter]] instance lifted through [[cats.data.EitherT]]. */ + implicit def eitherTThereafter[F[_], E](implicit + F: Thereafter[F] + ): Thereafter.Aux[EitherT[F, E, *], EitherTThereafterContent[F.Content, E, *]] = + new Thereafter[EitherT[F, E, *]] { + override type Content[A] = EitherTThereafterContent[F.Content, E, A] + override def thereafter[A](f: EitherT[F, E, A])(body: Content[A] => Unit)(implicit + ec: ExecutionContext + ): EitherT[F, E, A] = + EitherT(F.thereafter(f.value)(body)) + + override def thereafterF[A](f: EitherT[F, E, A])( + body: Content[A] => Future[Unit] + )(implicit ec: ExecutionContext): EitherT[F, E, A] = + EitherT(F.thereafterF(f.value)(body)) + } + + /** Use a type synonym instead of a type lambda so that the Scala compiler does not get confused during implicit resolution, + * at least for simple cases. 
+ */ + type OptionTThereafterContent[Content[_], A] = Content[Option[A]] + + /** [[Thereafter]] instance lifted through [[cats.data.OptionT]]. */ + implicit def optionTThereafter[F[_]](implicit + F: Thereafter[F] + ): Thereafter.Aux[OptionT[F, *], OptionTThereafterContent[F.Content, *]] = + new Thereafter[OptionT[F, *]] { + override type Content[A] = OptionTThereafterContent[F.Content, A] + + override def thereafter[A](f: OptionT[F, A])(body: Content[A] => Unit)(implicit + ec: ExecutionContext + ): OptionT[F, A] = + OptionT(F.thereafter(f.value)(body)) + + override def thereafterF[A](f: OptionT[F, A])( + body: Content[A] => Future[Unit] + )(implicit ec: ExecutionContext): OptionT[F, A] = + OptionT(F.thereafterF(f.value)(body)) + } + + trait Ops[F[_], C[_], A] extends Serializable { + protected def self: F[A] + val typeClassInstance: Thereafter.Aux[F, C] + def thereafter(body: C[A] => Unit)(implicit ec: ExecutionContext): F[A] = + typeClassInstance.thereafter(self)(body) + def thereafterF(body: C[A] => Future[Unit])(implicit ec: ExecutionContext): F[A] = + typeClassInstance.thereafterF(self)(body) + } + + /** Extension method for instances of [[Thereafter]]. */ + object syntax { + import scala.language.implicitConversions + implicit def ThereafterOps[F[_], A](target: F[A])(implicit + tc: Thereafter[F] + ): Ops[F, tc.Content, A] = new Ops[F, tc.Content, A] { + override val self: F[A] = target + override val typeClassInstance: Thereafter.Aux[F, tc.Content] = tc + } + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/TrieMapUtil.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/TrieMapUtil.scala new file mode 100644 index 0000000000..48d7a29275 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/TrieMapUtil.scala @@ -0,0 +1,40 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util + +import scala.collection.concurrent.TrieMap + +object TrieMapUtil { + + /** Idempotent insertion into TrieMap. + * + * - Inserts the key-value pair if the key is absent. + * - Idempotency: Ignores the insertion if the key already exists and the existing value is equal to the new value. + * - Fails if the key already exists and the existing value differs from the new value. + * + * @param errorFn: Takes the key, oldValue, newValue and returns an appropriate error. + */ + def insertIfAbsent[K, V, E]( + map: TrieMap[K, V], + key: K, + newValue: V, + errorFn: (K, V, V) => E, + ): Either[E, Unit] = { + + map.putIfAbsent(key, newValue) match { + case None => Right(()) + case Some(oldValue) => + Either.cond(oldValue == newValue, (), errorFn(key, oldValue, newValue)) + } + } + + def insertIfAbsent[K, V, E]( + map: TrieMap[K, V], + key: K, + newValue: V, + staticErrorFn: => E, + ): Either[E, Unit] = + insertIfAbsent(map, key, newValue, (_: K, _: V, _: V) => staticErrorFn) + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/TryUtil.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/TryUtil.scala new file mode 100644 index 0000000000..b5b245ab7d --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/TryUtil.scala @@ -0,0 +1,27 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util + +import scala.util.control.NonFatal +import scala.util.{Failure, Success, Try} + +object TryUtil { + + /** Constructs a `Try` using the by-name parameter. This + * method will ensure any non-fatal exception and [[java.lang.InterruptedException]] is caught and a + * `Failure` object is returned. 
+ */ + def tryCatchInterrupted[A](r: => A): Try[A] = + try Success(r) + catch { + case e: InterruptedException => Failure(e) + case NonFatal(e) => Failure(e) + } + + implicit final class ForFailedOps[A](private val a: Try[A]) extends AnyVal { + @inline + def forFailed(f: Throwable => Unit): Unit = a.fold(f, _ => ()) + } + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/package.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/package.scala new file mode 100644 index 0000000000..68ad13e2e1 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/package.scala @@ -0,0 +1,21 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton + +import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggingContext} +import com.digitalasset.canton.tracing.TraceContext + +package object util { + type TracedLazyVal[T] = LazyValWithContext[T, TraceContext] + val TracedLazyVal: LazyValWithContextCompanion[TraceContext] = + new LazyValWithContextCompanion[TraceContext] {} + + type ErrorLoggingLazyVal[T] = LazyValWithContext[T, ErrorLoggingContext] + val ErrorLoggingLazyVal: LazyValWithContextCompanion[ErrorLoggingContext] = + new LazyValWithContextCompanion[ErrorLoggingContext] {} + + type NamedLoggingLazyVal[T] = LazyValWithContext[T, NamedLoggingContext] + val NamedLoggingLazyVal: LazyValWithContextCompanion[NamedLoggingContext] = + new LazyValWithContextCompanion[NamedLoggingContext] {} +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/retry/Jitter.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/retry/Jitter.scala new file mode 100644 index 0000000000..bdffee0c0f --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/retry/Jitter.scala @@ -0,0 +1,135 @@ +// 
Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util.retry + +import java.util.Random +import java.util.concurrent.ThreadLocalRandom +import scala.concurrent.duration.* + +trait Jitter { + + def apply(start: FiniteDuration, last: FiniteDuration, attempt: Int): FiniteDuration = + next(convert(start, last.unit), last, attempt) + + protected def next(start: FiniteDuration, last: FiniteDuration, attempt: Int): FiniteDuration + + protected def convert(dur: Duration, unit: TimeUnit): FiniteDuration = + FiniteDuration(dur.toUnit(unit).toLong, unit) + + protected def capped(input: FiniteDuration, cap: Duration)(op: Long => Long): FiniteDuration = { + if (!cap.isFinite) { + Duration(op(input.length), input.unit) + } else { + val ceil = convert(cap, input.unit) + val value = op(input.length) + if (value >= ceil.length) ceil + else Duration(value, input.unit) + } + } + + protected def pow(start: Long, base: Long, attempt: Long): Long = { + val temp = start * math.pow(base.toDouble, attempt.toDouble) + if (temp < 0.0 || temp > Long.MaxValue.toDouble) Long.MaxValue + else temp.toLong + } + + protected def cappedPow( + start: FiniteDuration, + cap: Duration, + base: Long, + attempt: Long, + ): FiniteDuration = + capped(start, cap) { len => + pow(len, base, attempt) + } +} + +/** The algorithms here were inspired by this article: + * https://www.awsarchitectureblog.com/2015/03/backoff.html + */ +object Jitter { + + /** Given a lower and upper bound (inclusive) generate a random + * number within those bounds + */ + type RandomSource = (Long, Long) => Long + + val defaultRandomSource: Jitter.RandomSource = Jitter.randomSource(ThreadLocalRandom.current()) + + /** Create a RandomSource from an instance of java.util.Random + * Please be mindful of the call-by-name semantics + */ + def randomSource(random: => Random): RandomSource = { (l, u) => + val (_l, _u) = if 
(l < u) (l, u) else (u, l) + nextLong(random, (_u - _l) + 1) + _l + } + + /** Simple exponential backoff + cap */ + def none(cap: Duration, base: Int = 2): Jitter = + new Jitter { + protected def next( + start: FiniteDuration, + last: FiniteDuration, + attempt: Int, + ): FiniteDuration = + cappedPow(start, cap, base.toLong, attempt.toLong) + + override def toString = s"retry.Jitter.none($cap, $base)" + } + + /** Normal exponential backoff + cap + random jitter */ + def full(cap: Duration, random: RandomSource = defaultRandomSource, base: Int = 2): Jitter = + new Jitter { + protected def next( + start: FiniteDuration, + last: FiniteDuration, + attempt: Int, + ): FiniteDuration = { + val temp = cappedPow(start, cap, base.toLong, attempt.toLong) + Duration(random(0, temp.length), temp.unit) + } + + override def toString = s"retry.Jitter.full($cap, $random, $base)" + } + + /** Always keep some of the backoff and jitter by a smaller amount (prevents very short sleeps) */ + def equal(cap: Duration, random: RandomSource = defaultRandomSource, base: Int = 2): Jitter = + new Jitter { + protected def next( + start: FiniteDuration, + last: FiniteDuration, + attempt: Int, + ): FiniteDuration = { + val temp = cappedPow(start, cap, base.toLong, attempt.toLong) + Duration(temp.length / 2 + random(0, temp.length / 2), temp.unit) + } + + override def toString = s"retry.Jitter.equal($cap, $random, $base)" + } + + /** similar to full, but we also increase the maximum jitter started on the last random value. 
*/ + def decorrelated( + cap: Duration, + random: RandomSource = defaultRandomSource, + base: Int = 3, + ): Jitter = + new Jitter { + protected def next( + start: FiniteDuration, + last: FiniteDuration, + attempt: Int, + ): FiniteDuration = + capped(start, cap) { len => + random(len, last.length * base) + } + + override def toString = s"retry.Jitter.decorrelated($cap, $random, $base)" + } + + private def nextLong(random: Random, n: Long) = { + if (n <= 0L) throw new IllegalArgumentException() + + new scala.util.Random(random).nextLong(n) + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/retry/LICENSE b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/retry/LICENSE new file mode 100644 index 0000000000..8d9156d9d1 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/retry/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2013-2014 Doug Tangren, Nathan Hamblen + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/retry/Policy.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/retry/Policy.scala new file mode 100644 index 0000000000..fe61e96fc0 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/retry/Policy.scala @@ -0,0 +1,525 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util.retry + +import cats.Eval +import cats.syntax.flatMap.* +import com.digitalasset.canton.concurrent.DirectExecutionContext +import com.digitalasset.canton.lifecycle.UnlessShutdown.{AbortedDueToShutdown, Outcome} +import com.digitalasset.canton.lifecycle.{ + FutureUnlessShutdown, + PerformUnlessClosing, + UnlessShutdown, +} +import com.digitalasset.canton.logging.{ErrorLoggingContext, TracedLogger} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.ShowUtil.* +import com.digitalasset.canton.util.retry.RetryUtil.{ + AllExnRetryable, + ErrorKind, + ExceptionRetryable, + NoErrorKind, +} +import com.digitalasset.canton.util.retry.RetryWithDelay.{RetryOutcome, RetryTermination} +import com.digitalasset.canton.util.{DelayUtil, LoggerUtil} +import org.slf4j.event.Level + +import scala.concurrent.duration.{Duration, FiniteDuration} +import scala.concurrent.{ExecutionContext, Future} +import scala.util.control.NonFatal +import scala.util.{Failure, Try} + +/** A retry [[com.digitalasset.canton.util.retry.Policy]] defines an interface for retrying a future-based task with + * retry semantics specific to implementations. If the task throws a non-fatal exceptions synchronously, the exception is + * converted into an asynchronous one, i.e., it is returned as a failed future or retried. 
+ * + * If unsure about what retry policy to pick, [[com.digitalasset.canton.util.retry.Backoff]] is a good default. + */ +abstract class Policy(logger: TracedLogger) { + + protected val directExecutionContext: DirectExecutionContext = DirectExecutionContext(logger) + + def apply[T](task: => Future[T], retryOk: ExceptionRetryable)(implicit + success: Success[T], + executionContext: ExecutionContext, + traceContext: TraceContext, + ): Future[T] + + def unlessShutdown[T](task: => FutureUnlessShutdown[T], retryOk: ExceptionRetryable)(implicit + success: Success[T], + executionContext: ExecutionContext, + traceContext: TraceContext, + ): FutureUnlessShutdown[T] + +} + +object Policy { + + /** Repeatedly execute the task until it doesn't throw an exception or the `flagCloseable` is closing. */ + def noisyInfiniteRetry[A]( + task: => Future[A], + performUnlessClosing: PerformUnlessClosing, + retryInterval: FiniteDuration, + operationName: String, + actionable: String, + )(implicit + loggingContext: ErrorLoggingContext, + executionContext: ExecutionContext, + ): FutureUnlessShutdown[A] = + noisyInfiniteRetryUS( + FutureUnlessShutdown.outcomeF(task), + performUnlessClosing, + retryInterval, + operationName, + actionable, + ) + + /** Repeatedly execute the task until it returns an abort due to shutdown, doesn't throw an exception, or the `flagCloseable` is closing. 
*/ + def noisyInfiniteRetryUS[A]( + task: => FutureUnlessShutdown[A], + performUnlessClosing: PerformUnlessClosing, + retryInterval: FiniteDuration, + operationName: String, + actionable: String, + )(implicit + loggingContext: ErrorLoggingContext, + executionContext: ExecutionContext, + ): FutureUnlessShutdown[A] = + Pause( + loggingContext.logger, + performUnlessClosing, + maxRetries = Int.MaxValue, + retryInterval, + operationName = operationName, + actionable = Some(actionable), + ).unlessShutdown(task, AllExnRetryable)( + Success.always, + executionContext, + loggingContext.traceContext, + ) +} + +abstract class RetryWithDelay( + logger: TracedLogger, + operationName: String, + longDescription: String, + actionable: Option[String], // How to mitigate the error + initialDelay: FiniteDuration, + totalMaxRetries: Int, + performUnlessClosing: PerformUnlessClosing, + retryLogLevel: Option[Level], + suspendRetries: Eval[FiniteDuration], +) extends Policy(logger) { + + private val complainAfterRetries: Int = 10 + + private val actionableMessage: String = actionable.map(" " + _).getOrElse("") + + protected def nextDelay(nextCount: Int, delay: FiniteDuration): FiniteDuration + + /** A [[com.digitalasset.canton.util.retry.Success]] criteria is supplied + * to determine whether the future-based task has succeeded, or if it should perhaps be retried. Retries are not + * performed after the [[com.digitalasset.canton.lifecycle.FlagCloseable]] has been closed. In that case, the + * Future is completed with the last result (even if it is an outcome that doesn't satisfy the `success` predicate). 
+ */ + override def apply[T]( + task: => Future[T], + retryable: ExceptionRetryable, + )(implicit + success: Success[T], + executionContext: ExecutionContext, + traceContext: TraceContext, + ): Future[T] = + retryWithDelay(task, retryable, executionContext).transform { + case util.Success(RetryOutcome(outcome, _termination)) => + outcome + case Failure(failure) => + logger.error("retryWithDelay failed unexpectedly", failure) + Failure(failure) + }(directExecutionContext) + + /** In contrast to [[com.digitalasset.canton.util.retry.RetryWithDelay.apply]], this Policy completes the returned + * future with `AbortedDueToShutdown` if the retry is aborted due to the corresponding + * [[com.digitalasset.canton.lifecycle.FlagCloseable]] being closed or if the task itself reports a shutdown (and + * not with the last result). + * + * Unless your task does already naturally return a `FutureUnlessShutdown[T]`, using + * [[com.digitalasset.canton.util.retry.RetryWithDelay.apply]] is likely sufficient to make it robust against + * shutdowns. 
+ */ + override def unlessShutdown[T]( + task: => FutureUnlessShutdown[T], + retryable: ExceptionRetryable, + )(implicit + success: Success[T], + executionContext: ExecutionContext, + traceContext: TraceContext, + ): FutureUnlessShutdown[T] = + FutureUnlessShutdown { + retryWithDelay(task.unwrap, retryable, executionContext)(Success.onShutdown, implicitly) + .transform { + case util.Success(outcome) => + util.Success(outcome.toUnlessShutdown.flatten) + case Failure(failure) => + logger.error("retryWithDelay failed unexpectedly", failure) + Failure(failure) + }(directExecutionContext) + } + + private def retryWithDelay[T]( + task: => Future[T], + retryable: ExceptionRetryable, + executionContext: ExecutionContext, + )(implicit success: Success[T], traceContext: TraceContext): Future[RetryOutcome[T]] = { + implicit val loggingContext: ErrorLoggingContext = ErrorLoggingContext.fromTracedLogger(logger) + + import LoggerUtil.logOnThrow + + def runTask(): Future[T] = Future.fromTry(Try(task)).flatten + + def run( + previousResult: Future[T], + totalRetries: Int, + lastErrorKind: ErrorKind, + retriesOfLastErrorKind: Int, + delay: FiniteDuration, + ): Future[RetryOutcome[T]] = logOnThrow { + previousResult.transformWith { x => + logOnThrow( + x match { + case succ @ util.Success(result) if success.predicate(result) => + logger.trace( + s"The operation '$operationName' was successful. No need to retry. $longDescription" + ) + Future.successful(RetryOutcome(succ, RetryTermination.Success)) + + case outcome if performUnlessClosing.isClosing => + logger.debug( + s"Giving up on retrying the operation '$operationName' due to shutdown. Last attempt was $lastErrorKind" + ) + Future.successful(RetryOutcome(outcome, RetryTermination.Shutdown)) + + case outcome if totalMaxRetries < Int.MaxValue && totalRetries >= totalMaxRetries => + logger.info( + messageOfOutcome( + outcome, + s"Total maximum number of retries $totalMaxRetries exceeded. 
Giving up.", + ), + throwableOfOutcome(outcome), + ) + Future.successful(RetryOutcome(outcome, RetryTermination.GiveUp)) + + case outcome => + // this will also log the exception in outcome + val errorKind = retryable.retryOK(outcome, logger, Some(lastErrorKind)) + val retriesOfErrorKind = if (errorKind == lastErrorKind) retriesOfLastErrorKind else 0 + if ( + errorKind.maxRetries == Int.MaxValue || retriesOfErrorKind < errorKind.maxRetries + ) { + val suspendDuration = suspendRetries.value + if (suspendDuration > Duration.Zero) { + logger.info( + s"Suspend retrying the operation '$operationName' for $suspendDuration." + ) + DelayUtil + .delayIfNotClosing(operationName, suspendDuration, performUnlessClosing) + .onShutdown(())(directExecutionContext) + .flatMap(_ => run(previousResult, 0, errorKind, 0, initialDelay))( + directExecutionContext + ) + } else { + val level = retryLogLevel.getOrElse { + if (totalRetries < complainAfterRetries || totalMaxRetries != Int.MaxValue) + Level.INFO + else Level.WARN + } + val change = if (errorKind == lastErrorKind) { + "" + } else { + s"New kind of error: $errorKind. " + } + LoggerUtil.logAtLevel( + level, + messageOfOutcome(outcome, show"${change}Retrying after $delay."), + // No need to log the exception in the outcome, as this has been logged by retryable.retryOk. + ) + + val delayedF = + DelayUtil.delayIfNotClosing(operationName, delay, performUnlessClosing) + delayedF + .flatMap { _ => + logOnThrow { + LoggerUtil.logAtLevel( + level, + s"Now retrying operation '$operationName'. $longDescription$actionableMessage", + ) + // Run the task again on the normal execution context as the task might take a long time. + // `performUnlessClosingF` guards against closing the execution context. 
+ val nextRunUnlessShutdown = + performUnlessClosing.performUnlessClosingF(operationName)(runTask())( + executionContext, + traceContext, + ) + @SuppressWarnings(Array("org.wartremover.warts.TryPartial")) + val nextRunF = nextRunUnlessShutdown + .onShutdown { + // If we're closing, report the previous `outcome` and recurse. + // This will enter the case branch with `flagCloseable.isClosing` + // and therefore yield the termination reason `Shutdown`. + outcome.get + }( + // Use the direct execution context as this is a small task. + // The surrounding `performUnlessClosing` ensures that this post-processing + // is registered with the normal execution context before it can close. + directExecutionContext + ) + FutureUnlessShutdown.outcomeF( + run( + nextRunF, + totalRetries + 1, + errorKind, + retriesOfErrorKind + 1, + nextDelay(totalRetries + 1, delay), + ) + )(executionContext) + } + }( + // It is safe to use the general execution context here by the following argument. + // - If the `onComplete` executes before `DelayUtil` completes the returned promise, + // then the completion of the promise will schedule the function immediately. + // Since this completion is guarded by `performUnlessClosing`, + // the body gets scheduled with `executionContext` before `flagCloseable`'s close method completes. + // - If `DelayUtil` completes the returned promise before the `onComplete` call executes, + // the `onComplete` call itself will schedule the body + // and this is guarded by the `performUnlessClosing` above. + // Therefore the execution context is still open when the scheduling happens. + executionContext + ) + .onShutdown( + RetryOutcome(outcome, RetryTermination.Shutdown) + )(executionContext) + } + } else { + logger.info( + messageOfOutcome( + outcome, + s"Maximum number of retries ${errorKind.maxRetries} exceeded. Giving up.", + // No need to log the exception in outcome, as this has been logged by retryable.retryOk. 
+ ) + ) + Future.successful(RetryOutcome(outcome, RetryTermination.GiveUp)) + } + } + ) + // Although in most cases, it may be ok to schedule the body on executionContext, + // there is a chance that executionContext is closed when the body is scheduled. + // By choosing directExecutionContext, we avoid a RejectedExecutionException in this case. + }(directExecutionContext) + } + + // Run 0: Run task without checking `flagCloseable`. If necessary, the client has to check for closing. + // Run 1 onwards: Only run this if `flagCloseable` is not closing. + // (The check is performed at the recursive call.) + // Checking at the client would be very difficult, because the client would have to deal with a closed EC. + run(runTask(), 0, NoErrorKind, 0, initialDelay) + } + + private def messageOfOutcome( + outcome: Try[Any], + consequence: String, + ): String = outcome match { + case util.Success(result) => + s"The operation '$operationName' was not successful. $consequence Result: $result. $longDescription" + case Failure(_) => + s"The operation '$operationName' has failed with an exception. $consequence $longDescription$actionableMessage" + } + + @SuppressWarnings(Array("org.wartremover.warts.Null")) + private def throwableOfOutcome(outcome: Try[Any]): Throwable = outcome.fold(identity, null) +} + +object RetryWithDelay { + + /** The outcome of the last run of the task, + * along with the condition that stopped the retry. 
+ */ + private final case class RetryOutcome[A](outcome: Try[A], termination: RetryTermination) { + + /** @throws java.lang.Throwable Rethrows the exception if [[outcome]] is a [[scala.util.Failure]] */ + @SuppressWarnings(Array("org.wartremover.warts.TryPartial")) + def toUnlessShutdown: UnlessShutdown[A] = { + if (termination == RetryTermination.Shutdown) AbortedDueToShutdown + else Outcome(outcome.get) + } + } + private sealed trait RetryTermination extends Product with Serializable + private[RetryWithDelay] object RetryTermination { + + /** The task completed successfully */ + case object Success extends RetryTermination + + /** The retry limit was exceeded or an exception was deemed not retryable */ + case object GiveUp extends RetryTermination + + /** The retry stopped due to shutdown */ + case object Shutdown extends RetryTermination + } +} + +/** Retry immediately after failure for a max number of times */ +final case class Directly( + logger: TracedLogger, + performUnlessClosing: PerformUnlessClosing, + maxRetries: Int, + operationName: String, + longDescription: String = "", + retryLogLevel: Option[Level] = None, + suspendRetries: Eval[FiniteDuration] = Eval.now(Duration.Zero), +) extends RetryWithDelay( + logger, + operationName, + longDescription, + None, + Duration.Zero, + maxRetries, + performUnlessClosing, + retryLogLevel, + suspendRetries, + ) { + + override def nextDelay(nextCount: Int, delay: FiniteDuration): FiniteDuration = Duration.Zero +} + +/** Retry with a pause between attempts for a max number of times */ +final case class Pause( + logger: TracedLogger, + performUnlessClosing: PerformUnlessClosing, + maxRetries: Int, + delay: FiniteDuration, + operationName: String, + longDescription: String = "", + actionable: Option[String] = None, + retryLogLevel: Option[Level] = None, + suspendRetries: Eval[FiniteDuration] = Eval.now(Duration.Zero), +) extends RetryWithDelay( + logger, + operationName, + longDescription, + actionable, + delay, + 
maxRetries, + performUnlessClosing, + retryLogLevel, + suspendRetries, + ) { + + override def nextDelay(nextCount: Int, delay: FiniteDuration): FiniteDuration = delay +} + +/** A retry policy which will back off using a configurable policy which + * incorporates random jitter. This has the advantage of reducing contention + * if you have threaded clients using the same service. + * + * {{{ + * val policy = retry.Backoff() + * val future = policy(issueRequest) + * }}} + * + * The following pre-made jitter algorithms are available for you to use: + * + * - [[Jitter.none]] + * - [[Jitter.full]] + * - [[Jitter.equal]] + * - [[Jitter.decorrelated]] + * + * You can choose one like this: + * {{{ + * implicit val jitter = retry.Jitter.full(cap = 5.minutes) + * val policy = retry.Backoff(1 second) + * val future = policy(issueRequest) + * }}} + * + * If a jitter policy isn't in scope, it will use [[Jitter.full]] by + * default which tends to cause clients slightly less work at the cost of + * slightly more time. + * + * For more information about the algorithms, see the following article: + * + * [[https://www.awsarchitectureblog.com/2015/03/backoff.html]] + * + * If the retry is not successful after `maxRetries`, the future is completed with its last result. 
+ */ +final case class Backoff( + logger: TracedLogger, + flagCloseable: PerformUnlessClosing, + maxRetries: Int, + initialDelay: FiniteDuration, + maxDelay: Duration, + operationName: String, + longDescription: String = "", + actionable: Option[String] = None, + retryLogLevel: Option[Level] = None, + suspendRetries: Eval[FiniteDuration] = Eval.now(Duration.Zero), +)(implicit jitter: Jitter = Jitter.full(maxDelay)) + extends RetryWithDelay( + logger, + operationName, + longDescription, + actionable, + initialDelay, + maxRetries, + flagCloseable, + retryLogLevel, + suspendRetries, + ) { + + override def nextDelay(nextCount: Int, delay: FiniteDuration): FiniteDuration = + jitter(initialDelay, delay, nextCount) +} + +/** A retry policy in which the failure determines the way a future should be retried. + * The partial function `depends` provided may define the domain of both the success OR exceptional + * failure of a future fails explicitly. + * + * {{{ + * val policy = retry.When { + * case RetryAfter(retryAt) => retry.Pause(delay = retryAt) + * } + * val future = policy(issueRequest) + * }}} + * + * If the result is not defined for the depends block, the future will not + * be retried. 
+ */ +final case class When( + logger: TracedLogger, + depends: PartialFunction[Any, Policy], +) extends Policy(logger) { + + override def apply[T](task: => Future[T], retryable: ExceptionRetryable)(implicit + success: Success[T], + executionContext: ExecutionContext, + traceContext: TraceContext, + ): Future[T] = { + val fut = Future.fromTry(Try(task)).flatten + fut + .flatMap { res => + if (success.predicate(res) || !depends.isDefinedAt(res)) fut + else depends(res)(task, retryable) + }(directExecutionContext) + .recoverWith { case NonFatal(e) => + if (depends.isDefinedAt(e) && retryable.retryOK(Failure(e), logger, None).maxRetries > 0) + depends(e)(task, retryable) + else fut + }(directExecutionContext) + } + + override def unlessShutdown[T](task: => FutureUnlessShutdown[T], retryOk: ExceptionRetryable)( + implicit + success: Success[T], + executionContext: ExecutionContext, + traceContext: TraceContext, + ): FutureUnlessShutdown[T] = + FutureUnlessShutdown(apply(task.unwrap, retryOk)(Success.onShutdown, implicitly, implicitly)) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/retry/RetryEither.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/retry/RetryEither.scala new file mode 100644 index 0000000000..e655f501b1 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/retry/RetryEither.scala @@ -0,0 +1,95 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util.retry + +import cats.data.EitherT +import cats.syntax.either.* +import cats.syntax.flatMap.* +import com.digitalasset.canton.concurrent.Threading +import com.digitalasset.canton.lifecycle.{CloseContext, UnlessShutdown} +import com.digitalasset.canton.logging.{ErrorLoggingContext, TracedLogger} +import com.digitalasset.canton.util.LoggerUtil +import org.slf4j.event.Level + +/** Simple form of the retry policies that operate on Either and not Future[T]. + * Only provides a Pause-based retry. + */ +object RetryEither { + def retry[A, B]( + maxRetries: Int, + waitInMs: Long, + operationName: String, + logger: TracedLogger, + stopOnLeft: Option[A => Boolean] = None, + retryLogLevel: Level = Level.INFO, + failLogLevel: Level = Level.WARN, + )( + body: => Either[A, B] + )(implicit + loggingContext: ErrorLoggingContext, + closeContext: CloseContext, + ): EitherT[UnlessShutdown, A, B] = { + retryUnlessShutdown( + maxRetries, + waitInMs, + operationName, + logger, + stopOnLeft, + retryLogLevel, + failLogLevel, + )( + EitherT(UnlessShutdown.Outcome(body)) + ) + } + + def retryUnlessShutdown[A, B]( + maxRetries: Int, + waitInMs: Long, + operationName: String, + logger: TracedLogger, + stopOnLeft: Option[A => Boolean] = None, + retryLogLevel: Level = Level.INFO, + failLogLevel: Level = Level.WARN, + )( + body: => EitherT[UnlessShutdown, A, B] + )(implicit + loggingContext: ErrorLoggingContext, + closeContext: CloseContext, + ): EitherT[UnlessShutdown, A, B] = { + maxRetries.tailRecM { retryCount => + EitherT { + closeContext.context + .performUnlessClosing(operationName)(body)(loggingContext.traceContext) + .flatMap { + _.value.map { + _.map(Right(_)) + .leftFlatMap { err => + if (stopOnLeft.exists(fn => fn(err))) { + // Stop the retry attempts on this particular Left if stopOnLeft is true + Left(err) + } else if (retryCount <= 0) { + // Stop the recursion with the error if we exhausted the max 
retries + LoggerUtil.logAtLevel( + failLogLevel, + s"Operation $operationName failed, exhausted retries: $err", + ) + Left(err) + } else { + // Retry the operation if it failed but we have retries left + LoggerUtil.logAtLevel( + retryLogLevel, + s"Operation $operationName failed, retrying in ${waitInMs}ms: $err", + ) + Threading.sleep(waitInMs) + val nextRetry = if (retryCount == Int.MaxValue) Int.MaxValue else retryCount - 1 + Right(Left(nextRetry)) + } + } + } + } + } + } + } + +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/retry/RetryUtil.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/retry/RetryUtil.scala new file mode 100644 index 0000000000..59c0520551 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/retry/RetryUtil.scala @@ -0,0 +1,271 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util.retry + +import com.digitalasset.canton.DiscardOps +import com.digitalasset.canton.logging.{ErrorLoggingContext, TracedLogger} +import com.digitalasset.canton.resource.DatabaseStorageError.DatabaseStorageDegradation.DatabaseTaskRejected +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.TryUtil.ForFailedOps +import org.postgresql.util.PSQLException + +import java.sql.* +import scala.annotation.tailrec +import scala.util.{Failure, Try} + +object RetryUtil { + + /** When using retry code in different contexts, different exceptions should be retried on. This trait provides a + * way to define what exceptions should be retried and which are fatal. + */ + trait ExceptionRetryable { + + /** Determines what kind of error (if any) resulted in the outcome, + * and gives a recommendation on how many times to retry. + * + * Also logs the embedded exception. 
+ */ + def retryOK(outcome: Try[_], logger: TracedLogger, lastErrorKind: Option[ErrorKind])(implicit + tc: TraceContext + ): ErrorKind + + protected def logThrowable(e: Throwable, logger: TracedLogger)(implicit + traceContext: TraceContext + ): Unit = e match { + case sqlE: SQLException => + // Unfortunately, the sql state and error code won't get logged automatically. + logger.info( + s"Detected an SQLException. SQL state: ${sqlE.getSQLState}, error code: ${sqlE.getErrorCode}", + e, + ) + case _: Throwable => + logger.info(s"Detected an error.", e) + } + } + + sealed trait ErrorKind { + def maxRetries: Int + } + + case object NoErrorKind extends ErrorKind { + override val maxRetries: Int = Int.MaxValue + + override def toString: String = "no error (request infinite retries)" + } + + case object FatalErrorKind extends ErrorKind { + override val maxRetries = 0 + + override def toString: String = "fatal error (give up immediately)" + } + + /** Main use case is a network outage. Infinite retries are needed, as we don't know how long the outage takes. + */ + case object TransientErrorKind extends ErrorKind { + override val maxRetries: Int = Int.MaxValue + + override def toString: String = "transient error (request infinite retries)" + } + + /** Main use case is a transient unique constraint violation due to racy merge statements. + * Should go away after a very limited amount of retries. + */ + case object SpuriousTransientErrorKind extends ErrorKind { + // Value determined empirically in UpsertTestOracle. + // For single row inserts, 1 is sufficient. + // For batched inserts, 3 was more than sufficient in the test. + override val maxRetries = 10 + + override def toString: String = + s"possibly spurious transient error (request up to $maxRetries retries)" + } + + /** Defines which exceptions should be retryable when thrown by the database. 
+ */ + case object DbExceptionRetryable extends ExceptionRetryable { + + def retryOKForever(error: Throwable, logger: TracedLogger)(implicit + tc: TraceContext + ): Boolean = { + // Don't retry forever on "contention" errors, as these may not actually be due to contention and get stuck + // forever. Eg unique constraint violation exceptions can be caused by contention in H2 leading to data anomalies. + DbExceptionRetryable.retryOK(Failure(error), logger, None).maxRetries == Int.MaxValue + } + + override def retryOK( + outcome: Try[_], + logger: TracedLogger, + lastErrorKind: Option[ErrorKind], + )(implicit + tc: TraceContext + ): ErrorKind = { + outcome match { + case util.Success(_) => NoErrorKind + case ff @ Failure(exception) => + val errorKind = retryOKInternal(ff, logger) + // only log the full exception if the error kind changed such that we avoid spamming the logs + if (!lastErrorKind.contains(errorKind)) { + logThrowable(exception, logger) + } else { + logger.debug( + s"Retrying on same error kind ${errorKind} for ${exception.getClass.getSimpleName}/${exception.getMessage}" + ) + } + errorKind + } + } + + @tailrec private def retryOKInternal( + outcome: Failure[_], + logger: TracedLogger, + )(implicit + tc: TraceContext + ): ErrorKind = { + outcome.exception match { + case exn: java.util.concurrent.RejectedExecutionException => + // This occurs when slick's task queue is full + + // Create a CantonError so that the error code gets logged. 
+ DatabaseTaskRejected(exn.toString)( + ErrorLoggingContext.fromTracedLogger(logger) + ).discard + + TransientErrorKind + + case exception: PSQLException => + // Error codes documented here: https://www.postgresql.org/docs/9.6/errcodes-appendix.html + val error = exception.getSQLState + + if (error.startsWith("08")) { + // Class 08 — Connection Exception + TransientErrorKind + } else if (error == "40001") { + // Class 40 — Transaction Rollback: 40001 serialization_failure + // Failure to serialize db accesses, happens due to contention + TransientErrorKind + } else if (error == "40P01") { + // Deadlock + // See DatabaseDeadlockTestPostgres + // This also covers deadlocks reported as BatchUpdateExceptions, + // because they refer to a PSQLException as cause. + TransientErrorKind + } else if (error == "25006") { + // Retry on read only transaction, which can occur on Azure + TransientErrorKind + } else if (error.startsWith("57P") && error != "57P014" && error != "57P04") { + // Retry on operator intervention errors, otherwise Canton components crash in an uncontrolled manner when + // the exception bubbles up (don't retry on `query_canceled` and `database_dropped`) + TransientErrorKind + } else if ( + error == "53000" || error == "53100" || error == "53200" || error == "53300" || error == "53400" + ) { + // Retry insufficient db resource errors + TransientErrorKind + } else { + // Don't retry on other exceptions. These other exceptions should be those for which retrying typically won't + // help, for example a unique constraint violation. + logger.info(s"Fatal sql exception has error code: $error") + FatalErrorKind + } + + case _: SQLIntegrityConstraintViolationException => + // Both H2 and Oracle may fail with spurious constraint violations, due to racy implementation of the MERGE statements. + // In H2, this may also occur because it does not properly implement the serializable isolation level. 
+ // See UpsertTestOracle + // See https://github.com/h2database/h2database/issues/2167 + SpuriousTransientErrorKind + + case _: SQLRecoverableException | _: SQLTransientException | + _: SQLNonTransientConnectionException => + TransientErrorKind + + // Handle SQLException and all classes that derive from it (e.g. java.sql.BatchUpdateException) + // Note that if the exception is not known but has a cause, we'll base the retry on the cause + case ex: SQLException => + val code = ex.getErrorCode + if (ex.getErrorCode == 1) { + // Retry on ORA-00001: unique constraint violated exception + SpuriousTransientErrorKind + } else if (ex.getMessage == "Connection is closed") { + // May fail with a "Connection is closed" message if the db has gone down + TransientErrorKind + } else if (ex.getErrorCode == 4021) { + // ORA timeout occurred while waiting to lock object + TransientErrorKind + } else if (ex.getErrorCode == 54) { + // ORA timeout occurred while waiting to lock object or because NOWAIT has been set + // e.g. as part of truncate table + TransientErrorKind + } else if (ex.getErrorCode == 60) { + // Deadlock + // See DatabaseDeadlockTestOracle + TransientErrorKind + } else if ( + ex.getErrorCode == 604 && + List("ORA-08176", "ORA-08177").exists(ex.getMessage.contains) + ) { + // Oracle failure in a batch operation + // For Oracle, the `cause` is not always set properly for exceptions. This is a problem for batched queries. + // So, look through an exception's `message` to see if it contains a retryable problem. + TransientErrorKind + } else if (ex.getErrorCode == 8176) { + // consistent read failure; rollback data not available + // Cause: Encountered data changed by an operation that does not generate rollback data + // Action: In read/write transactions, retry the intended operation. 
+ TransientErrorKind + } else if (ex.getErrorCode == 8177) { + // failure to serialize transaction with serializable isolation level + TransientErrorKind + } else if (ex.getErrorCode == 17410) { + // No more data to read from socket, can be caused by network problems + SpuriousTransientErrorKind + } else if (code == 17002) { + // This has been observed as either IO Error: Connection reset by peer or IO Error: Broken pipe + // when straight-up killing an Oracle database server (`kill -9 <pid>`) + TransientErrorKind + } else if (code == 1088 || code == 1089 || code == 1090 || code == 1092) { + // Often observed for orderly Oracle shutdowns + // https://docs.oracle.com/en/database/oracle/oracle-database/19/errmg/ORA-00910.html#GUID-D9EBDFFA-88C6-4185-BD2C-E1B959A97274 + TransientErrorKind + } else if (ex.getCause != null) { + logger.info("Unable to retry on exception, checking cause.") + retryOKInternal(Failure(ex.getCause), logger) + } else { + FatalErrorKind + } + + case _ => FatalErrorKind + } + } + } + + /** Retry on any exception. + * + * This is a sensible default choice for non-db tasks with a finite maximum number of retries. + */ + case object AllExnRetryable extends ExceptionRetryable { + + override def retryOK(outcome: Try[_], logger: TracedLogger, lastErrorKind: Option[ErrorKind])( + implicit tc: TraceContext + ): ErrorKind = { + outcome.forFailed(t => logThrowable(t, logger)) + NoErrorKind + } + + } + + /** Don't retry on any exception. 
+ */ + case object NoExnRetryable extends ExceptionRetryable { + + override def retryOK(outcome: Try[_], logger: TracedLogger, lastErrorKind: Option[ErrorKind])( + implicit tc: TraceContext + ): ErrorKind = outcome match { + case Failure(ex) => + logThrowable(ex, logger) + FatalErrorKind + case util.Success(_) => NoErrorKind + } + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/retry/Success.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/retry/Success.scala new file mode 100644 index 0000000000..d76488ed37 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/retry/Success.scala @@ -0,0 +1,42 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util.retry + +import com.digitalasset.canton.lifecycle.UnlessShutdown +import com.digitalasset.canton.lifecycle.UnlessShutdown.{AbortedDueToShutdown, Outcome} + +import scala.annotation.implicitNotFound +import scala.util.Try + +@implicitNotFound( + "Cannot find an implicit retry.Success for the given type of Future, either require one yourself or import retry.Success._" +) +class Success[-T](val predicate: T => Boolean) { + def or[TT <: T](that: Success[TT]): Success[TT] = + Success[TT](v => predicate(v) || that.predicate(v)) + def or[TT <: T](that: => Boolean): Success[TT] = or(Success[TT](_ => that)) + def and[TT <: T](that: Success[TT]): Success[TT] = + Success[TT](v => predicate(v) && that.predicate(v)) + def and[TT <: T](that: => Boolean): Success[TT] = and(Success[TT](_ => that)) +} + +object Success { + implicit def either[A, B]: Success[Either[A, B]] = + Success(_.isRight) + implicit def option[A]: Success[Option[A]] = + Success(!_.isEmpty) + implicit def tried[A]: Success[Try[A]] = + Success(_.isSuccess) + + val always = Success(Function.const(true)) + val never = 
Success(Function.const(false)) + + def onShutdown[T](implicit success: Success[T]): Success[UnlessShutdown[T]] = + Success[UnlessShutdown[T]] { + case Outcome(result) => success.predicate(result) + case AbortedDueToShutdown => true + } + + def apply[T](pred: T => Boolean) = new Success(pred) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/retry/package.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/retry/package.scala new file mode 100644 index 0000000000..8320f49173 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/util/retry/package.scala @@ -0,0 +1,8 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util + +package object retry { + val Forever: Int = Int.MaxValue +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/version/HasProtocolVersionedWrapper.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/version/HasProtocolVersionedWrapper.scala new file mode 100644 index 0000000000..41ebf0afd4 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/version/HasProtocolVersionedWrapper.scala @@ -0,0 +1,916 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.version + +import cats.syntax.either.* +import cats.syntax.foldable.* +import cats.syntax.functor.* +import com.daml.nonempty.{NonEmpty, NonEmptyUtil} +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.store.db.DbDeserializationException +import com.digitalasset.canton.util.BinaryFileUtil +import com.digitalasset.canton.{DiscardOps, ProtoDeserializationError, checked} +import com.google.common.annotations.VisibleForTesting +import com.google.protobuf.{ByteString, InvalidProtocolBufferException} +import slick.jdbc.{GetResult, PositionedParameters, SetParameter} + +import java.io.{InputStream, OutputStream} +import scala.collection.immutable +import scala.math.Ordered.orderingToOrdered +import scala.util.Try +import scala.util.control.NonFatal + +trait HasRepresentativeProtocolVersion { + // Needs to be a `val` because we need a stable ref. + // @transient because there is no point in serializing it. + // Actual implementations should make this a `lazy val` so that it gets re-initialized after deserialization + @transient protected val companionObj: AnyRef + + /** We have a correspondence {Proto version} <-> {[protocol version]}: each proto version + * correspond to a list of consecutive protocol versions. The representative is one instance + * of this list, usually the smallest value. In other words, the Proto versions induce an + * equivalence relation on the list of protocol version, thus use of `representative`. + * + * The method `protocolVersionRepresentativeFor` below + * allows to query the representative for an equivalence class. 
+ */ + def representativeProtocolVersion: RepresentativeProtocolVersion[companionObj.type] +} + +/** See [[com.digitalasset.canton.version.HasProtocolVersionedWrapper.representativeProtocolVersion]] for more context */ +sealed abstract case class RepresentativeProtocolVersion[ValueCompanion]( + private val v: ProtocolVersion +) extends PrettyPrinting { + + /** When using this method, keep in mind that for a given companion object `C` that implements + * `HasProtocolVersionedWrapperCompanion` and for a protocol version `pv`, then + * `C.protocolVersionRepresentativeFor(pv).representative` is different than `pv`. + * In particular, do not use a representative for a given class to construct a representative + * for another class. + */ + def representative: ProtocolVersion = v + + override def pretty: Pretty[this.type] = prettyOfParam(_.v) +} + +object RepresentativeProtocolVersion { + + implicit val setParameterRepresentativeProtocolVersion + : SetParameter[RepresentativeProtocolVersion[_]] = + (rpv: RepresentativeProtocolVersion[_], pp: PositionedParameters) => pp >> rpv.v + + // As `ValueCompanion` is a phantom type on `RepresentativeProtocolVersion`, + // we can have a single Ordering object for all of them here. + @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) + implicit def orderRepresentativeProtocolVersion[ValueClass] + : Ordering[RepresentativeProtocolVersion[ValueClass]] = + orderingRepresentativeProtocolVersionInternal + .asInstanceOf[Ordering[RepresentativeProtocolVersion[ValueClass]]] + + private[this] val orderingRepresentativeProtocolVersionInternal + : Ordering[RepresentativeProtocolVersion[Any]] = + Ordering.by(_.representative) + +} + +/** Trait for classes that can be serialized by using ProtoBuf. + * See "CONTRIBUTING.md" for our guidelines on serialization. + * + * This wrapper is to be used when every instance can be tied to a single protocol version. 
+ * Consequently, some attributes of the class may depend on the protocol version (e.g., the signature). + * The protocol version is then bundled with the instance and does not need to + * be passed to the toProtoVersioned, toByteString and getCryptographicEvidence + * methods. + * + * The underlying ProtoClass is [[com.digitalasset.canton.version.UntypedVersionedMessage]] + * but we often specify the typed alias [[com.digitalasset.canton.version.VersionedMessage]] + * instead. + */ +trait HasProtocolVersionedWrapper[ValueClass <: HasRepresentativeProtocolVersion] + extends HasRepresentativeProtocolVersion { + self: ValueClass => + + @transient + override protected val companionObj: HasProtocolVersionedWrapperCompanion[ValueClass, _] + + def isEquivalentTo(protocolVersion: ProtocolVersion): Boolean = + companionObj.protocolVersionRepresentativeFor(protocolVersion) == representativeProtocolVersion + + private def serializeToHighestVersion: VersionedMessage[ValueClass] = { + VersionedMessage( + companionObj.supportedProtoVersions.higherConverter.serializer(self), + companionObj.supportedProtoVersions.higherProtoVersion.v, + ) + } + + /** Will check that default value rules defined in `companionObj.defaultValues` hold. + */ + def validateInstance(): Either[String, Unit] = + companionObj.validateInstance(this, representativeProtocolVersion) + + /** Yields the proto representation of the class inside an `UntypedVersionedMessage` wrapper. + * + * Subclasses should make this method public by default, as this supports composing proto serializations. + * Keep it protected, if there are good reasons for it + * (e.g. [[com.digitalasset.canton.serialization.ProtocolVersionedMemoizedEvidence]]). + * + * Be aware that if calling on a class that defines a LegacyProtoConverter, this method will still + * return a VersionedMessage. 
If the current protocol version maps to the + * legacy converter, deserialization will then fail (as it will try to deserialize to the raw protobuf instead of the + * VersionedMessage wrapper this was serialized to. + * Prefer using toByteString which handles this use case correctly. + */ + def toProtoVersioned: VersionedMessage[ValueClass] = + companionObj.supportedProtoVersions.converters + .collectFirst { + case (protoVersion, supportedVersion) + if representativeProtocolVersion >= supportedVersion.fromInclusive => + VersionedMessage(supportedVersion.serializer(self), protoVersion.v) + } + .getOrElse(serializeToHighestVersion) + + /** Yields the Proto version that this class will be serialized to + */ + def protoVersion: ProtoVersion = + companionObj.protoVersionFor(representativeProtocolVersion) + + /** Yields a byte string representation of the corresponding `UntypedVersionedMessage` wrapper of this instance. + */ + def toByteString: ByteString = companionObj.supportedProtoVersions.converters + .collectFirst { + case (protoVersion, supportedVersion) + if representativeProtocolVersion >= supportedVersion.fromInclusive => + supportedVersion match { + case versioned if versioned.isVersioned => + VersionedMessage(supportedVersion.serializer(self), protoVersion.v).toByteString + case legacy => + legacy.serializer(self) + } + } + .getOrElse(serializeToHighestVersion.toByteString) + + /** Serializes this instance to a message together with a delimiter (the message length) to the given output stream. + * + * This method works in conjunction with + * [[com.digitalasset.canton.version.HasProtocolVersionedCompanion2.parseDelimitedFrom]] which deserializes the + * message again. It is useful for serializing multiple messages to a single output stream through multiple + * invocations. + * + * Serialization is only supported for + * [[com.digitalasset.canton.version.HasSupportedProtoVersions.VersionedProtoConverter]], an error message is + * returned otherwise. 
+ * + * @param output the sink to which this message is serialized to + * @return an Either where left represents an error message, and right represents a successful message + * serialization + */ + def writeDelimitedTo(output: OutputStream): Either[String, Unit] = { + val converter: Either[String, VersionedMessage[ValueClass]] = + companionObj.supportedProtoVersions.converters + .collectFirst { + case (protoVersion, supportedVersion) + if representativeProtocolVersion >= supportedVersion.fromInclusive => + supportedVersion match { + case companionObj.VersionedProtoConverter(_, _, serializer) => + Right(VersionedMessage(serializer(self), protoVersion.v)) + case other => + Left( + s"Cannot call writeDelimitedTo on ${companionObj.name} in protocol version equivalent to ${other.fromInclusive.representative}" + ) + } + } + .getOrElse(Right(serializeToHighestVersion)) + + converter.flatMap(actual => + Try(actual.writeDelimitedTo(output)).toEither.leftMap(e => + s"Cannot serialize ${companionObj.name} into the given output stream due to: ${e.getMessage}" + ) + ) + } + + /** Yields a byte array representation of the corresponding `UntypedVersionedMessage` wrapper of this instance. + */ + def toByteArray: Array[Byte] = toByteString.toByteArray + + def writeToFile(outputFile: String): Unit = + BinaryFileUtil.writeByteStringToFile(outputFile, toByteString) + + /** Casts this instance's representative protocol version to one for the target type. + * This only succeeds if the versioning schemes are the same. 
+ */ + @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) + def castRepresentativeProtocolVersion[T <: HasSupportedProtoVersions[_]]( + target: T + ): Either[String, RepresentativeProtocolVersion[T]] = { + val sourceTable = companionObj.supportedProtoVersions.table + val targetTable = target.supportedProtoVersions.table + + Either.cond( + sourceTable == targetTable, + representativeProtocolVersion.asInstanceOf[RepresentativeProtocolVersion[T]], + "Source and target versioning schemes should be the same", + ) + } +} + +/** This trait has the logic to store proto (de)serializers and retrieve them by protocol version. + * @tparam ValueClass + */ +trait HasSupportedProtoVersions[ValueClass] { + + /** The name of the class as used for pretty-printing and error reporting */ + def name: String + + // Deserializer: (Proto => ValueClass) + type Deserializer + // Serializer: (ValueClass => Proto) + type Serializer = ValueClass => ByteString + + protected type ThisRepresentativeProtocolVersion = RepresentativeProtocolVersion[this.type] + + trait Invariant { + def validateInstance( + v: ValueClass, + rpv: ThisRepresentativeProtocolVersion, + ): Either[String, Unit] + } + + private[version] sealed trait InvariantImpl[T] extends Invariant with Product with Serializable { + def attribute: ValueClass => T + def validate(v: T, pv: ProtocolVersion): Either[String, Unit] + def validate(v: T, rpv: ThisRepresentativeProtocolVersion): Either[String, Unit] + def validateInstance( + v: ValueClass, + rpv: ThisRepresentativeProtocolVersion, + ): Either[String, Unit] = + validate(attribute(v), rpv) + } + + /* + This trait encodes a default value starting (or ending) at a specific protocol version. + */ + private[version] sealed trait DefaultValue[T] extends InvariantImpl[T] { + + def defaultValue: T + + /** Returns `v` or the default value, depending on the `protocolVersion`. 
+ */ + def orValue(v: T, protocolVersion: ProtocolVersion): T + + /** Returns `v` or the default value, depending on the `protocolVersion`. + */ + def orValue(v: T, protocolVersion: ThisRepresentativeProtocolVersion): T + + override def validate(v: T, rpv: ThisRepresentativeProtocolVersion): Either[String, Unit] = + validate(v, rpv.representative) + } + + case class DefaultValueFromInclusive[T]( + attribute: ValueClass => T, + attributeName: String, + startInclusive: ThisRepresentativeProtocolVersion, + defaultValue: T, + ) extends DefaultValue[T] { + def orValue(v: T, protocolVersion: ProtocolVersion): T = + if (protocolVersion >= startInclusive.representative) defaultValue else v + + def orValue(v: T, protocolVersion: ThisRepresentativeProtocolVersion): T = + if (protocolVersion >= startInclusive) defaultValue else v + + override def validate( + v: T, + pv: ProtocolVersion, + ): Either[String, Unit] = { + val shouldHaveDefaultValue = pv >= startInclusive.representative + + Either.cond( + !shouldHaveDefaultValue || v == defaultValue, + (), + s"expected default value for $attributeName in $name but found $v", + ) + } + } + + case class DefaultValueUntilExclusive[T]( + attribute: ValueClass => T, + attributeName: String, + untilExclusive: ThisRepresentativeProtocolVersion, + defaultValue: T, + ) extends DefaultValue[T] { + def orValue(v: T, protocolVersion: ProtocolVersion): T = + if (protocolVersion < untilExclusive.representative) defaultValue else v + + def orValue(v: T, protocolVersion: ThisRepresentativeProtocolVersion): T = + if (protocolVersion < untilExclusive) defaultValue else v + + override def validate( + v: T, + pv: ProtocolVersion, + ): Either[String, Unit] = { + val shouldHaveDefaultValue = pv < untilExclusive.representative + + Either.cond( + !shouldHaveDefaultValue || v == defaultValue, + (), + s"expected default value for $attributeName in $name but found $v", + ) + } + } + + case class EmptyOptionExactlyUntilExclusive[T]( + attribute: ValueClass 
=> Option[T], + attributeName: String, + untilExclusive: ThisRepresentativeProtocolVersion, + ) extends DefaultValue[Option[T]] { + val defaultValue: Option[T] = None + + def orValue(v: Option[T], protocolVersion: ProtocolVersion): Option[T] = + if (protocolVersion < untilExclusive.representative) defaultValue else v + + def orValue(v: Option[T], protocolVersion: ThisRepresentativeProtocolVersion): Option[T] = + if (protocolVersion < untilExclusive) defaultValue else v + + override def validate( + v: Option[T], + pv: ProtocolVersion, + ): Either[String, Unit] = + Either.cond( + v.isEmpty == pv < untilExclusive.representative, + (), + s"expecting None if and only if pv < ${untilExclusive.representative}; for $pv, found: $v", + ) + } + + def invariants: Seq[Invariant] = Nil + + def protocolVersionRepresentativeFor( + protocolVersion: ProtocolVersion + ): RepresentativeProtocolVersion[this.type] = + supportedProtoVersions.protocolVersionRepresentativeFor(protocolVersion) + + def protocolVersionRepresentativeFor( + protoVersion: ProtoVersion + ): RepresentativeProtocolVersion[this.type] = + supportedProtoVersions.protocolVersionRepresentativeFor(protoVersion) + + /** Return the Proto version corresponding to the representative protocol version + */ + def protoVersionFor( + protocolVersion: RepresentativeProtocolVersion[this.type] + ): ProtoVersion = supportedProtoVersions.protoVersionFor(protocolVersion) + + /** Return the Proto version corresponding to the protocol version + */ + def protoVersionFor(protocolVersion: ProtocolVersion): ProtoVersion = + supportedProtoVersions.protoVersionFor(protocolVersionRepresentativeFor(protocolVersion)) + + /** Base class for (de)serializating from/to protobuf of ValueClass from a specific PV + */ + sealed trait ProtoCodec { + def fromInclusive: RepresentativeProtocolVersion[HasSupportedProtoVersions.this.type] + def deserializer: Deserializer + def serializer: Serializer + // Can't always rely on the subtype to differentiate 
between instances of ProtoCodec, because the type is erased + // at compile time when it is a dependent type of ValueClass (e.g in HasProtocolVersionedWrapper). + // Instead use this method to differentiate between versioned and un-versioned serialization + def isVersioned: Boolean + def isSupported: Boolean + } + + /** Supported Proto version + * @param fromInclusive The protocol version when this Proto version was introduced + * @param deserializer Deserialization method + * @param serializer Serialization method + */ + protected[this] case class VersionedProtoConverter private ( + fromInclusive: RepresentativeProtocolVersion[HasSupportedProtoVersions.this.type], + deserializer: Deserializer, + serializer: Serializer, + ) extends ProtoCodec + with PrettyPrinting { + override val isVersioned: Boolean = true + override val isSupported: Boolean = true + + override def pretty: Pretty[this.type] = + prettyOfClass( + unnamedParam(_ => HasSupportedProtoVersions.this.getClass.getSimpleName.unquoted), + param("fromInclusive", _.fromInclusive), + ) + } + + object VersionedProtoConverter { + def apply[ProtoClass <: scalapb.GeneratedMessage, Status <: ProtocolVersion.Status]( + fromInclusive: ProtocolVersion.ProtocolVersionWithStatus[Status] + )( + protoCompanion: scalapb.GeneratedMessageCompanion[ProtoClass] & Status + )( + parser: scalapb.GeneratedMessageCompanion[ProtoClass] => Deserializer, + serializer: Serializer, + ): VersionedProtoConverter = + raw(fromInclusive, parser(protoCompanion), serializer) + + def storage[ProtoClass <: scalapb.GeneratedMessage]( + fromInclusive: ReleaseProtocolVersion, + protoCompanion: scalapb.GeneratedMessageCompanion[ProtoClass] & StorageProtoVersion, + )( + parser: scalapb.GeneratedMessageCompanion[ProtoClass] => Deserializer, + serializer: Serializer, + ): VersionedProtoConverter = raw(fromInclusive.v, parser(protoCompanion), serializer) + + @VisibleForTesting + def raw( + fromInclusive: ProtocolVersion, + deserializer: Deserializer, + 
serializer: Serializer, + ): VersionedProtoConverter = VersionedProtoConverter( + new RepresentativeProtocolVersion[HasSupportedProtoVersions.this.type](fromInclusive) {}, + deserializer, + serializer, + ) + } + + protected def deserializationErrorK(error: ProtoDeserializationError): Deserializer + + protected[this] case class UnsupportedProtoCodec( + fromInclusive: RepresentativeProtocolVersion[HasSupportedProtoVersions.this.type] + ) extends ProtoCodec + with PrettyPrinting { + override val isVersioned: Boolean = false + override val isSupported: Boolean = false + + private def valueClassName: String = HasSupportedProtoVersions.this.getClass.getSimpleName + + def deserializationError: ProtoDeserializationError = ProtoDeserializationError.OtherError( + s"Cannot deserialize $valueClassName in protocol version equivalent to ${fromInclusive.representative}" + ) + override def deserializer: Deserializer = deserializationErrorK(deserializationError) + override def serializer: Serializer = throw new UnsupportedOperationException( + s"Cannot serialize $valueClassName in protocol version equivalent to ${fromInclusive.representative}" + ) + override def pretty: Pretty[this.type] = prettyOfClass( + unnamedParam(_.valueClassName.unquoted), + param("fromInclusive", _.fromInclusive), + ) + } + + object UnsupportedProtoCodec { + def apply(fromInclusive: ProtocolVersion): UnsupportedProtoCodec = + new UnsupportedProtoCodec( + new RepresentativeProtocolVersion[HasSupportedProtoVersions.this.type](fromInclusive) {} + ) + } + + case class SupportedProtoVersions private ( + // Sorted with descending order + converters: NonEmpty[immutable.SortedMap[ProtoVersion, ProtoCodec]] + ) { + val (higherProtoVersion, higherConverter) = converters.head1 + + def converterFor(protocolVersion: ProtocolVersion): ProtoCodec = + converters + .collectFirst { + case (_, converter) if protocolVersion >= converter.fromInclusive.representative => + converter + } + .getOrElse(higherConverter) + + def 
deserializerFor(protoVersion: ProtoVersion): Deserializer = + converters.get(protoVersion).map(_.deserializer).getOrElse(higherConverter.deserializer) + + def protoVersionFor( + protocolVersion: RepresentativeProtocolVersion[HasSupportedProtoVersions.this.type] + ): ProtoVersion = converters + .collectFirst { + case (protoVersion, converter) if protocolVersion >= converter.fromInclusive => + protoVersion + } + .getOrElse(higherProtoVersion) + + // TODO(#15628) We should not have a fallback here. Change return type to an either and propagate + def protocolVersionRepresentativeFor( + protoVersion: ProtoVersion + ): RepresentativeProtocolVersion[HasSupportedProtoVersions.this.type] = + table.getOrElse(protoVersion, higherConverter.fromInclusive) + + def protocolVersionRepresentativeFor( + protocolVersion: ProtocolVersion + ): RepresentativeProtocolVersion[HasSupportedProtoVersions.this.type] = converterFor( + protocolVersion + ).fromInclusive + + lazy val table + : Map[ProtoVersion, RepresentativeProtocolVersion[HasSupportedProtoVersions.this.type]] = + converters.forgetNE.fmap(_.fromInclusive) + } + + object SupportedProtoVersions { + def apply( + head: (ProtoVersion, ProtoCodec), + tail: (ProtoVersion, ProtoCodec)* + ): SupportedProtoVersions = SupportedProtoVersions.fromNonEmpty( + NonEmpty.mk(Seq, head, tail: _*) + ) + + /* + Throws an error if a protocol version or a protobuf version is used twice. + This indicates an error in the converters list: + - Each protobuf version should appear only once. + - Each protobuf version should use a different minimum protocol version. 
+ */ + private def ensureNoDuplicates(converters: NonEmpty[Seq[(ProtoVersion, ProtoCodec)]]): Unit = { + + val versions: Seq[(ProtoVersion, ProtocolVersion)] = converters.forgetNE.map { + case (protoVersion, codec) => + (protoVersion, codec.fromInclusive.representative) + } + + def getDuplicates[T]( + proj: ((ProtoVersion, ProtocolVersion)) => T + ): Option[NonEmpty[List[T]]] = { + val duplicates = versions + .groupBy(proj) + .toList + .collect { + case (_, versions) if versions.lengthCompare(1) > 0 => + versions.map(proj) + } + .flatten + + NonEmpty.from(duplicates) + } + + val duplicatedProtoVersion = getDuplicates(_._1) + val duplicatedProtocolVersion = getDuplicates(_._2) + + duplicatedProtoVersion.foreach { duplicates => + throw new IllegalArgumentException( + s"Some protobuf versions appear several times in `$name`: $duplicates" + ) + }.discard + + duplicatedProtocolVersion.foreach { duplicates => + throw new IllegalArgumentException( + s"Some protocol versions appear several times in `$name`: $duplicates" + ) + }.discard + } + + private def fromNonEmpty( + converters: NonEmpty[Seq[(ProtoVersion, ProtoCodec)]] + ): SupportedProtoVersions = { + ensureNoDuplicates(converters) + + val sortedConverters = checked( + NonEmptyUtil.fromUnsafe( + immutable.SortedMap.from(converters)(implicitly[Ordering[ProtoVersion]].reverse) + ) + ) + val (_, lowestProtocolVersion) = sortedConverters.last1 + + // If you are hitting this require failing when your message doesn't exist in PV.minimum, + // remember to specify that explicitly by adding to the SupportedProtoVersions: + // ProtoVersion(-1) -> UnsupportedProtoCodec(ProtocolVersion.minimum), + require( + lowestProtocolVersion.fromInclusive.representative == ProtocolVersion.minimum, + s"ProtocolVersion corresponding to lowest proto version should be ${ProtocolVersion.minimum}, found $lowestProtocolVersion", + ) + + SupportedProtoVersions(sortedConverters) + } + } + + /** Proto versions that are supported by 
`fromProtoVersioned` and `fromByteString` + * See the helper `supportedProtoVersion` below to define a `Parser`. + */ + def supportedProtoVersions: SupportedProtoVersions +} + +trait HasProtocolVersionedWrapperCompanion[ + ValueClass <: HasRepresentativeProtocolVersion, + DeserializedValueClass, +] extends HasSupportedProtoVersions[ValueClass] + with Serializable { + + /** The name of the class as used for pretty-printing and error reporting */ + def name: String + + type OriginalByteString = ByteString // What is passed to the fromByteString method + type DataByteString = ByteString // What is inside the parsed UntypedVersionedMessage message + + /** Will check that default value rules defined in `companionObj.defaultValues` hold. + */ + def validateInstance( + instance: ValueClass, + representativeProtocolVersion: ThisRepresentativeProtocolVersion, + ): Either[String, Unit] = + invariants.traverse_(_.validateInstance(instance, representativeProtocolVersion)) + + protected def deserializeForVersion( + rpv: RepresentativeProtocolVersion[this.type], + deserializeVersionedProto: => ParsingResult[DeserializedValueClass], + ): ParsingResult[DeserializedValueClass] = { + val converter = + supportedProtoVersions.converterFor(rpv.representative) + + converter match { + case _: VersionedProtoConverter => deserializeVersionedProto + case unsupported: UnsupportedProtoCodec => + Left(unsupported.deserializationError) + } + } +} + +trait HasProtocolVersionedWrapperWithoutContextCompanion[ + ValueClass <: HasRepresentativeProtocolVersion, + DeserializedValueClass, +] extends HasProtocolVersionedWrapperCompanion[ValueClass, DeserializedValueClass] { + def fromByteString(bytes: OriginalByteString): ParsingResult[DeserializedValueClass] +} + +/** Trait for companion objects of serializable classes with memoization. + * Use this class if deserialization produces a different type than where serialization starts. 
+ * For example, if a container can serialize its elements, but the container's deserializer + * does not deserialize the elements and instead leaves them as Bytestring. + * + * Use [[HasMemoizedProtocolVersionedWrapperCompanion]] if the type distinction between serialization and deseserialization is not needed. + */ +trait HasMemoizedProtocolVersionedWrapperCompanion2[ + ValueClass <: HasRepresentativeProtocolVersion, + DeserializedValueClass, +] extends HasProtocolVersionedWrapperWithoutContextCompanion[ValueClass, DeserializedValueClass] { + // Deserializer: (Proto => DeserializedValueClass) + override type Deserializer = + (OriginalByteString, DataByteString) => ParsingResult[DeserializedValueClass] + + protected def supportedProtoVersionMemoized[Proto <: scalapb.GeneratedMessage]( + p: scalapb.GeneratedMessageCompanion[Proto] + )( + fromProto: Proto => (OriginalByteString => ParsingResult[DeserializedValueClass]) + ): Deserializer = + (original: OriginalByteString, data: DataByteString) => + ProtoConverter.protoParser(p.parseFrom)(data).flatMap(fromProto(_)(original)) + + def fromByteArray(bytes: Array[Byte]): ParsingResult[DeserializedValueClass] = fromByteString( + ByteString.copyFrom(bytes) + ) + + override def fromByteString(bytes: OriginalByteString): ParsingResult[DeserializedValueClass] = + for { + proto <- ProtoConverter.protoParser(UntypedVersionedMessage.parseFrom)(bytes) + data <- proto.wrapper.data.toRight(ProtoDeserializationError.FieldNotSet(s"$name: data")) + valueClass <- supportedProtoVersions + .deserializerFor(ProtoVersion(proto.version))(bytes, data) + } yield valueClass + + /** Use this method when deserializing bytes for classes that have a legacy proto converter to explicitly + * set the version to use for the deserialization. 
+ * @param protoVersion Proto version of the bytes to be deserialized + * @param bytes data + */ + def fromByteString( + protoVersion: ProtoVersion + )(bytes: OriginalByteString): ParsingResult[DeserializedValueClass] = { + deserializeForVersion( + protocolVersionRepresentativeFor(protoVersion), + fromByteString(bytes), + ) + } + + override protected def deserializationErrorK( + error: ProtoDeserializationError + ): (OriginalByteString, DataByteString) => ParsingResult[DeserializedValueClass] = + (_, _) => Left(error) +} + +/** Trait for companion objects of serializable classes with memoization and a (de)serialization context. + * Use this class if deserialization produces a different type than where serialization starts. + * For example, if a container can serialize its elements, but the container's deserializer + * does not deserialize the elements and instead leaves them as Bytestring. + * + * Use [[HasMemoizedProtocolVersionedWithContextCompanion]] if the type distinction between serialization and deseserialization is not needed. 
+ */ +trait HasMemoizedProtocolVersionedWithContextCompanion2[ + ValueClass <: HasRepresentativeProtocolVersion, + DeserializedValueClass, + Context, +] extends HasProtocolVersionedWrapperCompanion[ValueClass, DeserializedValueClass] { + override type Deserializer = + (Context, OriginalByteString, DataByteString) => ParsingResult[DeserializedValueClass] + + protected def supportedProtoVersionMemoized[Proto <: scalapb.GeneratedMessage]( + p: scalapb.GeneratedMessageCompanion[Proto] + )( + fromProto: (Context, Proto) => (OriginalByteString => ParsingResult[DeserializedValueClass]) + ): Deserializer = + (ctx: Context, original: OriginalByteString, data: DataByteString) => + ProtoConverter.protoParser(p.parseFrom)(data).flatMap(fromProto(ctx, _)(original)) + + def fromByteString( + context: Context + )(bytes: OriginalByteString): ParsingResult[DeserializedValueClass] = for { + proto <- ProtoConverter.protoParser(UntypedVersionedMessage.parseFrom)(bytes) + data <- proto.wrapper.data.toRight(ProtoDeserializationError.FieldNotSet(s"$name: data")) + valueClass <- supportedProtoVersions + .deserializerFor(ProtoVersion(proto.version))(context, bytes, data) + } yield valueClass + + def fromByteArray(context: Context)(bytes: Array[Byte]): ParsingResult[DeserializedValueClass] = + fromByteString(context)(ByteString.copyFrom(bytes)) + + override protected def deserializationErrorK( + error: ProtoDeserializationError + ): (Context, OriginalByteString, DataByteString) => ParsingResult[DeserializedValueClass] = + (_, _, _) => Left(error) +} + +/** Trait for companion objects of serializable classes without memoization. + * Use this class if deserialization produces a different type than where serialization starts. + * For example, if a container can serialize its elements, but the container's deserializer + * does not deserialize the elements and instead leaves them as Bytestring. 
+ * + * Use [[HasProtocolVersionedCompanion]] if the type distinction between serialization and deseserialization is not needed. + */ +trait HasProtocolVersionedCompanion2[ + ValueClass <: HasRepresentativeProtocolVersion, + DeserializedValueClass, +] extends HasProtocolVersionedWrapperWithoutContextCompanion[ValueClass, DeserializedValueClass] { + override type Deserializer = DataByteString => ParsingResult[DeserializedValueClass] + + protected def supportedProtoVersion[Proto <: scalapb.GeneratedMessage]( + p: scalapb.GeneratedMessageCompanion[Proto] + )( + fromProto: Proto => ParsingResult[DeserializedValueClass] + ): Deserializer = + (data: DataByteString) => ProtoConverter.protoParser(p.parseFrom)(data).flatMap(fromProto) + + def fromByteArray(bytes: Array[Byte]): ParsingResult[DeserializedValueClass] = for { + proto <- ProtoConverter.protoParserArray(UntypedVersionedMessage.parseFrom)(bytes) + valueClass <- fromProtoVersioned(VersionedMessage(proto)) + } yield valueClass + + def fromProtoVersioned( + proto: VersionedMessage[DeserializedValueClass] + ): ParsingResult[DeserializedValueClass] = + proto.wrapper.data.toRight(ProtoDeserializationError.FieldNotSet(s"$name: data")).flatMap { + supportedProtoVersions.deserializerFor(ProtoVersion(proto.version)) + } + + override def fromByteString(bytes: OriginalByteString): ParsingResult[DeserializedValueClass] = + for { + proto <- ProtoConverter.protoParser(UntypedVersionedMessage.parseFrom)(bytes) + valueClass <- fromProtoVersioned(VersionedMessage(proto)) + } yield valueClass + + /** Deserializes a message using a delimiter (the message length) from the given input stream. + * + * This method works in conjunction with + * [[com.digitalasset.canton.version.HasProtocolVersionedWrapper.writeDelimitedTo]] which should have been used to + * serialize the message. It is useful for deserializing multiple messages from a single input stream through + * repeated invocations. 
+ * + * Deserialization is only supported for [[com.digitalasset.canton.version.VersionedMessage]]. + * + * @param input the source from which a message is deserialized + * @return an Option that is None when there are no messages left anymore, otherwise it wraps an Either + * where left represents a deserialization error (exception) and right represents the successfully + * deserialized message + */ + def parseDelimitedFrom(input: InputStream): Option[ParsingResult[DeserializedValueClass]] = { + try { + UntypedVersionedMessage + .parseDelimitedFrom(input) + .map(VersionedMessage[DeserializedValueClass]) + .map(fromProtoVersioned) + } catch { + case protoBuffException: InvalidProtocolBufferException => + Some(Left(ProtoDeserializationError.BufferException(protoBuffException))) + case NonFatal(e) => + Some(Left(ProtoDeserializationError.OtherError(e.getMessage))) + } + } + + /** Use this method when deserializing bytes for classes that have a legacy proto converter to explicitly + * set the version to use for the deserialization. 
+ * @param protocolVersion protocol version of the bytes to be deserialized + * @param bytes data + */ + def fromByteString( + protocolVersion: ProtocolVersion + )(bytes: OriginalByteString): ParsingResult[DeserializedValueClass] = { + deserializeForVersion( + protocolVersionRepresentativeFor(protocolVersion), + fromByteString(bytes), + ) + } + + def readFromFile( + inputFile: String + ): Either[String, DeserializedValueClass] = { + for { + bs <- BinaryFileUtil.readByteStringFromFile(inputFile) + value <- fromByteString(bs).leftMap(_.toString) + } yield value + } + + def tryReadFromFile(inputFile: String): DeserializedValueClass = + readFromFile(inputFile).valueOr(err => + throw new IllegalArgumentException(s"Reading $name from file $inputFile failed: $err") + ) + + implicit def hasVersionedWrapperGetResult(implicit + getResultByteArray: GetResult[Array[Byte]] + ): GetResult[DeserializedValueClass] = GetResult { r => + fromByteArray(r.<<[Array[Byte]]).valueOr(err => + throw new DbDeserializationException(s"Failed to deserialize $name: $err") + ) + } + + implicit def hasVersionedWrapperGetResultO(implicit + getResultByteArray: GetResult[Option[Array[Byte]]] + ): GetResult[Option[DeserializedValueClass]] = GetResult { r => + r.<<[Option[Array[Byte]]] + .map( + fromByteArray(_).valueOr(err => + throw new DbDeserializationException(s"Failed to deserialize $name: $err") + ) + ) + } + + override protected def deserializationErrorK( + error: ProtoDeserializationError + ): DataByteString => ParsingResult[DeserializedValueClass] = _ => Left(error) +} + +trait HasProtocolVersionedWithContextCompanion[ + ValueClass <: HasRepresentativeProtocolVersion, + Context, +] extends HasProtocolVersionedWrapperCompanion[ValueClass, ValueClass] { + override type Deserializer = (Context, DataByteString) => ParsingResult[ValueClass] + + protected def supportedProtoVersion[Proto <: scalapb.GeneratedMessage]( + p: scalapb.GeneratedMessageCompanion[Proto] + )( + fromProto: (Context, Proto) => 
ParsingResult[ValueClass] + ): Deserializer = + (ctx: Context, data: DataByteString) => + ProtoConverter.protoParser(p.parseFrom)(data).flatMap(fromProto(ctx, _)) + + def fromProtoVersioned( + context: Context + )(proto: VersionedMessage[ValueClass]): ParsingResult[ValueClass] = + proto.wrapper.data.toRight(ProtoDeserializationError.FieldNotSet(s"$name: data")).flatMap { + supportedProtoVersions.deserializerFor(ProtoVersion(proto.version))(context, _) + } + + def fromByteString(context: Context)(bytes: OriginalByteString): ParsingResult[ValueClass] = for { + proto <- ProtoConverter.protoParser(UntypedVersionedMessage.parseFrom)(bytes) + valueClass <- fromProtoVersioned(context)(VersionedMessage(proto)) + } yield valueClass + + /** Use this method when deserializing bytes for classes that have a legacy proto converter to explicitly + * set the Proto version to use for the deserialization. + * @param protoVersion Proto version of the bytes to be deserialized + * @param bytes data + */ + def fromByteString( + protoVersion: ProtoVersion + )(context: Context)(bytes: OriginalByteString): ParsingResult[ValueClass] = { + deserializeForVersion( + protocolVersionRepresentativeFor(protoVersion), + fromByteString(context)(bytes), + ) + } + + /** Use this method when deserializing bytes for classes that have a legacy proto converter to explicitly + * set the protocol version to use for the deserialization. 
+ * @param protocolVersion protocol version of the bytes to be deserialized + * @param bytes data + */ + def fromByteString( + protocolVersion: ProtocolVersion + )(context: Context)(bytes: OriginalByteString): ParsingResult[ValueClass] = { + deserializeForVersion( + protocolVersionRepresentativeFor(protocolVersion), + fromByteString(context)(bytes), + ) + } + + override protected def deserializationErrorK( + error: ProtoDeserializationError + ): (Context, DataByteString) => ParsingResult[ValueClass] = (_, _) => Left(error) +} + +trait ProtocolVersionedCompanionDbHelpers[ValueClass <: HasProtocolVersionedWrapper[ValueClass]] { + def getVersionedSetParameter(implicit + setParameterByteArray: SetParameter[Array[Byte]] + ): SetParameter[ValueClass] = { (value, pp) => + pp >> value.toByteArray + } + + def getVersionedSetParameterO(implicit + setParameterByteArrayO: SetParameter[Option[Array[Byte]]] + ): SetParameter[Option[ValueClass]] = (valueO, pp) => pp >> valueO.map(_.toByteArray) +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/version/HasVersionedToByteString.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/version/HasVersionedToByteString.scala new file mode 100644 index 0000000000..729deaf93b --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/version/HasVersionedToByteString.scala @@ -0,0 +1,25 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.version + +import com.google.protobuf.ByteString + +/** Trait for classes that can be serialized to a [[com.google.protobuf.ByteString]]. + * Typical use cases of toByteString include: + *
+ * <ul>
+ *   <li>saving data in the database in serialized form (e.g. as in SequencedEvent)</li>
+ *   <li>encrypting data (e.g. as in Encryption.scala)</li>
+ * </ul>
+ *
+ * In some exceptional cases, we also convert an object to a ByteString before including it in a Proto message (e.g. ViewCommonData or Envelope) + * + * Classes that use Protobuf for serialization should implement [[HasVersionedWrapper]] instead. + * See "CONTRIBUTING.md" for our guidelines on serialization. + */ +trait HasVersionedToByteString { + + /** Returns the serialization of the object into a [[com.google.protobuf.ByteString]]. + * This method may yield different results if it is invoked several times. + */ + def toByteString(version: ProtocolVersion): ByteString +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/version/HasVersionedWrapper.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/version/HasVersionedWrapper.scala new file mode 100644 index 0000000000..3de607349d --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/version/HasVersionedWrapper.scala @@ -0,0 +1,274 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.version + +import cats.syntax.either.* +import com.daml.nonempty.{NonEmpty, NonEmptyUtil} +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.store.db.DbDeserializationException +import com.digitalasset.canton.util.BinaryFileUtil +import com.digitalasset.canton.{DiscardOps, ProtoDeserializationError, checked} +import com.google.protobuf.ByteString +import slick.jdbc.{GetResult, SetParameter} + +import scala.collection.immutable + +/** Trait for classes that can be serialized by using ProtoBuf. + * See "CONTRIBUTING.md" for our guidelines on serialization. + * + * This wrapper is to be used if a single instance needs to be serialized to different proto versions. 
+ * + * The underlying ProtoClass is [[com.digitalasset.canton.version.UntypedVersionedMessage]] + * but we often specify the typed alias [[com.digitalasset.canton.version.VersionedMessage]] + * instead. + */ +trait HasVersionedWrapper[ValueClass] extends HasVersionedToByteString { + self: ValueClass => + + protected def companionObj: HasVersionedMessageCompanionCommon[ValueClass] + + /** Yields the proto representation of the class inside an `UntypedVersionedMessage` wrapper. + * + * Subclasses should make this method public by default, as this supports composing proto serializations. + * Keep it protected, if there are good reasons for it + * (e.g. [[com.digitalasset.canton.serialization.ProtocolVersionedMemoizedEvidence]]). + */ + def toProtoVersioned(version: ProtocolVersion): VersionedMessage[ValueClass] = + companionObj.supportedProtoVersions.converters + .collectFirst { + case (protoVersion, supportedVersion) if version >= supportedVersion.fromInclusive => + VersionedMessage(supportedVersion.serializer(self), protoVersion.v) + } + .getOrElse(serializeToHighestVersion) + + private def serializeToHighestVersion: VersionedMessage[ValueClass] = { + VersionedMessage( + companionObj.supportedProtoVersions.higherConverter.serializer(self), + companionObj.supportedProtoVersions.higherProtoVersion.v, + ) + } + + /** Yields a byte string representation of the corresponding `UntypedVersionedMessage` wrapper of this instance. + */ + override def toByteString(version: ProtocolVersion): ByteString = toProtoVersioned( + version + ).toByteString + + /** Yields a byte array representation of the corresponding `UntypedVersionedMessage` wrapper of this instance. + */ + def toByteArray(version: ProtocolVersion): Array[Byte] = toByteString(version).toByteArray + + /** Writes the byte string representation of the corresponding `UntypedVersionedMessage` wrapper of this instance to a file. 
*/ + def writeToFile(outputFile: String, version: ProtocolVersion = ProtocolVersion.latest): Unit = { + val bytes = toByteString(version) + BinaryFileUtil.writeByteStringToFile(outputFile, bytes) + } +} + +// Implements shared behavior of [[HasVersionedMessageCompanion]] and [[HasVersionedMessageWithContextCompanion]] +trait HasVersionedMessageCompanionCommon[ValueClass] { + + /** The name of the class as used for pretty-printing and error reporting */ + def name: String + + type Serializer = ValueClass => ByteString + type Deserializer + + /** Proto versions that are supported by `fromProtoVersioned`, `fromByteString`, + * `toProtoVersioned` and `toByteString`. + * See the helpers `supportedProtoVersion` and `supportedProtoVersionMemoized` + * below to define a `ProtoCodec`. + */ + def supportedProtoVersions: SupportedProtoVersions + + case class ProtoCodec( + fromInclusive: ProtocolVersion, + deserializer: Deserializer, + serializer: Serializer, + ) + + case class SupportedProtoVersions private ( + // Sorted with descending order + converters: NonEmpty[immutable.SortedMap[ProtoVersion, ProtoCodec]] + ) { + val (higherProtoVersion, higherConverter) = converters.head1 + + def converterFor(protocolVersion: ProtocolVersion): ProtoCodec = + converters + .collectFirst { + case (_, converter) if protocolVersion >= converter.fromInclusive => + converter + } + .getOrElse(higherConverter) + + def deserializerFor(protoVersion: ProtoVersion): Deserializer = + converters.get(protoVersion).map(_.deserializer).getOrElse(higherConverter.deserializer) + } + + object SupportedProtoVersions { + def apply( + head: (ProtoVersion, ProtoCodec), + tail: (ProtoVersion, ProtoCodec)* + ): SupportedProtoVersions = SupportedProtoVersions.fromNonEmpty( + NonEmpty.mk(Seq, head, tail: _*) + ) + + /* + Throws an error if a protocol version is used twice. + This indicates an error in the converters list since one protocol version + cannot correspond to two proto versions. 
+ */ + private def ensureNoDuplicates(converters: NonEmpty[Seq[(ProtoVersion, ProtoCodec)]]): Unit = + NonEmpty + .from { + converters.forgetNE + .groupMap { case (_, codec) => codec.fromInclusive } { case (protoVersion, _) => + protoVersion + } + .filter { case (_, protoVersions) => protoVersions.lengthCompare(1) > 0 } + .toList + } + .foreach { duplicates => + throw new IllegalArgumentException( + s"Some protocol versions appear several times in `$name`: $duplicates " + ) + } + .discard + + private def fromNonEmpty( + converters: NonEmpty[Seq[(ProtoVersion, ProtoCodec)]] + ): SupportedProtoVersions = { + + ensureNoDuplicates(converters) + + val sortedConverters = checked( + NonEmptyUtil.fromUnsafe( + immutable.SortedMap.from(converters)(implicitly[Ordering[ProtoVersion]].reverse) + ) + ) + val (_, lowestProtocolVersion) = sortedConverters.last1 + + require( + lowestProtocolVersion.fromInclusive == ProtocolVersion.minimum, + s"ProtocolVersion corresponding to lowest proto version should be ${ProtocolVersion.minimum}, found ${lowestProtocolVersion.fromInclusive}", + ) + + SupportedProtoVersions(sortedConverters) + } + } +} + +/** Traits for the companion objects of classes that implement [[HasVersionedWrapper]]. + * Provide default methods. 
+ */ +trait HasVersionedMessageCompanion[ValueClass] + extends HasVersionedMessageCompanionCommon[ValueClass] { + type Deserializer = ByteString => ParsingResult[ValueClass] + + protected def supportedProtoVersion[Proto <: scalapb.GeneratedMessage]( + p: scalapb.GeneratedMessageCompanion[Proto] + )( + fromProto: Proto => ParsingResult[ValueClass] + ): ByteString => ParsingResult[ValueClass] = + ProtoConverter.protoParser(p.parseFrom)(_).flatMap(fromProto) + + def fromProtoVersioned( + proto: VersionedMessage[ValueClass] + ): ParsingResult[ValueClass] = + proto.wrapper.data.toRight(ProtoDeserializationError.FieldNotSet(s"$name: data")).flatMap { + data => supportedProtoVersions.deserializerFor(ProtoVersion(proto.version))(data) + } + + def fromByteString(bytes: ByteString): ParsingResult[ValueClass] = for { + proto <- ProtoConverter.protoParser(UntypedVersionedMessage.parseFrom)(bytes) + valueClass <- fromProtoVersioned(VersionedMessage(proto)) + } yield valueClass + + def tryFromByteString(bytes: ByteString): ValueClass = + fromByteString(bytes).valueOr(err => + throw new IllegalArgumentException(s"Deserializing $name bytestring failed: $err") + ) + + def fromByteArray(bytes: Array[Byte]): ParsingResult[ValueClass] = for { + proto <- ProtoConverter.protoParserArray(UntypedVersionedMessage.parseFrom)(bytes) + valueClass <- fromProtoVersioned(VersionedMessage(proto)) + } yield valueClass + + def readFromFile( + inputFile: String + ): Either[String, ValueClass] = { + for { + bs <- BinaryFileUtil.readByteStringFromFile(inputFile) + value <- fromByteString(bs).leftMap(_.toString) + } yield value + } + + def tryReadFromFile(inputFile: String): ValueClass = readFromFile(inputFile).valueOr(err => + throw new IllegalArgumentException(s"Reading $name from file $inputFile failed: $err") + ) + + implicit def hasVersionedWrapperGetResult(implicit + getResultByteArray: GetResult[Array[Byte]] + ): GetResult[ValueClass] = GetResult { r => + 
fromByteArray(r.<<[Array[Byte]]).valueOr(err => + throw new DbDeserializationException(s"Failed to deserialize $name: $err") + ) + } + + implicit def hasVersionedWrapperGetResultO(implicit + getResultByteArrayO: GetResult[Option[Array[Byte]]] + ): GetResult[Option[ValueClass]] = GetResult { r => + r.<<[Option[Array[Byte]]] + .map( + fromByteArray(_).valueOr(err => + throw new DbDeserializationException(s"Failed to deserialize $name: $err") + ) + ) + } +} + +trait HasVersionedMessageCompanionDbHelpers[ValueClass <: HasVersionedWrapper[ValueClass]] { + def getVersionedSetParameter(protocolVersion: ProtocolVersion)(implicit + setParameterByteArray: SetParameter[Array[Byte]] + ): SetParameter[ValueClass] = { (value, pp) => + pp >> value.toByteArray(protocolVersion) + } + + def getVersionedSetParameterO(protocolVersion: ProtocolVersion)(implicit + setParameterByteArrayO: SetParameter[Option[Array[Byte]]] + ): SetParameter[Option[ValueClass]] = + (valueO, pp) => pp >> valueO.map(_.toByteArray(protocolVersion)) +} + +/** Traits for the companion objects of classes that implement [[HasVersionedWrapper]]. + * They provide default methods. + * Unlike [[HasVersionedMessageCompanion]] these traits allow to pass additional + * context to the conversion methods (see, e.g., [[com.digitalasset.canton.data.TransferInViewTree.fromProtoVersioned]] + * which takes a `HashOps` parameter). 
+ */ +trait HasVersionedMessageWithContextCompanion[ValueClass, Ctx] + extends HasVersionedMessageCompanionCommon[ValueClass] { + type Deserializer = (Ctx, ByteString) => ParsingResult[ValueClass] + + protected def supportedProtoVersion[Proto <: scalapb.GeneratedMessage]( + p: scalapb.GeneratedMessageCompanion[Proto] + )( + fromProto: (Ctx, Proto) => ParsingResult[ValueClass] + ): (Ctx, ByteString) => ParsingResult[ValueClass] = + (ctx: Ctx, data: ByteString) => + ProtoConverter.protoParser(p.parseFrom)(data).flatMap(fromProto(ctx, _)) + + def fromProtoVersioned( + ctx: Ctx + )(proto: VersionedMessage[ValueClass]): ParsingResult[ValueClass] = + proto.wrapper.data.toRight(ProtoDeserializationError.FieldNotSet(s"$name: data")).flatMap { + data => supportedProtoVersions.deserializerFor(ProtoVersion(proto.version))(ctx, data) + } + + def fromByteString(ctx: Ctx)(bytes: ByteString): ParsingResult[ValueClass] = for { + proto <- ProtoConverter.protoParser(UntypedVersionedMessage.parseFrom)(bytes) + valueClass <- fromProtoVersioned(ctx)(VersionedMessage(proto)) + } yield valueClass +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/version/ProtocolVersion.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/version/ProtocolVersion.scala new file mode 100644 index 0000000000..717080d4bf --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/version/ProtocolVersion.scala @@ -0,0 +1,336 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.version + +import cats.syntax.either.* +import cats.syntax.traverse.* +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.ProtoDeserializationError.OtherError +import com.digitalasset.canton.buildinfo.BuildInfo +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.version.ProtocolVersion.{deleted, deprecated, supported, unstable} +import pureconfig.error.FailureReason +import pureconfig.{ConfigReader, ConfigWriter} +import slick.jdbc.{GetResult, PositionedParameters, SetParameter} + +/** A Canton protocol version is a snapshot of how the Canton protocols, that nodes use to communicate, function at a certain point in time + * (e.g., this ‘snapshot’ contains the information what exactly a `SubmissionRequest` to the sequencer looks like and how exactly a Sequencer + * handles a call of the `SendAsync` RPC). + * It is supposed to capture everything that is involved in two different Canton nodes interacting with each other. + * + * The protocol version is important for ensuring we meet our compatibility guarantees such that we can + * - update systems running older Canton versions + * - migrate data from older versions in the database + * - communicate with Canton nodes of different releases + * + * Two Canton nodes can interact if they can speak the same protocol version. + * + * For more details, please refer to the [[https://docs.daml.com/canton/usermanual/versioning.html versioning documentation]] + * in the user manual. 
+ * + * How to add a new protocol version `N`: + * - Define a new constant `v` in the [[ProtocolVersion$]] object via + * {{{lazy val v: ProtocolVersionWithStatus[Unstable] = ProtocolVersion.unstable()}}} + * + * - The new protocol version should be declared as unstable until it is released: + * Define it with type argument [[com.digitalasset.canton.version.ProtocolVersion.Unstable]] + * and add it to the list in [[com.digitalasset.canton.version.ProtocolVersion.unstable]]. + * + * - Add a new test job for the protocol version `N` to the canton_build workflow. + * Make a sensible decision how often it should run. + * If sensible, consider to reduce the frequency some of the other protocol version test jobs are running, + * e.g., by moving them to the canton_nightly job. + * + * How to release a protocol version `N`: + * - Switch the type parameter of the protocol version constant `v` from + * [[com.digitalasset.canton.version.ProtocolVersion.Unstable]] to [[com.digitalasset.canton.version.ProtocolVersion.Stable]] + * As a result, you may have to modify a couple of protobuf definitions and mark them as stable as well. + * + * - Remove `v` from [[com.digitalasset.canton.version.ProtocolVersion.unstable]] + * and add it to [[com.digitalasset.canton.buildinfo.BuildInfo.protocolVersions]]. + * + * - Check the test jobs for protocol versions: + * Likely `N` will become the default protocol version used by the `test` job, + * namely [[com.digitalasset.canton.version.ProtocolVersion.latest]]. + * So the separate test job for `N` is no longer needed. + * Conversely, we now need a new job for the previous default protocol version. + * Usually, it is enough to run the previous version only in canton_nightly. + */ +// Internal only: for the full background, please refer to the following [design doc](https://docs.google.com/document/d/1kDiN-373bZOWploDrtOJ69m_0nKFu_23RNzmEXQOFc8/edit?usp=sharing). 
+// or [code walkthrough](https://drive.google.com/file/d/199wHq-P5pVPkitu_AYLR4V3i0fJtYRPg/view?usp=sharing) +sealed case class ProtocolVersion private[version] (v: Int) + extends Ordered[ProtocolVersion] + with PrettyPrinting { + type Status <: ProtocolVersion.Status + + def isDeprecated: Boolean = deprecated.contains(this) + + def isUnstable: Boolean = unstable.contains(this) + def isStable: Boolean = !isUnstable + + def isDeleted: Boolean = deleted.contains(this) + + def isDev: Boolean = this == ProtocolVersion.dev + + def isSupported: Boolean = supported.contains(this) + + override def pretty: Pretty[ProtocolVersion] = + prettyOfString(_ => if (isDev) "dev" else v.toString) + + def toProtoPrimitive: Int = v + + // We keep the .0.0 so that old binaries can still decode it + def toProtoPrimitiveS: String = s"$v.0.0" + + override def compare(that: ProtocolVersion): Int = v.compare(that.v) +} + +object ProtocolVersion { + + /** Type-level marker for whether a protocol version is stable */ + sealed trait Status + + /** Marker for unstable protocol versions */ + sealed trait Unstable extends Status + + /** Marker for stable protocol versions */ + sealed trait Stable extends Status + + type ProtocolVersionWithStatus[S <: Status] = ProtocolVersion { type Status = S } + + private[version] def stable(v: Int): ProtocolVersionWithStatus[Stable] = + createWithStatus[Stable](v) + private[version] def unstable(v: Int): ProtocolVersionWithStatus[Unstable] = + createWithStatus[Unstable](v) + + private def createWithStatus[S <: Status](v: Int): ProtocolVersionWithStatus[S] = + new ProtocolVersion(v) { override type Status = S } + + implicit val protocolVersionWriter: ConfigWriter[ProtocolVersion] = + ConfigWriter.toString(_.toProtoPrimitiveS) + + lazy implicit val protocolVersionReader: ConfigReader[ProtocolVersion] = { + ConfigReader.fromString[ProtocolVersion] { str => + ProtocolVersion.create(str).leftMap[FailureReason](InvalidProtocolVersion) + } + } + + implicit val 
getResultProtocolVersion: GetResult[ProtocolVersion] = + GetResult { r => ProtocolVersion(r.nextInt()) } + + implicit val setParameterProtocolVersion: SetParameter[ProtocolVersion] = + (pv: ProtocolVersion, pp: PositionedParameters) => pp >> pv.v + + /** Try to parse a semver version. + * Return: + * + * - None if `rawVersion` does not satisfy the semver regexp + * - Some(Left(_)) if `rawVersion` satisfies the regex but if an error is found + * (e.g., if minor!=0). + * - Some(Right(ProtocolVersion(_))) in case of success + */ + private def parseSemver(rawVersion: String): Option[Either[String, ProtocolVersion]] = { + val regex = raw"([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,4})".r + + rawVersion match { + case regex(rawMajor, rawMinor, rawPatch) => + val parsedDigits = List(rawMajor, rawMinor, rawPatch).traverse(raw => + raw.toIntOption.toRight(s"Couldn't parse number $raw") + ) + + parsedDigits match { + case Left(error) => Some(Left(error)) + + case Right(List(major, minor, patch)) => + Some( + Either.cond( + minor == 0 && patch == 0, + ProtocolVersion(major), + s"Protocol version should consist of a single number; but `$rawVersion` found", + ) + ) + + case _ => Some(Left(s"Unexpected error while parsing version $rawVersion")) + } + + case _ => None + } + } + + private def parseDev(rawVersion: String): Option[ProtocolVersion] = { + // ignore case for dev version ... scala regex doesn't know case insensitivity ... 
+ val devRegex = "^[dD][eE][vV]$".r + val devFull = ProtocolVersion.dev.toProtoPrimitiveS + + rawVersion match { + // Since dev uses Int.MaxValue, it does not satisfy the regex above + case `devFull` | devRegex() => Some(ProtocolVersion.dev) + case _ => None + } + } + + private[version] def unsupportedErrorMessage(pv: ProtocolVersion, includeDeleted: Boolean) = { + val supportedStablePVs = stableAndSupported.map(_.toString) + + val supportedPVs = if (includeDeleted) { + val deletedPVs = deleted.map(pv => s"(${pv.toString})") + supportedStablePVs ++ deletedPVs + } else supportedStablePVs + + s"Protocol version $pv is not supported. The supported versions are ${supportedPVs.mkString(", ")}." + } + + /** Parse a given raw version string into a [[ProtocolVersion]] without any further validation, i.e. it allows to + * create invalid and unsupported [[ProtocolVersion]]! + * + * ONLY use this method when + * - implementing functionality for the [[ProtocolVersion]] itself + * - additional validation is being applied on the resulting [[ProtocolVersion]] afterwards as a exception + * - testing and having a need for an invalid or unsupported [[ProtocolVersion]] + * + * Otherwise, use one of the other factory methods. + */ + private[version] def parseUnchecked(rawVersion: String): Either[String, ProtocolVersion] = { + rawVersion.toIntOption match { + case Some(value) => Right(ProtocolVersion(value)) + + case None => + parseSemver(rawVersion) + .orElse(parseDev(rawVersion).map(Right(_))) + .getOrElse(Left(s"Unable to convert string `$rawVersion` to a protocol version.")) + } + } + + /** Creates a [[ProtocolVersion]] from the given raw version value and ensures that it is a supported version. + * @param rawVersion String to be parsed. + * @param allowDeleted If true, don't fail if `rawVersion` corresponds to a deleted protocol version. + * This should only be used when parsing a version that does not correspond to the one + * running on the domain. 
One such example is the minimum supported protocol version from + * a participant. + * @return + */ + def create( + rawVersion: String, + allowDeleted: Boolean = false, + ): Either[String, ProtocolVersion] = + parseUnchecked(rawVersion).flatMap { pv => + val isSupported = pv.isSupported || (allowDeleted && pv.isDeleted) + + Either.cond(isSupported, pv, unsupportedErrorMessage(pv, includeDeleted = allowDeleted)) + } + + /** Like [[create]] ensures a supported protocol version; but throws a runtime exception for errors. + */ + def tryCreate(rawVersion: String): ProtocolVersion = create(rawVersion).valueOr(sys.error) + + /** Like [[create]] ensures a supported protocol version; tailored to (de-)serialization purposes. + */ + def fromProtoPrimitive(rawVersion: Int): ParsingResult[ProtocolVersion] = { + val pv = ProtocolVersion(rawVersion) + Either.cond(pv.isSupported, pv, OtherError(unsupportedErrorMessage(pv, includeDeleted = false))) + } + + /** Like [[create]] ensures a supported protocol version; tailored to (de-)serialization purposes. 
+ */ + def fromProtoPrimitiveS(rawVersion: String): ParsingResult[ProtocolVersion] = { + ProtocolVersion.create(rawVersion).leftMap(OtherError) + } + + final case class InvalidProtocolVersion(override val description: String) extends FailureReason + + // All stable protocol versions supported by this release + // TODO(#15561) Switch to non-empty again + val stableAndSupported: List[ProtocolVersion] = + BuildInfo.protocolVersions + .map(parseUnchecked) + .map(_.valueOr(sys.error)) + .toList + + private val deprecated: Seq[ProtocolVersion] = Seq() + private val deleted: NonEmpty[Seq[ProtocolVersion]] = + NonEmpty( + Seq, + ProtocolVersion(2), + ProtocolVersion(3), + ProtocolVersion(4), + ProtocolVersion(5), + ProtocolVersion(6), + ) + + val unstable: NonEmpty[List[ProtocolVersionWithStatus[Unstable]]] = + NonEmpty.mk(List, ProtocolVersion.v30, ProtocolVersion.dev) + + val supported: NonEmpty[List[ProtocolVersion]] = (unstable ++ stableAndSupported).sorted + + // TODO(i15561): change back to `stableAndSupported.max1` once there is a stable Daml 3 protocol version + val latest: ProtocolVersion = stableAndSupported.lastOption.getOrElse(unstable.head1) + + lazy val dev: ProtocolVersionWithStatus[Unstable] = ProtocolVersion.unstable(Int.MaxValue) + + lazy val v30: ProtocolVersionWithStatus[Unstable] = ProtocolVersion.unstable(30) + + // Minimum stable protocol version introduced + lazy val minimum: ProtocolVersion = v30 +} + +/* + This class wraps a protocol version which is global to the participant. + The wrapped value usually corresponds to the latest (stable) protocol version supported by the binary. 
+ */ +final case class ReleaseProtocolVersion(v: ProtocolVersion) extends AnyVal + +object ReleaseProtocolVersion { + val latest: ReleaseProtocolVersion = ReleaseProtocolVersion(ProtocolVersion.latest) +} + +object Transfer { + + /** When dealing with transfer, allow to be more precise with respect to the domain */ + final case class SourceProtocolVersion(v: ProtocolVersion) extends AnyVal + + object SourceProtocolVersion { + implicit val getResultSourceProtocolVersion: GetResult[SourceProtocolVersion] = + GetResult[ProtocolVersion].andThen(SourceProtocolVersion(_)) + + implicit val setParameterSourceProtocolVersion: SetParameter[SourceProtocolVersion] = + (pv: SourceProtocolVersion, pp: PositionedParameters) => pp >> pv.v + } + + final case class TargetProtocolVersion(v: ProtocolVersion) extends AnyVal + + object TargetProtocolVersion { + implicit val getResultTargetProtocolVersion: GetResult[TargetProtocolVersion] = + GetResult[ProtocolVersion].andThen(TargetProtocolVersion(_)) + + implicit val setParameterTargetProtocolVersion: SetParameter[TargetProtocolVersion] = + (pv: TargetProtocolVersion, pp: PositionedParameters) => pp >> pv.v + } +} + +final case class ProtoVersion(v: Int) extends AnyVal + +object ProtoVersion { + implicit val protoVersionOrdering: Ordering[ProtoVersion] = + Ordering.by[ProtoVersion, Int](_.v) +} + +/** Marker trait for Protobuf messages generated by scalapb + * that are used in some [[com.digitalasset.canton.version.ProtocolVersion.isStable stable]] protocol versions + * + * Implements both [[com.digitalasset.canton.version.ProtocolVersion.Stable]] and [[com.digitalasset.canton.version.ProtocolVersion.Unstable]] + * means that [[StableProtoVersion]] messages can be used in stable and unstable protocol versions. 
+ */ +trait StableProtoVersion extends ProtocolVersion.Stable with ProtocolVersion.Unstable + +/** Marker trait for Protobuf messages generated by scalapb + * that are used only in [[com.digitalasset.canton.version.ProtocolVersion.isUnstable unstable]] protocol versions + */ +trait UnstableProtoVersion extends ProtocolVersion.Unstable + +/** Marker trait for Protobuf messages generated by scalapb + * that are used only to persist data in node storage. + * These messages are never exchanged as part of a protocol. + */ +trait StorageProtoVersion diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/version/VersionedMessage.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/version/VersionedMessage.scala new file mode 100644 index 0000000000..f54f16cdd5 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/version/VersionedMessage.scala @@ -0,0 +1,29 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.version + +import com.google.protobuf.ByteString + +object VersionedMessage { + def apply[M](bytes: ByteString, version: Int): VersionedMessage[M] = + VersionedMessage(UntypedVersionedMessage(UntypedVersionedMessage.Wrapper.Data(bytes), version)) + + def apply[M](message: UntypedVersionedMessage): VersionedMessage[M] = + VersionedMessageImpl.Instance.subst(message) +} + +sealed abstract class VersionedMessageImpl { + type VersionedMessage[+A] <: UntypedVersionedMessage + + private[version] def subst[M](message: UntypedVersionedMessage): VersionedMessage[M] +} + +object VersionedMessageImpl { + val Instance: VersionedMessageImpl = new VersionedMessageImpl { + override type VersionedMessage[+A] = UntypedVersionedMessage + + override private[version] def subst[M](message: UntypedVersionedMessage): VersionedMessage[M] = + message + } +} diff --git a/canton-3x/community/base/src/main/scala/com/digitalasset/canton/version/version.scala b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/version/version.scala new file mode 100644 index 0000000000..fe0f9579ce --- /dev/null +++ b/canton-3x/community/base/src/main/scala/com/digitalasset/canton/version/version.scala @@ -0,0 +1,22 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton + +package object version { + type VersionedMessage[+M] = VersionedMessageImpl.Instance.VersionedMessage[M] + + type HasMemoizedProtocolVersionedWrapperCompanion[ + ValueClass <: HasRepresentativeProtocolVersion + ] = HasMemoizedProtocolVersionedWrapperCompanion2[ValueClass, ValueClass] + + type HasMemoizedProtocolVersionedWithContextCompanion[ + ValueClass <: HasRepresentativeProtocolVersion, + Context, + ] = HasMemoizedProtocolVersionedWithContextCompanion2[ValueClass, ValueClass, Context] + + type HasProtocolVersionedCompanion[ + ValueClass <: HasRepresentativeProtocolVersion + ] = HasProtocolVersionedCompanion2[ValueClass, ValueClass] + +} diff --git a/canton-3x/community/base/src/main/scala/scala/concurrent/BatchingExecutorCanton.scala b/canton-3x/community/base/src/main/scala/scala/concurrent/BatchingExecutorCanton.scala new file mode 100644 index 0000000000..c40764213a --- /dev/null +++ b/canton-3x/community/base/src/main/scala/scala/concurrent/BatchingExecutorCanton.scala @@ -0,0 +1,8 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package scala.concurrent + +/** Purpose of this trait is to make [[scala.concurrent.BatchingExecutor]] public. + */ +trait BatchingExecutorCanton extends BatchingExecutor diff --git a/canton-3x/community/base/src/main/scala/slick/util/AsyncExecutorWithMetrics.scala b/canton-3x/community/base/src/main/scala/slick/util/AsyncExecutorWithMetrics.scala new file mode 100644 index 0000000000..283a22d02b --- /dev/null +++ b/canton-3x/community/base/src/main/scala/slick/util/AsyncExecutorWithMetrics.scala @@ -0,0 +1,454 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package slick.util + +import com.digitalasset.canton.DiscardOps +import com.digitalasset.canton.config.QueryCostMonitoringConfig +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.metrics.DbQueueMetrics +import com.digitalasset.canton.time.PositiveFiniteDuration +import com.digitalasset.canton.util.{LoggerUtil, MonadUtil} +import com.typesafe.scalalogging.Logger +import slick.util.AsyncExecutor.{PrioritizedRunnable, Priority, WithConnection} + +import java.lang.management.ManagementFactory +import java.util +import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicReference} +import java.util.concurrent.{TimeUnit, *} +import javax.management.{InstanceNotFoundException, ObjectName} +import scala.annotation.tailrec +import scala.collection.concurrent.TrieMap +import scala.concurrent.ExecutionContextExecutor +import scala.concurrent.duration.* +import scala.util.control.NonFatal + +@SuppressWarnings( + Array( + "org.wartremover.warts.Null", + "org.wartremover.warts.IsInstanceOf", + "org.wartremover.warts.AsInstanceOf", + "org.wartremover.warts.StringPlusAny", + "org.wartremover.warts.Var", + ) +) +class AsyncExecutorWithMetrics( + name: String, + minThreads: Int, + maxThreads: Int, + queueSize: Int, + maxConnections: Int = Integer.MAX_VALUE, + keepAliveTime: FiniteDuration = 1.minute, + registerMbeans: Boolean = false, + logQueryCost: Option[QueryCostMonitoringConfig], + metrics: DbQueueMetrics, + scheduler: Option[ScheduledExecutorService], + warnOnSlowQueryO: Option[PositiveFiniteDuration], + warnInterval: PositiveFiniteDuration = PositiveFiniteDuration.tryOfSeconds(5), + val logger: Logger, +) extends AsyncExecutorWithShutdown { + + @volatile private[this] lazy val mbeanName = new ObjectName( + s"slick:type=AsyncExecutor,name=$name" + ); + + // Before init: 0, during init: 1, after init: 2, during/after shutdown: 3 + private[this] val state = new AtomicInteger(0) + override 
def isShuttingDown: Boolean = state.get() == 3 + + @volatile private[this] var executor: ThreadPoolExecutor = _ + + if (maxConnections > maxThreads) { + // NOTE: when using transactions or DB locks, it may happen that a task has a lock on the database but no thread + // to complete its action, while other tasks may have all the threads but are waiting for the first task to + // complete. This creates a deadlock. + logger.warn( + "Having maxConnection > maxThreads can result in deadlocks if transactions or database locks are used." + ) + } + + lazy val executionContext = { + if (!state.compareAndSet(0, 1)) + throw new IllegalStateException( + "Cannot initialize ExecutionContext; AsyncExecutor already shut down" + ) + val queue: BlockingQueue[Runnable] = queueSize match { + case 0 => + // NOTE: SynchronousQueue does not schedule high-priority tasks before others and so it cannot be used when + // the number of connections is limited (lest high-priority tasks may be holding all connections and low/mid + // priority tasks all threads -- resulting in a deadlock). + require( + maxConnections == Integer.MAX_VALUE, + "When using queueSize == 0 (direct hand-off), maxConnections must be Integer.MAX_VALUE.", + ) + + new SynchronousQueue[Runnable] + case -1 => + // NOTE: LinkedBlockingQueue does not schedule high-priority tasks before others and so it cannot be used when + // the number of connections is limited (lest high-priority tasks may be holding all connections and low/mid + // priority tasks all threads -- resulting in a deadlock). + require( + maxConnections == Integer.MAX_VALUE, + "When using queueSize == -1 (unlimited), maxConnections must be Integer.MAX_VALUE.", + ) + + new LinkedBlockingQueue[Runnable] + case n => + // NOTE: The current implementation of ManagedArrayBlockingQueue is flawed. It makes the assumption that all + // tasks go through the queue (which is responsible for scheduling high-priority tasks first). 
However, that + // assumption is wrong since the ThreadPoolExecutor bypasses the queue when it creates new threads. This + // happens whenever it creates a new thread to run a task, i.e. when minThreads < maxThreads and the number + // of existing threads is < maxThreads. + // + // The only way to prevent problems is to have minThreads == maxThreads when using the + // ManagedArrayBlockingQueue. + require( + minThreads == maxThreads, + "When using queueSize > 0, minThreads == maxThreads is required.", + ) + + // NOTE: The current implementation of ManagedArrayBlockingQueue.increaseInUseCount implicitly `require`s that + // maxThreads <= maxConnections. + require( + maxThreads <= maxConnections, + "When using queueSize > 0, maxThreads <= maxConnections is required.", + ) + + // NOTE: Adding up the above rules + // - maxThreads >= maxConnections, to prevent database locking issues when using transactions + // - maxThreads <= maxConnections, required by ManagedArrayBlockingQueue + // - maxThreads == minThreads, ManagedArrayBlockingQueue + // + // We have maxThreads == minThreads == maxConnections as the only working configuration + + new ManagedArrayBlockingQueue(maxConnections, n).asInstanceOf[BlockingQueue[Runnable]] + } + + // canton change begin + object QueryCostTracker { + + /** count / total time */ + private val cost = new AtomicReference[Map[String, (Long, Long)]](Map()) + private val lastReport = new AtomicReference(CantonTimestamp.now()) + def track(trace: String, runningTime: Long): Unit = { + if (logger.underlying.isInfoEnabled) { + logQueryCost.foreach { case QueryCostMonitoringConfig(frequency, resetOnOutput, _) => + val updated = cost.updateAndGet { tmp => + val (count, total): (Long, Long) = tmp.getOrElse(trace, (0, 0)) + tmp + (trace -> ((count + 1, total + runningTime))) + } + val now = CantonTimestamp.now() + val upd = lastReport.updateAndGet(rp => + if (rp.plusMillis(frequency.unwrap.toMillis) < now) { + // Reset cost tracking + if (resetOnOutput) 
cost.set(Map()) + now + } else rp + ) + if (upd == now) { + val items = updated.toSeq + .sortBy(x => -x._2._2) + .take(15) + .map { case (name, (count, nanos)) => + f"count=$count%7d mean=${nanos / (Math.max(count, 1) * 1e6)}%7.2f ms total=${nanos / 1e9}%5.1f s $name%s" + } + .mkString("\n ") + val total = updated.values.map(_._2).sum + logger.info( + s"Here is our list of the 15 most expensive database queries for ${metrics.prefix} with total of ${total / 1e9}%5.1f s:\n " + items + ) + } + } + } + } + } + + val running = new ConcurrentLinkedQueue[QueryInfo]() + val (warnOnSlowQuery, warnOnSlowQueryMs): (Boolean, Long) = warnOnSlowQueryO match { + case Some(duration) => (true, duration.duration.toMillis) + case None => (false, 20000) + } + val warnIntervalMs = warnInterval.duration.toMillis + val lastAlert = new AtomicReference[Long](0) + + @tailrec + def cleanupAndAlert(now: Long): Unit = if (warnOnSlowQuery) { + Option(running.poll()) match { + // if item is done, drop it and iterate + case Some(item) if item.isDone => + cleanupAndAlert(now) + case None => () + // item is not done, check if we should warn + case Some(item) => + // determine if we should warn again + val last = lastAlert.get() + def isSlow: Boolean = + TimeUnit.NANOSECONDS.toMillis(now - item.getScheduledNanos) > warnOnSlowQueryMs + def alert: Boolean = TimeUnit.NANOSECONDS.toMillis(now - last) > warnIntervalMs + // if item is expired and if this warning process isn't running concurrently, emit a new warning + if (isSlow && alert && lastAlert.compareAndSet(last, now)) { + item.reportAsSlow() + import scala.jdk.CollectionConverters.* + val queries = (running + .iterator() + .asScala + .filterNot(_.isDone) + .toSeq :+ item) + .sortBy(_.getScheduledNanos) + .map(x => + s"${x.callsite} running-for=${TimeUnit.NANOSECONDS.toMillis(now - x.getScheduledNanos)} ms" + ) + .mkString("\n ") + if (queries.nonEmpty) { + logger.warn("Very slow or blocked queries detected:\n " + queries) + } + } + // put it back 
+ running.add(item).discard + } + + } + + // schedule background check for slow queries + val backgroundChecker = + if (warnOnSlowQuery) + scheduler.map( + _.scheduleAtFixedRate( + () => { + cleanupAndAlert(System.nanoTime()) + }, + 1000L, // initial delay + 1000L, // period + TimeUnit.MILLISECONDS, + ) + ) + else None + + final case class QueryInfo(callsite: String) { + + private val added = System.nanoTime() + private val scheduledNanos = new AtomicReference[Long](0) + private val done = new AtomicBoolean(false) + private val reportedAsSlow = new AtomicBoolean(false) + + // increase queue counter on creation + metrics.queue.inc() + + def scheduled(): Unit = { + metrics.queue.dec() + metrics.running.inc() + val tm = System.nanoTime() + metrics.waitTimer.update(tm - added, TimeUnit.NANOSECONDS) + scheduledNanos.set(tm) + if (warnOnSlowQuery) { + running.add(this).discard + } + } + + def reportAsSlow(): Unit = { + reportedAsSlow.set(true) + } + + def isDone: Boolean = done.get() + + def getScheduledNanos: Long = scheduledNanos.get() + + def completed(): Unit = { + val tm = System.nanoTime() + done.set(true) + val started = scheduledNanos.get() + if (started > 0) { + metrics.running.dec() + QueryCostTracker.track(callsite, tm - started) + } else { + QueryCostTracker.track(s"$callsite - missing start time", tm - added) + } + if (reportedAsSlow.get()) { + logger.warn( + s"Slow database query ${callsite} finished after ${TimeUnit.NANOSECONDS.toMillis(tm - started)} ms" + ) + } + cleanupAndAlert(tm) + } + + def failed(): Unit = { + done.set(true) + metrics.queue.dec() + cleanupAndAlert(System.nanoTime()) + } + + } + // canton change end + + val stats = TrieMap[Runnable, QueryInfo]() + val tf = new DaemonThreadFactory(name + "-") + executor = new ThreadPoolExecutor( + minThreads, + maxThreads, + keepAliveTime.toMillis, + TimeUnit.MILLISECONDS, + queue, + tf, + ) { + + /** If the runnable/task is a low/medium priority item, we increase the items in use count, because first 
thing it will do + * is open a Jdbc connection from the pool. + */ + override def beforeExecute(t: Thread, r: Runnable): Unit = { + (r, queue) match { + case (pr: PrioritizedRunnable, q: ManagedArrayBlockingQueue[Runnable]) + if pr.priority != WithConnection => + q.increaseInUseCount(pr) + case _ => + } + // canton change begin + // update stats + stats.get(r).foreach(_.scheduled()) + // canton change end + super.beforeExecute(t, r) + } + + // canton change begin + private val ignore = + Seq( + "slick", + "java.", + "scala.", + "cats.", + "com.daml.metrics", + "com.daml.executors", + "com.digitalasset.canton.resource", + "com.digitalasset.canton.resource.DbStorageMulti", + "com.digitalasset.canton.util.retry", + "com.digitalasset.canton.metrics", + "com.daml.executors", + "com.digitalasset.canton.store.db.DbBulkUpdateProcessor", + "com.digitalasset.canton.lifecycle", + LoggerUtil.getClass.getName.dropRight(1), // Drop Scala's trailing $ + MonadUtil.getClass.getName.dropRight(1), // Drop Scala's trailing $ + ) + override def execute(command: Runnable): Unit = { + val tr = if (logQueryCost.nonEmpty) { + // find call site + Thread + .currentThread() + .getStackTrace + .find { e => + ignore.forall(pack => !e.getClassName.startsWith(pack)) + } + .map(_.toString) + .getOrElse("") + } else "query-tracking-disabled" + // initialize statistics gathering + stats.put(command, QueryInfo(tr)).discard + try { + super.execute(command) + } catch { + // if we throw here, the task will never be executed. 
therefore, we'll have to remove the task statistics + // again to not leak memory + case NonFatal(e) => + stats.remove(command).foreach(_.failed()).discard + throw e + } + } + // canton change end + + /** If the runnable/task has released the Jdbc connection we decrease the counter again + */ + override def afterExecute(r: Runnable, t: Throwable): Unit = { + try { + super.afterExecute(r, t) + (r, queue) match { + case (pr: PrioritizedRunnable, q: ManagedArrayBlockingQueue[Runnable]) + if pr.connectionReleased => + q.decreaseInUseCount() + case _ => + } + // canton change begin + } finally { + stats.remove(r).foreach(_.completed()) + // canton change end + } + } + // canton change begin + override def shutdownNow(): util.List[Runnable] = { + backgroundChecker.foreach(_.cancel(true)) + running.clear() + super.shutdownNow() + } + // canton change end + } + if (registerMbeans) { + try { + val mbeanServer = ManagementFactory.getPlatformMBeanServer + if (mbeanServer.isRegistered(mbeanName)) + logger.warn(s"MBean $mbeanName already registered (AsyncExecutor names should be unique)") + else { + logger.debug(s"Registering MBean $mbeanName") + mbeanServer.registerMBean( + new AsyncExecutorMXBean { + def getMaxQueueSize = queueSize + def getQueueSize = queue.size() + def getMaxThreads = maxThreads + def getActiveThreads = executor.getActiveCount + }, + mbeanName, + ) + } + } catch { case NonFatal(ex) => logger.error("Error registering MBean", ex) } + } + if (!state.compareAndSet(1, 2)) { + unregisterMbeans() + executor.shutdownNow() + throw new IllegalStateException( + "Cannot initialize ExecutionContext; AsyncExecutor shut down during initialization" + ) + } + new ExecutionContextExecutor { + + override def reportFailure(t: Throwable): Unit = + logger.error("Async executor failed with exception", t) + + override def execute(command: Runnable): Unit = { + if (command.isInstanceOf[PrioritizedRunnable]) { + executor.execute(command) + } else { + executor.execute(new 
PrioritizedRunnable { + override val priority: Priority = WithConnection + override def run(): Unit = command.run() + }) + } + } + } + } + + private[this] def unregisterMbeans(): Unit = if (registerMbeans) { + try { + val mbeanServer = ManagementFactory.getPlatformMBeanServer + logger.debug(s"Unregistering MBean $mbeanName") + try mbeanServer.unregisterMBean(mbeanName) + catch { case _: InstanceNotFoundException => } + } catch { case NonFatal(ex) => logger.error("Error unregistering MBean", ex) } + } + + def close(): Unit = if (state.getAndSet(3) == 2) { + unregisterMbeans() + executor.shutdownNow() + if (!executor.awaitTermination(30, TimeUnit.SECONDS)) + logger.warn("Abandoning ThreadPoolExecutor (not yet destroyed after 30 seconds)") + } + + private class DaemonThreadFactory(namePrefix: String) extends ThreadFactory { + private[this] val group = + Option(System.getSecurityManager).fold(Thread.currentThread.getThreadGroup)(_.getThreadGroup) + private[this] val threadNumber = new AtomicInteger(1) + + def newThread(r: Runnable): Thread = { + val t = new Thread(group, r, namePrefix + threadNumber.getAndIncrement, 0) + if (!t.isDaemon) t.setDaemon(true) + if (t.getPriority != Thread.NORM_PRIORITY) t.setPriority(Thread.NORM_PRIORITY) + t + } + } + +} diff --git a/canton-3x/community/base/src/main/scala/slick/util/AsyncExecutorWithShutdown.scala b/canton-3x/community/base/src/main/scala/slick/util/AsyncExecutorWithShutdown.scala new file mode 100644 index 0000000000..579f93d22b --- /dev/null +++ b/canton-3x/community/base/src/main/scala/slick/util/AsyncExecutorWithShutdown.scala @@ -0,0 +1,8 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package slick.util + +trait AsyncExecutorWithShutdown extends AsyncExecutor { + def isShuttingDown: Boolean +} diff --git a/canton-3x/community/base/src/main/scala/slick/util/LICENSE.txt b/canton-3x/community/base/src/main/scala/slick/util/LICENSE.txt new file mode 100644 index 0000000000..b92ac80e22 --- /dev/null +++ b/canton-3x/community/base/src/main/scala/slick/util/LICENSE.txt @@ -0,0 +1,25 @@ +Copyright 2011-2021 Lightbend, Inc. + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ActiveContracts.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ActiveContracts.java new file mode 100644 index 0000000000..f091460278 --- /dev/null +++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ActiveContracts.java @@ -0,0 +1,55 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.javaapi.data; + +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import org.checkerframework.checker.nullness.qual.NonNull; + +public final class ActiveContracts { + + public final Optional offset; + + public final List activeContracts; + + public final String workflowId; + + public ActiveContracts( + @NonNull Optional offset, + @NonNull List activeContracts, + @NonNull String workflowId) { + this.offset = offset; + this.activeContracts = activeContracts; + this.workflowId = workflowId; + } + + @Override + public String toString() { + return "ActiveContracts{" + + "offset='" + + offset + + '\'' + + ", activeContracts=" + + activeContracts + + ", workflowId=" + + workflowId + + '}'; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ActiveContracts that = (ActiveContracts) o; + return offset.equals(that.offset) + && Objects.equals(activeContracts, that.activeContracts) + && Objects.equals(workflowId, that.workflowId); + } + + @Override + public int hashCode() { + return Objects.hash(offset, activeContracts, workflowId); + } +} diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ArchivedEvent.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ArchivedEvent.java new file mode 100644 index 0000000000..430dbb5319 
--- /dev/null +++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ArchivedEvent.java @@ -0,0 +1,105 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.javaapi.data; + +import com.daml.ledger.api.v1.EventOuterClass; +import java.util.List; +import java.util.Objects; +import org.checkerframework.checker.nullness.qual.NonNull; + +public final class ArchivedEvent implements Event { + + private final List witnessParties; + + private final String eventId; + + private final Identifier templateId; + + private final String contractId; + + public ArchivedEvent( + @NonNull List<@NonNull String> witnessParties, + @NonNull String eventId, + @NonNull Identifier templateId, + @NonNull String contractId) { + this.witnessParties = witnessParties; + this.eventId = eventId; + this.templateId = templateId; + this.contractId = contractId; + } + + @NonNull + @Override + public List<@NonNull String> getWitnessParties() { + return witnessParties; + } + + @NonNull + @Override + public String getEventId() { + return eventId; + } + + @NonNull + @Override + public Identifier getTemplateId() { + return templateId; + } + + @NonNull + @Override + public String getContractId() { + return contractId; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ArchivedEvent that = (ArchivedEvent) o; + return Objects.equals(witnessParties, that.witnessParties) + && Objects.equals(eventId, that.eventId) + && Objects.equals(templateId, that.templateId) + && Objects.equals(contractId, that.contractId); + } + + @Override + public int hashCode() { + + return Objects.hash(witnessParties, eventId, templateId, contractId); + } + + @Override + public String toString() { + return "ArchivedEvent{" + + "witnessParties=" + + witnessParties + + ", eventId='" + + eventId + + '\'' + + ", templateId=" 
+ + templateId + + ", contractId='" + + contractId + + '\'' + + '}'; + } + + public EventOuterClass.ArchivedEvent toProto() { + return EventOuterClass.ArchivedEvent.newBuilder() + .setContractId(getContractId()) + .setEventId(getEventId()) + .setTemplateId(getTemplateId().toProto()) + .addAllWitnessParties(this.getWitnessParties()) + .build(); + } + + public static ArchivedEvent fromProto(EventOuterClass.ArchivedEvent archivedEvent) { + return new ArchivedEvent( + archivedEvent.getWitnessPartiesList(), + archivedEvent.getEventId(), + Identifier.fromProto(archivedEvent.getTemplateId()), + archivedEvent.getContractId()); + } +} diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Bool.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Bool.java new file mode 100644 index 0000000000..264deb863d --- /dev/null +++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Bool.java @@ -0,0 +1,58 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.javaapi.data; + +import com.daml.ledger.api.v1.ValueOuterClass; +import java.util.Objects; + +public final class Bool extends Value { + + private final boolean value; + + public static final Bool TRUE = new Bool(true); + public static final Bool FALSE = new Bool(false); + + // TODO(i15639) make private; delete equals/hashCode + /** @deprecated Use {@link #of} instead; since Daml 2.5.0 */ + @Deprecated + public Bool(boolean value) { + this.value = value; + } + + public static Bool of(boolean value) { + return value ? 
TRUE : FALSE; + } + + @Override + public ValueOuterClass.Value toProto() { + return ValueOuterClass.Value.newBuilder().setBool(this.value).build(); + } + + public boolean isValue() { + return value; + } + + public boolean getValue() { + return isValue(); + } + + @Override + public String toString() { + return "Bool{" + "value=" + value + '}'; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Bool bool = (Bool) o; + return value == bool.value; + } + + @Override + public int hashCode() { + + return Objects.hash(value); + } +} diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Checkpoint.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Checkpoint.java new file mode 100644 index 0000000000..aa60e95cf4 --- /dev/null +++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Checkpoint.java @@ -0,0 +1,68 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
+ +package com.daml.ledger.javaapi.data; + +import com.daml.ledger.api.v1.CommandCompletionServiceOuterClass; +import java.time.Instant; +import java.util.Objects; +import org.checkerframework.checker.nullness.qual.NonNull; + +public final class Checkpoint { + + private final Instant recordTime; + + private final LedgerOffset offset; + + public Checkpoint(@NonNull Instant recordTime, @NonNull LedgerOffset offset) { + this.recordTime = recordTime; + this.offset = offset; + } + + public static Checkpoint fromProto(CommandCompletionServiceOuterClass.Checkpoint checkpoint) { + LedgerOffset offset = LedgerOffset.fromProto(checkpoint.getOffset()); + return new Checkpoint( + Instant.ofEpochSecond( + checkpoint.getRecordTime().getSeconds(), checkpoint.getRecordTime().getNanos()), + offset); + } + + public CommandCompletionServiceOuterClass.Checkpoint toProto() { + return CommandCompletionServiceOuterClass.Checkpoint.newBuilder() + .setRecordTime( + com.google.protobuf.Timestamp.newBuilder() + .setSeconds(this.recordTime.getEpochSecond()) + .setNanos(this.recordTime.getNano()) + .build()) + .setOffset(this.offset.toProto()) + .build(); + } + + public @NonNull Instant getRecordTime() { + return recordTime; + } + + @NonNull + public LedgerOffset getOffset() { + return offset; + } + + @Override + public String toString() { + return "Checkpoint{" + "recordTime=" + recordTime + ", offset=" + offset + '}'; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Checkpoint that = (Checkpoint) o; + return Objects.equals(recordTime, that.recordTime) && Objects.equals(offset, that.offset); + } + + @Override + public int hashCode() { + + return Objects.hash(recordTime, offset); + } +} diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Command.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Command.java new file mode 
100644 index 0000000000..1d8c6306ee --- /dev/null +++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Command.java @@ -0,0 +1,73 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.javaapi.data; + +import com.daml.ledger.api.v1.CommandsOuterClass; +import com.daml.ledger.javaapi.data.codegen.HasCommands; +import java.util.List; +import java.util.Optional; + +public abstract class Command implements HasCommands { + + abstract Identifier getTemplateId(); + + @Override + public final List commands() { + return List.of(this); + } + + public static Command fromProtoCommand(CommandsOuterClass.Command command) { + switch (command.getCommandCase()) { + case CREATE: + return CreateCommand.fromProto(command.getCreate()); + case EXERCISE: + return ExerciseCommand.fromProto(command.getExercise()); + case CREATEANDEXERCISE: + return CreateAndExerciseCommand.fromProto(command.getCreateAndExercise()); + case EXERCISEBYKEY: + return ExerciseByKeyCommand.fromProto(command.getExerciseByKey()); + case COMMAND_NOT_SET: + default: + throw new ProtoCommandUnknown(command); + } + } + + public CommandsOuterClass.Command toProtoCommand() { + CommandsOuterClass.Command.Builder builder = CommandsOuterClass.Command.newBuilder(); + if (this instanceof CreateCommand) { + builder.setCreate(((CreateCommand) this).toProto()); + } else if (this instanceof ExerciseCommand) { + builder.setExercise(((ExerciseCommand) this).toProto()); + } else if (this instanceof CreateAndExerciseCommand) { + builder.setCreateAndExercise(((CreateAndExerciseCommand) this).toProto()); + } else if (this instanceof ExerciseByKeyCommand) { + builder.setExerciseByKey(((ExerciseByKeyCommand) this).toProto()); + } else { + throw new CommandUnknown(this); + } + return builder.build(); + } + + public final Optional asCreateCommand() { + return (this instanceof CreateCommand) ? 
Optional.of((CreateCommand) this) : Optional.empty(); + } + + public final Optional asExerciseCommand() { + return (this instanceof ExerciseCommand) + ? Optional.of((ExerciseCommand) this) + : Optional.empty(); + } +} + +class CommandUnknown extends RuntimeException { + public CommandUnknown(Command command) { + super("Command unknown " + command.toString()); + } +} + +class ProtoCommandUnknown extends RuntimeException { + public ProtoCommandUnknown(CommandsOuterClass.Command command) { + super("Command unknown " + command.toString()); + } +} diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CommandsSubmission.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CommandsSubmission.java new file mode 100644 index 0000000000..47092e3481 --- /dev/null +++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CommandsSubmission.java @@ -0,0 +1,278 @@ +// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.javaapi.data; + +import static java.util.Collections.emptyList; +import static java.util.Collections.unmodifiableList; +import static java.util.Optional.empty; + +import com.daml.ledger.javaapi.data.codegen.HasCommands; +import java.time.Duration; +import java.time.Instant; +import java.util.List; +import java.util.Optional; +import org.checkerframework.checker.nullness.qual.NonNull; + +/** + * This class can be used to build a valid submission. It provides {@link #create(String, String, List)} + * for initial creation and methods to set optional parameters + * e.g {@link #withActAs(List)}, {@link #withWorkflowId(String)} etc. + * + * Usage: + *
+ * <pre>{@code
+ *   var submission = CommandsSubmission.create(applicationId, commandId, commands)
+ *                                   .withAccessToken(token)
+ *                                   .withParty(party)
+ *                                   .with...
+ * }</pre>
+ */
+public final class CommandsSubmission {
+  // NOTE(review): the raw Optional/List field types looked like generics stripped by an
+  // extraction step; restored from the intact List<@NonNull DisclosedContract> constructor
+  // parameter and the java.time.Instant/Duration imports — confirm against upstream.
+  // Every field is assigned exactly once (in the constructor) and the with* methods return
+  // modified copies, so all fields are declared final to make the immutability explicit.
+  private final String applicationId;
+  private final String commandId;
+  private final List<@NonNull ? extends HasCommands> commands;
+
+  private final Optional<String> workflowId;
+  private final List<@NonNull String> actAs;
+  private final List<@NonNull String> readAs;
+  private final Optional<Instant> minLedgerTimeAbs;
+  private final Optional<Duration> minLedgerTimeRel;
+  private final Optional<Duration> deduplicationTime;
+  private final Optional<String> accessToken;
+  private final List<@NonNull DisclosedContract> disclosedContracts;
+
+  /**
+   * Use {@link #create(String, String, List)} and the {@code with*} copy methods instead of
+   * calling this constructor directly.
+   *
+   * <p>NOTE(review): the raw {@code Optional} parameter types looked like stripped generics;
+   * restored to match the field types — confirm against upstream.
+   */
+  protected CommandsSubmission(
+      String applicationId,
+      String commandId,
+      List<@NonNull ? extends HasCommands> commands,
+      List<@NonNull String> actAs,
+      List<@NonNull String> readAs,
+      Optional<String> workflowId,
+      Optional<Instant> minLedgerTimeAbs,
+      Optional<Duration> minLedgerTimeRel,
+      Optional<Duration> deduplicationTime,
+      Optional<String> accessToken,
+      List<@NonNull DisclosedContract> disclosedContracts) {
+    // Assignments follow parameter order to make an omission easy to spot.
+    this.applicationId = applicationId;
+    this.commandId = commandId;
+    this.commands = commands;
+    this.actAs = actAs;
+    this.readAs = readAs;
+    this.workflowId = workflowId;
+    this.minLedgerTimeAbs = minLedgerTimeAbs;
+    this.minLedgerTimeRel = minLedgerTimeRel;
+    this.deduplicationTime = deduplicationTime;
+    this.accessToken = accessToken;
+    this.disclosedContracts = disclosedContracts;
+  }
+
+  /**
+   * Creates a submission carrying the three required fields; every optional field starts out
+   * empty and can be set through the {@code with*} copy methods.
+   *
+   * @param applicationId identifier of the submitting application
+   * @param commandId identifier of this command submission
+   * @param commands the commands to submit
+   */
+  public static CommandsSubmission create(
+      String applicationId, String commandId, List<@NonNull ? extends HasCommands> commands) {
+    return new CommandsSubmission(
+        applicationId,
+        commandId,
+        commands,
+        emptyList(),
+        emptyList(),
+        empty(),
+        empty(),
+        // was Optional.empty() — use the statically imported empty() like every other argument
+        empty(),
+        empty(),
+        empty(),
+        emptyList());
+  }
+
+  // NOTE(review): the raw Optional/List return types below looked like stripped generics;
+  // restored to match the constructor parameters — confirm against upstream.
+  public Optional<String> getWorkflowId() {
+    return workflowId;
+  }
+
+  public String getApplicationId() {
+    return applicationId;
+  }
+
+  public String getCommandId() {
+    return commandId;
+  }
+
+  /** Unmodifiable view of the act-as parties. */
+  public List<@NonNull String> getActAs() {
+    return unmodifiableList(actAs);
+  }
+
+  /** Unmodifiable view of the read-as parties. */
+  public List<@NonNull String> getReadAs() {
+    return unmodifiableList(readAs);
+  }
+
+  public Optional<Instant> getMinLedgerTimeAbs() {
+    return minLedgerTimeAbs;
+  }
+
+  public Optional<Duration> getMinLedgerTimeRel() {
+    return minLedgerTimeRel;
+  }
+
+  public Optional<Duration> getDeduplicationTime() {
+    return deduplicationTime;
+  }
+
+  /** Unmodifiable view of the commands to submit. */
+  public List<? extends HasCommands> getCommands() {
+    return unmodifiableList(commands);
+  }
+
+  public Optional<String> getAccessToken() {
+    return accessToken;
+  }
+
+  /** Unmodifiable view of the disclosed contracts attached to this submission. */
+  public List<@NonNull DisclosedContract> getDisclosedContracts() {
+    return unmodifiableList(disclosedContracts);
+  }
+
+  /** Returns a copy of this submission with {@code workflowId} set to the given value. */
+  public CommandsSubmission withWorkflowId(String workflowId) {
+    return new CommandsSubmission(
+        applicationId,
+        commandId,
+        commands,
+        actAs,
+        readAs,
+        Optional.of(workflowId),
+        minLedgerTimeAbs,
+        minLedgerTimeRel,
+        deduplicationTime,
+        accessToken,
+        disclosedContracts);
+  }
+
+  /**
+   * Returns a copy of this submission acting as the single given party — note this REPLACES
+   * any previously set act-as parties rather than adding to them.
+   */
+  public CommandsSubmission withActAs(String actAs) {
+    return new CommandsSubmission(
+        applicationId,
+        commandId,
+        commands,
+        List.of(actAs),
+        readAs,
+        workflowId,
+        minLedgerTimeAbs,
+        minLedgerTimeRel,
+        deduplicationTime,
+        accessToken,
+        disclosedContracts);
+  }
+
+  /** Returns a copy of this submission with the act-as parties replaced by the given list. */
+  public CommandsSubmission withActAs(List<@NonNull String> actAs) {
+    return new CommandsSubmission(
+        applicationId,
+        commandId,
+        commands,
+        actAs,
+        readAs,
+        workflowId,
+        minLedgerTimeAbs,
+        minLedgerTimeRel,
+        deduplicationTime,
+        accessToken,
+        disclosedContracts);
+  }
+
+  /** Returns a copy of this submission with the read-as parties replaced by the given list. */
+  public CommandsSubmission withReadAs(List<@NonNull String> readAs) {
+    return new CommandsSubmission(
+        applicationId,
+        commandId,
+        commands,
+        actAs,
+        readAs,
+        workflowId,
+        minLedgerTimeAbs,
+        minLedgerTimeRel,
+        deduplicationTime,
+        accessToken,
+        disclosedContracts);
+  }
+
+  /**
+   * Returns a copy of this submission with the absolute minimum ledger time replaced.
+   * NOTE(review): raw Optional parameter — generic argument appears lost in this code drop.
+   */
+  public CommandsSubmission withMinLedgerTimeAbs(Optional minLedgerTimeAbs) {
+    return new CommandsSubmission(
+        applicationId,
+        commandId,
+        commands,
+        actAs,
+        readAs,
+        workflowId,
+        minLedgerTimeAbs,
+        minLedgerTimeRel,
+        deduplicationTime,
+        accessToken,
+        disclosedContracts);
+  }
+
+  /**
+   * Returns a copy of this submission with the relative minimum ledger time replaced.
+   * NOTE(review): raw Optional parameter — generic argument appears lost in this code drop.
+   */
+  public CommandsSubmission withMinLedgerTimeRel(Optional minLedgerTimeRel) {
+    return new CommandsSubmission(
+        applicationId,
+        commandId,
+        commands,
+        actAs,
+        readAs,
+        workflowId,
+        minLedgerTimeAbs,
+        minLedgerTimeRel,
+        deduplicationTime,
+        accessToken,
+        disclosedContracts);
+  }
+
+  /**
+   * Returns a copy of this submission with the deduplication period replaced.
+   * NOTE(review): raw Optional parameter — generic argument appears lost in this code drop.
+   */
+  public CommandsSubmission withDeduplicationTime(Optional deduplicationTime) {
+    return new CommandsSubmission(
+        applicationId,
+        commandId,
+        commands,
+        actAs,
+        readAs,
+        workflowId,
+        minLedgerTimeAbs,
+        minLedgerTimeRel,
+        deduplicationTime,
+        accessToken,
+        disclosedContracts);
+  }
+
+  /** Returns a copy of this submission whose command list is replaced by {@code commands}. */
+  public CommandsSubmission withCommands(List<@NonNull ? extends HasCommands> commands) {
+    return new CommandsSubmission(
+        applicationId,
+        commandId,
+        commands,
+        actAs,
+        readAs,
+        workflowId,
+        minLedgerTimeAbs,
+        minLedgerTimeRel,
+        deduplicationTime,
+        accessToken,
+        disclosedContracts);
+  }
+
+  /**
+   * Returns a copy of this submission with the access token replaced.
+   * NOTE(review): raw Optional parameter — generic argument appears lost in this code drop.
+   */
+  public CommandsSubmission withAccessToken(Optional accessToken) {
+    return new CommandsSubmission(
+        applicationId,
+        commandId,
+        commands,
+        actAs,
+        readAs,
+        workflowId,
+        minLedgerTimeAbs,
+        minLedgerTimeRel,
+        deduplicationTime,
+        accessToken,
+        disclosedContracts);
+  }
+
+  /**
+   * Returns a copy of this submission with the disclosed contracts replaced.
+   * NOTE(review): raw List parameter — generic argument appears lost in this code drop.
+   */
+  public CommandsSubmission withDisclosedContracts(List disclosedContracts) {
+    return new CommandsSubmission(
+        applicationId,
+        commandId,
+        commands,
+        actAs,
+        readAs,
+        workflowId,
+        minLedgerTimeAbs,
+        minLedgerTimeRel,
+        deduplicationTime,
+        accessToken,
+        disclosedContracts);
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CompletionEndResponse.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CompletionEndResponse.java
new file mode 100644
index 0000000000..d5b6909276
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CompletionEndResponse.java
@@ -0,0 +1,44 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.CommandCompletionServiceOuterClass;
+import java.util.Objects;
+
+/** Java-bindings counterpart of the gRPC {@code CompletionEnd} response: the stream-end offset. */
+public final class CompletionEndResponse {
+
+  private final LedgerOffset offset;
+
+  public CompletionEndResponse(LedgerOffset offset) {
+    this.offset = offset;
+  }
+
+  /** Decodes the gRPC response into its Java-bindings representation. */
+  public static CompletionEndResponse fromProto(
+      CommandCompletionServiceOuterClass.CompletionEndResponse response) {
+    LedgerOffset decodedOffset = LedgerOffset.fromProto(response.getOffset());
+    return new CompletionEndResponse(decodedOffset);
+  }
+
+  public LedgerOffset getOffset() {
+    return offset;
+  }
+
+  @Override
+  public String toString() {
+    return "CompletionEndResponse{" + "offset=" + offset + '}';
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (o == this) return true;
+    // Class is final, so an instanceof check is equivalent to the usual getClass comparison.
+    if (!(o instanceof CompletionEndResponse)) return false;
+    CompletionEndResponse other = (CompletionEndResponse) o;
+    return Objects.equals(offset, other.offset);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(offset);
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CompletionStreamRequest.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CompletionStreamRequest.java
new file mode 100644
index 0000000000..93e6d8a311
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CompletionStreamRequest.java
@@ -0,0 +1,114 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.CommandCompletionServiceOuterClass;
+import java.util.HashSet;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+
+/** Java-bindings counterpart of the gRPC {@code CompletionStream} request. */
+public final class CompletionStreamRequest {
+
+  private final String ledgerId;
+
+  private final String applicationId;
+
+  // NOTE(review): generic type arguments appear to have been stripped in this code drop;
+  // restored as Set<String> / Optional<LedgerOffset> from how the fields are populated below.
+  private final Set<String> parties;
+
+  private final Optional<LedgerOffset> offset;
+
+  /**
+   * Decodes the gRPC request.
+   *
+   * <p>The offset is only decoded when the proto field is actually set: decoding the proto
+   * default instance would not round-trip a request created via the offset-less constructor.
+   */
+  public static CompletionStreamRequest fromProto(
+      CommandCompletionServiceOuterClass.CompletionStreamRequest request) {
+    String ledgerId = request.getLedgerId();
+    String applicationId = request.getApplicationId();
+    Set<String> parties = new HashSet<>(request.getPartiesList());
+    if (request.hasOffset()) {
+      return new CompletionStreamRequest(
+          ledgerId, applicationId, parties, LedgerOffset.fromProto(request.getOffset()));
+    }
+    return new CompletionStreamRequest(ledgerId, applicationId, parties);
+  }
+
+  /** Encodes this request; the offset field is left unset when absent. */
+  public CommandCompletionServiceOuterClass.CompletionStreamRequest toProto() {
+    CommandCompletionServiceOuterClass.CompletionStreamRequest.Builder protoBuilder =
+        CommandCompletionServiceOuterClass.CompletionStreamRequest.newBuilder()
+            .setLedgerId(this.ledgerId)
+            .setApplicationId(this.applicationId)
+            .addAllParties(this.parties);
+    this.offset.ifPresent(offset -> protoBuilder.setOffset(offset.toProto()));
+    return protoBuilder.build();
+  }
+
+  @Override
+  public String toString() {
+    return "CompletionStreamRequest{"
+        + "ledgerId='"
+        + ledgerId
+        + '\''
+        + ", applicationId='"
+        + applicationId
+        + '\''
+        + ", parties="
+        + parties
+        + ", offset="
+        + offset
+        + '}';
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    CompletionStreamRequest that = (CompletionStreamRequest) o;
+    return Objects.equals(ledgerId, that.ledgerId)
+        && Objects.equals(applicationId, that.applicationId)
+        && Objects.equals(parties, that.parties)
+        && Objects.equals(offset, that.offset);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(ledgerId, applicationId, parties, offset);
+  }
+
+  public String getLedgerId() {
+    return ledgerId;
+  }
+
+  public String getApplicationId() {
+    return applicationId;
+  }
+
+  /** Returns the internal party set; treat as read-only — it is not defensively copied. */
+  public Set<String> getParties() {
+    return parties;
+  }
+
+  /**
+   * @deprecated Legacy, nullable version of {@link #getLedgerOffset()}, which should be used
+   *     instead.
+   */
+  @Deprecated
+  public LedgerOffset getOffset() {
+    return offset.orElse(null);
+  }
+
+  public Optional<LedgerOffset> getLedgerOffset() {
+    return offset;
+  }
+
+  /** Creates a request without an offset. */
+  public CompletionStreamRequest(String ledgerId, String applicationId, Set<String> parties) {
+    this.ledgerId = ledgerId;
+    this.applicationId = applicationId;
+    this.parties = parties;
+    this.offset = Optional.empty();
+  }
+
+  /** Creates a request starting at the given (non-null) offset. */
+  public CompletionStreamRequest(
+      String ledgerId, String applicationId, Set<String> parties, LedgerOffset offset) {
+    this.ledgerId = ledgerId;
+    this.applicationId = applicationId;
+    this.parties = parties;
+    this.offset = Optional.of(offset);
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CompletionStreamResponse.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CompletionStreamResponse.java
new file mode 100644
index 0000000000..c02d2a133c
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CompletionStreamResponse.java
@@ -0,0 +1,78 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.CommandCompletionServiceOuterClass;
+import com.daml.ledger.api.v1.CompletionOuterClass;
+import java.util.List;
+import java.util.Objects;
+import java.util.Optional;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+/** Java-bindings counterpart of one element of the gRPC {@code CompletionStream} response. */
+public final class CompletionStreamResponse {
+
+  // NOTE(review): generic type arguments appear to have been stripped in this code drop;
+  // restored from fromProto, which supplies a Checkpoint and response.getCompletionsList().
+  private final Optional<Checkpoint> checkpoint;
+
+  private final List<CompletionOuterClass.Completion> completions;
+
+  public CompletionStreamResponse(
+      @NonNull Optional<Checkpoint> checkpoint,
+      @NonNull List<CompletionOuterClass.Completion> completions) {
+    this.checkpoint = checkpoint;
+    this.completions = completions;
+  }
+
+  /** Decodes the gRPC response; the checkpoint is absent when the proto field is unset. */
+  public static CompletionStreamResponse fromProto(
+      CommandCompletionServiceOuterClass.CompletionStreamResponse response) {
+    if (response.hasCheckpoint()) {
+      Checkpoint checkpoint = Checkpoint.fromProto(response.getCheckpoint());
+      return new CompletionStreamResponse(Optional.of(checkpoint), response.getCompletionsList());
+    } else {
+      return new CompletionStreamResponse(Optional.empty(), response.getCompletionsList());
+    }
+  }
+
+  /** Encodes this response; the checkpoint field is left unset when absent. */
+  public CommandCompletionServiceOuterClass.CompletionStreamResponse toProto() {
+    CommandCompletionServiceOuterClass.CompletionStreamResponse.Builder builder =
+        CommandCompletionServiceOuterClass.CompletionStreamResponse.newBuilder();
+    this.checkpoint.ifPresent(c -> builder.setCheckpoint(c.toProto()));
+    builder.addAllCompletions(this.completions);
+    return builder.build();
+  }
+
+  @NonNull
+  public Optional<Checkpoint> getCheckpoint() {
+    return checkpoint;
+  }
+
+  /** Returns the internal completion list; treat as read-only — it is not defensively copied. */
+  @NonNull
+  public List<CompletionOuterClass.Completion> getCompletions() {
+    return completions;
+  }
+
+  @Override
+  public String toString() {
+    return "CompletionStreamResponse{"
+        + "checkpoint="
+        + checkpoint
+        + ", completions="
+        + completions
+        + '}';
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    CompletionStreamResponse that = (CompletionStreamResponse) o;
+    return Objects.equals(checkpoint, that.checkpoint)
+        && Objects.equals(completions, that.completions);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(checkpoint, completions);
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Contract.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Contract.java
new file mode 100644
index 0000000000..fad8ff3c42
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Contract.java
@@ -0,0 +1,6 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+/** Marker interface with no members; implemented by contract representations in these bindings. */
+public interface Contract {}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ContractFilter.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ContractFilter.java
new file mode 100644
index 0000000000..a0d484a478
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ContractFilter.java
@@ -0,0 +1,63 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.javaapi.data.codegen.Contract;
+import com.daml.ledger.javaapi.data.codegen.ContractCompanion;
+import com.daml.ledger.javaapi.data.codegen.ContractTypeCompanion;
+import com.daml.ledger.javaapi.data.codegen.InterfaceCompanion;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Set;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+/**
+ * Utilities to decode a {@code CreatedEvent} into a typed contract and to build a
+ * {@code TransactionFilter} for a set of parties. Instances can only be created from a
+ * {@code ContractCompanion} or an {@code InterfaceCompanion} via the {@code of} factories.
+ *
+ * <p>NOTE(review): the generic type parameters of this class and its factory methods appear to
+ * have been stripped in this code drop (e.g. {@code ContractFilter>} and the double space in
+ * {@code static  ContractFilter of(...)}); restore them from the upstream bindings before
+ * relying on this file compiling.
+ */
+public final class ContractFilter {
+  // Companion that knows how to decode created events into the target contract type.
+  private final ContractTypeCompanion companion;
+
+  private final Filter filter;
+
+  private ContractFilter(ContractTypeCompanion companion, Filter filter) {
+    this.companion = companion;
+    this.filter = filter;
+  }
+
+  // Template case: filter on the companion's template id, hiding the created-event blob.
+  public static  ContractFilter of(ContractCompanion companion) {
+    Filter filter =
+        new InclusiveFilter(
+            Collections.emptyMap(),
+            Collections.singletonMap(
+                companion.TEMPLATE_ID, Filter.Template.HIDE_CREATED_EVENT_BLOB));
+    return new ContractFilter<>(companion, filter);
+  }
+
+  // Interface case: filter on the interface id, including the view, hiding the blob.
+  public static  ContractFilter> of(
+      InterfaceCompanion companion) {
+    Filter filter =
+        new InclusiveFilter(
+            Collections.singletonMap(
+                companion.TEMPLATE_ID, Filter.Interface.INCLUDE_VIEW_HIDE_CREATED_EVENT_BLOB),
+            Collections.emptyMap());
+    return new ContractFilter<>(companion, filter);
+  }
+
+  /** Decodes a created event via the companion; propagates its IllegalArgumentException. */
+  public Ct toContract(CreatedEvent createdEvent) throws IllegalArgumentException {
+    return companion.fromCreatedEvent(createdEvent);
+  }
+
+  /** Builds a transaction filter applying this contract filter to every given party. */
+  public TransactionFilter transactionFilter(Set parties) {
+    return transactionFilter(filter, parties);
+  }
+
+  // Maps every party to the same per-contract filter.
+  private static TransactionFilter transactionFilter(Filter filter, Set parties) {
+    Map partyToFilters =
+        parties.stream().collect(Collectors.toMap(Function.identity(), x -> filter));
+    return new FiltersByParty(partyToFilters);
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ContractId.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ContractId.java
new file mode 100644
index 0000000000..869df576fa
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ContractId.java
@@ -0,0 +1,44 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.ValueOuterClass;
+import java.util.Objects;
+
+/** A ledger contract id, as the {@code ContractId} case of the {@code Value} sum type. */
+public final class ContractId extends Value {
+
+  private final String value;
+
+  public ContractId(String value) {
+    this.value = value;
+  }
+
+  public String getValue() {
+    return value;
+  }
+
+  @Override
+  public ValueOuterClass.Value toProto() {
+    ValueOuterClass.Value.Builder builder = ValueOuterClass.Value.newBuilder();
+    builder.setContractId(value);
+    return builder.build();
+  }
+
+  @Override
+  public String toString() {
+    return "ContractId{value='" + value + "'}";
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (o == this) return true;
+    // Class is final, so an instanceof check is equivalent to the usual getClass comparison.
+    if (!(o instanceof ContractId)) return false;
+    ContractId other = (ContractId) o;
+    return Objects.equals(value, other.value);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(value);
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ContractMetadata.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ContractMetadata.java
new file mode 100644
index 0000000000..934c2af56b
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ContractMetadata.java
@@ -0,0 +1,84 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.ContractMetadataOuterClass;
+import com.google.protobuf.ByteString;
+import java.time.Instant;
+import java.util.Objects;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+/** Creation metadata of a contract: creation time, contract-key hash, and driver metadata. */
+public final class ContractMetadata {
+
+  // A raw Instant (rather than com.daml.ledger.javaapi.data.Timestamp) is used so that
+  // sub-millisecond precision survives from/toProto round-trips losslessly.
+  public final Instant createdAt;
+  public final ByteString driverMetadata;
+  public final ByteString contractKeyHash;
+
+  /** Metadata with epoch creation time and empty hash/driver blobs. */
+  public static ContractMetadata Empty() {
+    return new ContractMetadata(Instant.EPOCH, ByteString.EMPTY, ByteString.EMPTY);
+  }
+
+  // Note the parameter order: the contract-key hash precedes the driver metadata, unlike the
+  // field declaration order above.
+  public ContractMetadata(
+      @NonNull Instant createdAt,
+      @NonNull ByteString contractKeyHash,
+      @NonNull ByteString driverMetadata) {
+    this.createdAt = createdAt;
+    this.contractKeyHash = contractKeyHash;
+    this.driverMetadata = driverMetadata;
+  }
+
+  /** Decodes the gRPC metadata message, preserving nanosecond precision of the timestamp. */
+  @NonNull
+  public static ContractMetadata fromProto(ContractMetadataOuterClass.ContractMetadata metadata) {
+    com.google.protobuf.Timestamp createdAt = metadata.getCreatedAt();
+    return new ContractMetadata(
+        Instant.ofEpochSecond(createdAt.getSeconds(), createdAt.getNanos()),
+        metadata.getContractKeyHash(),
+        metadata.getDriverMetadata());
+  }
+
+  public ContractMetadataOuterClass.ContractMetadata toProto() {
+    com.google.protobuf.Timestamp protoCreatedAt =
+        com.google.protobuf.Timestamp.newBuilder()
+            .setSeconds(createdAt.getEpochSecond())
+            .setNanos(createdAt.getNano())
+            .build();
+    return ContractMetadataOuterClass.ContractMetadata.newBuilder()
+        .setCreatedAt(protoCreatedAt)
+        .setContractKeyHash(contractKeyHash)
+        .setDriverMetadata(driverMetadata)
+        .build();
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (o == this) return true;
+    // Class is final, so an instanceof check is equivalent to the usual getClass comparison.
+    if (!(o instanceof ContractMetadata)) return false;
+    ContractMetadata other = (ContractMetadata) o;
+    return Objects.equals(createdAt, other.createdAt)
+        && Objects.equals(contractKeyHash, other.contractKeyHash)
+        && Objects.equals(driverMetadata, other.driverMetadata);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(createdAt, contractKeyHash, driverMetadata);
+  }
+
+  @Override
+  public String toString() {
+    return "ContractMetadata{createdAt='"
+        + createdAt
+        + "', contractKeyHash='"
+        + contractKeyHash
+        + "', driverMetadata='"
+        + driverMetadata
+        + "'}";
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CreateAndExerciseCommand.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CreateAndExerciseCommand.java
new file mode 100644
index 0000000000..39f8cce3ea
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CreateAndExerciseCommand.java
@@ -0,0 +1,95 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.CommandsOuterClass;
+import java.util.Objects;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+/** A command that creates a contract and exercises a choice on it in one atomic step. */
+public final class CreateAndExerciseCommand extends Command {
+  private final Identifier templateId;
+
+  private final DamlRecord createArguments;
+
+  private final String choice;
+
+  private final Value choiceArgument;
+
+  public CreateAndExerciseCommand(
+      @NonNull Identifier templateId,
+      @NonNull DamlRecord createArguments,
+      @NonNull String choice,
+      @NonNull Value choiceArgument) {
+    this.templateId = templateId;
+    this.createArguments = createArguments;
+    this.choice = choice;
+    this.choiceArgument = choiceArgument;
+  }
+
+  /** Decodes the gRPC representation of a create-and-exercise command. */
+  public static CreateAndExerciseCommand fromProto(
+      CommandsOuterClass.CreateAndExerciseCommand command) {
+    return new CreateAndExerciseCommand(
+        Identifier.fromProto(command.getTemplateId()),
+        DamlRecord.fromProto(command.getCreateArguments()),
+        command.getChoice(),
+        Value.fromProto(command.getChoiceArgument()));
+  }
+
+  public CommandsOuterClass.CreateAndExerciseCommand toProto() {
+    CommandsOuterClass.CreateAndExerciseCommand.Builder builder =
+        CommandsOuterClass.CreateAndExerciseCommand.newBuilder();
+    builder.setTemplateId(templateId.toProto());
+    builder.setCreateArguments(createArguments.toProtoRecord());
+    builder.setChoice(choice);
+    builder.setChoiceArgument(choiceArgument.toProto());
+    return builder.build();
+  }
+
+  @Override
+  Identifier getTemplateId() {
+    return templateId;
+  }
+
+  public DamlRecord getCreateArguments() {
+    return createArguments;
+  }
+
+  public String getChoice() {
+    return choice;
+  }
+
+  public Value getChoiceArgument() {
+    return choiceArgument;
+  }
+
+  @Override
+  public String toString() {
+    return "CreateAndExerciseCommand{templateId="
+        + templateId
+        + ", createArguments="
+        + createArguments
+        + ", choice='"
+        + choice
+        + "', choiceArgument="
+        + choiceArgument
+        + '}';
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (o == this) return true;
+    // Class is final, so an instanceof check is equivalent to the usual getClass comparison.
+    if (!(o instanceof CreateAndExerciseCommand)) return false;
+    CreateAndExerciseCommand other = (CreateAndExerciseCommand) o;
+    return templateId.equals(other.templateId)
+        && createArguments.equals(other.createArguments)
+        && choice.equals(other.choice)
+        && choiceArgument.equals(other.choiceArgument);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(templateId, createArguments, choice, choiceArgument);
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CreateCommand.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CreateCommand.java
new file mode 100644
index 0000000000..7ecd7210f1
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CreateCommand.java
@@ -0,0 +1,69 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.CommandsOuterClass;
+import java.util.Objects;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+/** A command that creates a contract instance of the given template. */
+public final class CreateCommand extends Command {
+
+  private final Identifier templateId;
+
+  private final DamlRecord createArguments;
+
+  public CreateCommand(@NonNull Identifier templateId, @NonNull DamlRecord createArguments) {
+    this.templateId = templateId;
+    this.createArguments = createArguments;
+  }
+
+  /** Decodes the gRPC representation of a create command. */
+  public static CreateCommand fromProto(CommandsOuterClass.CreateCommand create) {
+    return new CreateCommand(
+        Identifier.fromProto(create.getTemplateId()),
+        DamlRecord.fromProto(create.getCreateArguments()));
+  }
+
+  public CommandsOuterClass.CreateCommand toProto() {
+    CommandsOuterClass.CreateCommand.Builder builder = CommandsOuterClass.CreateCommand.newBuilder();
+    builder.setTemplateId(templateId.toProto());
+    builder.setCreateArguments(createArguments.toProtoRecord());
+    return builder.build();
+  }
+
+  @NonNull
+  @Override
+  public Identifier getTemplateId() {
+    return templateId;
+  }
+
+  @NonNull
+  public DamlRecord getCreateArguments() {
+    return createArguments;
+  }
+
+  @Override
+  public String toString() {
+    return "CreateCommand{templateId=" + templateId + ", createArguments=" + createArguments + '}';
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (o == this) return true;
+    // Class is final, so an instanceof check is equivalent to the usual getClass comparison.
+    if (!(o instanceof CreateCommand)) return false;
+    CreateCommand other = (CreateCommand) o;
+    return Objects.equals(templateId, other.templateId)
+        && Objects.equals(createArguments, other.createArguments);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(templateId, createArguments);
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CreateUserRequest.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CreateUserRequest.java
new file mode 100644
index 0000000000..5d5b3b46ca
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CreateUserRequest.java
@@ -0,0 +1,62 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.admin.UserManagementServiceOuterClass;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Objects;
+import java.util.stream.Collectors;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+/** Request to create a user with at least one granted right. */
+public final class CreateUserRequest {
+
+  private final User user;
+  // NOTE(review): the generic type argument appears to have been stripped in this code drop;
+  // restored as List<User.Right> since the list is populated exclusively with User.Right values.
+  private final List<User.Right> rights;
+
+  /** Creates a request for {@code user}; the mandatory first right forces a non-empty list. */
+  public CreateUserRequest(User user, User.Right right, User.Right... rights) {
+    this.user = user;
+    this.rights = new ArrayList<>(rights.length + 1);
+    this.rights.add(right);
+    this.rights.addAll(Arrays.asList(rights));
+  }
+
+  /** Convenience: a user with the given primary party that can act as that party. */
+  public CreateUserRequest(@NonNull String id, @NonNull String primaryParty) {
+    this(new User(id, primaryParty), new User.Right.CanActAs(primaryParty));
+  }
+
+  public User getUser() {
+    return user;
+  }
+
+  /** Returns a defensive copy; mutating the result does not affect this request. */
+  public List<User.Right> getRights() {
+    return new ArrayList<>(rights);
+  }
+
+  public UserManagementServiceOuterClass.CreateUserRequest toProto() {
+    return UserManagementServiceOuterClass.CreateUserRequest.newBuilder()
+        .setUser(this.user.toProto())
+        .addAllRights(this.rights.stream().map(User.Right::toProto).collect(Collectors.toList()))
+        .build();
+  }
+
+  @Override
+  public String toString() {
+    return "CreateUserRequest{" + "user=" + user + ", rights=" + rights + '}';
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    CreateUserRequest that = (CreateUserRequest) o;
+    return user.equals(that.user) && rights.equals(that.rights);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(user, rights);
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CreateUserResponse.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CreateUserResponse.java
new file mode 100644
index 0000000000..10eaad106f
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CreateUserResponse.java
@@ -0,0 +1,49 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.admin.UserManagementServiceOuterClass;
+import java.util.Objects;
+
+/** Response to a user-creation request, carrying the created user. */
+public final class CreateUserResponse {
+
+  private final User user;
+
+  public CreateUserResponse(User user) {
+    this.user = user;
+  }
+
+  public User getUser() {
+    return user;
+  }
+
+  public static CreateUserResponse fromProto(
+      UserManagementServiceOuterClass.CreateUserResponse proto) {
+    User decodedUser = User.fromProto(proto.getUser());
+    return new CreateUserResponse(decodedUser);
+  }
+
+  public UserManagementServiceOuterClass.CreateUserResponse toProto() {
+    UserManagementServiceOuterClass.CreateUserResponse.Builder builder =
+        UserManagementServiceOuterClass.CreateUserResponse.newBuilder();
+    builder.setUser(user.toProto());
+    return builder.build();
+  }
+
+  @Override
+  public String toString() {
+    return "CreateUserResponse{" + "user=" + user + '}';
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (o == this) return true;
+    // Class is final, so an instanceof check is equivalent to the usual getClass comparison.
+    if (!(o instanceof CreateUserResponse)) return false;
+    CreateUserResponse other = (CreateUserResponse) o;
+    return user.equals(other.user);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(user);
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CreatedEvent.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CreatedEvent.java
new file mode 100644
index 0000000000..091b5892f5
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/CreatedEvent.java
@@ -0,0 +1,361 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.EventOuterClass;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.StringValue;
+import com.google.rpc.Status;
+import java.time.Instant;
+import java.util.*;
+import java.util.function.BiFunction;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+public final class CreatedEvent implements Event, TreeEvent {
+
+  // Parties on whose behalf this event is visible.
+  private final @NonNull List<@NonNull String> witnessParties;
+
+  private final String eventId;
+
+  private final Identifier templateId;
+
+  private final String contractId;
+
+  // Create arguments of the contract.
+  private final DamlRecord arguments;
+
+  // Successfully computed interface views, keyed by interface id.
+  private final @NonNull Map<@NonNull Identifier, @NonNull DamlRecord> interfaceViews;
+
+  // Interface views whose computation failed, keyed by interface id, with the failure status.
+  private final @NonNull Map<@NonNull Identifier, @NonNull Status> failedInterfaceViews;
+
+  // NOTE(review): raw Optional — the generic argument appears lost in this code drop
+  // (presumably Optional<String>); the matching constructor parameter is raw as well. Confirm.
+  private final Optional agreementText;
+
+  // NOTE(review): raw Optional — generic argument appears lost in this code drop
+  // (presumably Optional<Value>). Confirm.
+  private final Optional contractKey;
+
+  private final @NonNull Set<@NonNull String> signatories;
+
+  private final @NonNull Set<@NonNull String> observers;
+
+  private final @NonNull ByteString createdEventBlob;
+
+  // Note that we can't use a `com.daml.ledger.javaapi.data.Timestamp` here because
+  // it only supports microseconds-precision and we require lossless conversions through
+  // from/toProto.
+  public final @NonNull Instant createdAt;
+
+  /**
+   * Primary constructor. All collection arguments are snapshotted via {@code List.copyOf},
+   * {@code Map.copyOf}, and {@code Set.copyOf}, so those fields end up unmodifiable and
+   * null-hostile; the remaining arguments are stored as-is.
+   */
+  public CreatedEvent(
+      @NonNull List<@NonNull String> witnessParties,
+      @NonNull String eventId,
+      @NonNull Identifier templateId,
+      @NonNull String contractId,
+      @NonNull DamlRecord arguments,
+      @NonNull ByteString createdEventBlob,
+      @NonNull Map<@NonNull Identifier, @NonNull DamlRecord> interfaceViews,
+      @NonNull Map<@NonNull Identifier, com.google.rpc.@NonNull Status> failedInterfaceViews,
+      @NonNull Optional agreementText,
+      @NonNull Optional contractKey,
+      @NonNull Collection<@NonNull String> signatories,
+      @NonNull Collection<@NonNull String> observers,
+      @NonNull Instant createdAt) {
+    this.witnessParties = List.copyOf(witnessParties);
+    this.eventId = eventId;
+    this.templateId = templateId;
+    this.contractId = contractId;
+    this.arguments = arguments;
+    this.createdEventBlob = createdEventBlob;
+    this.interfaceViews = Map.copyOf(interfaceViews);
+    this.failedInterfaceViews = Map.copyOf(failedInterfaceViews);
+    this.agreementText = agreementText;
+    this.contractKey = contractKey;
+    this.signatories = Set.copyOf(signatories);
+    this.observers = Set.copyOf(observers);
+    this.createdAt = createdAt;
+  }
+
+  /**
+   * @deprecated You should pass {@code createArgumentsBlob} and {@code contractMetadata} arguments
+   *     as well. Since Daml 2.6.0
+   */
+  @Deprecated
+  public CreatedEvent(
+      @NonNull List<@NonNull String> witnessParties,
+      @NonNull String eventId,
+      @NonNull Identifier templateId,
+      @NonNull String contractId,
+      @NonNull DamlRecord arguments,
+      @NonNull Map<@NonNull Identifier, @NonNull DamlRecord> interfaceViews,
+      @NonNull Map<@NonNull Identifier, com.google.rpc.@NonNull Status> failedInterfaceViews,
+      @NonNull Optional agreementText,
+      @NonNull Optional contractKey,
+      @NonNull Collection<@NonNull String> signatories,
+      @NonNull Collection<@NonNull String> observers) {
+    // Delegates to the primary constructor with an empty event blob and epoch creation time.
+    this(
+        witnessParties,
+        eventId,
+        templateId,
+        contractId,
+        arguments,
+        ByteString.EMPTY,
+        interfaceViews,
+        failedInterfaceViews,
+        agreementText,
+        contractKey,
+        signatories,
+        observers,
+        Instant.EPOCH);
+  }
+
+  /**
+   * @deprecated Pass {@code interfaceViews} and {@code failedInterfaceViews} arguments; empty maps
+   *     are reasonable defaults. Since Daml 2.4.0
+   */
+  @Deprecated
+  public CreatedEvent(
+      @NonNull List<@NonNull String> witnessParties,
+      @NonNull String eventId,
+      @NonNull Identifier templateId,
+      @NonNull String contractId,
+      @NonNull DamlRecord arguments,
+      @NonNull Optional agreementText,
+      @NonNull Optional contractKey,
+      @NonNull Collection<@NonNull String> signatories,
+      @NonNull Collection<@NonNull String> observers) {
+    // Delegates to the 2.6.0-deprecated constructor with empty interface-view maps.
+    this(
+        witnessParties,
+        eventId,
+        templateId,
+        contractId,
+        arguments,
+        Collections.emptyMap(),
+        Collections.emptyMap(),
+        agreementText,
+        contractKey,
+        signatories,
+        observers);
+  }
+
  /** Parties on whose behalf this event was served; immutable snapshot of the input list. */
  @NonNull
  @Override
  public List<@NonNull String> getWitnessParties() {
    return witnessParties;
  }

  /** Ledger-assigned identifier of this event. */
  @NonNull
  @Override
  public String getEventId() {
    return eventId;
  }

  /** Identifier of the template of the created contract. */
  @NonNull
  @Override
  public Identifier getTemplateId() {
    return templateId;
  }

  /** Contract id of the created contract. */
  @NonNull
  @Override
  public String getContractId() {
    return contractId;
  }

  /** Create arguments of the contract, as a Daml record. */
  @NonNull
  public DamlRecord getArguments() {
    return arguments;
  }

  /** Opaque serialized event payload; {@code ByteString.EMPTY} when the ledger did not send one. */
  public ByteString getCreatedEventBlob() {
    return createdEventBlob;
  }
+
+  @NonNull
+  public Map<@NonNull Identifier, @NonNull DamlRecord> getInterfaceViews() {
+    return interfaceViews;
+  }
+
+  @NonNull
+  public Map<@NonNull Identifier, @NonNull Status> getFailedInterfaceViews() {
+    return failedInterfaceViews;
+  }
+
+  @NonNull
+  public Optional getAgreementText() {
+    return agreementText;
+  }
+
+  @NonNull
+  public Optional getContractKey() {
+    return contractKey;
+  }
+
  /** Signatories of the created contract; immutable snapshot of the input collection. */
  @NonNull
  public Set<@NonNull String> getSignatories() {
    return signatories;
  }

  /** Observers of the created contract; immutable snapshot of the input collection. */
  @NonNull
  public Set<@NonNull String> getObservers() {
    return observers;
  }

  /**
   * {@code createdAt} has been introduced in the Ledger API {@link
   * com.daml.ledger.api.v1.EventOuterClass.CreatedEvent} starting with Canton version 2.8.0. Events
   * sourced from the Ledger API prior to this version will return the default {@link Instant#EPOCH}
   * value.
   */
  @NonNull
  public Instant getCreatedAt() {
    return createdAt;
  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    CreatedEvent that = (CreatedEvent) o;
+    return Objects.equals(witnessParties, that.witnessParties)
+        && Objects.equals(eventId, that.eventId)
+        && Objects.equals(templateId, that.templateId)
+        && Objects.equals(contractId, that.contractId)
+        && Objects.equals(arguments, that.arguments)
+        && Objects.equals(createdEventBlob, that.createdEventBlob)
+        && Objects.equals(interfaceViews, that.interfaceViews)
+        && Objects.equals(failedInterfaceViews, that.failedInterfaceViews)
+        && Objects.equals(agreementText, that.agreementText)
+        && Objects.equals(contractKey, that.contractKey)
+        && Objects.equals(signatories, that.signatories)
+        && Objects.equals(observers, that.observers)
+        && Objects.equals(createdAt, that.createdAt);
+  }
+
  /** Hash over the same fields compared by {@link #equals(Object)}. */
  @Override
  public int hashCode() {
    return Objects.hash(
        witnessParties,
        eventId,
        templateId,
        contractId,
        arguments,
        createdEventBlob,
        interfaceViews,
        failedInterfaceViews,
        agreementText,
        contractKey,
        signatories,
        observers,
        createdAt);
  }
+
+  @Override
+  public String toString() {
+    return "CreatedEvent{"
+        + "witnessParties="
+        + witnessParties
+        + ", eventId='"
+        + eventId
+        + '\''
+        + ", templateId="
+        + templateId
+        + ", contractId='"
+        + contractId
+        + '\''
+        + ", arguments="
+        + arguments
+        + ", createdEventBlob="
+        + createdEventBlob
+        + ", interfaceViews="
+        + interfaceViews
+        + ", failedInterfaceViews="
+        + failedInterfaceViews
+        + ", agreementText='"
+        + agreementText
+        + "', contractKey="
+        + contractKey
+        + ", signatories="
+        + signatories
+        + ", observers="
+        + observers
+        + ", createdAt="
+        + createdAt
+        + '}';
+  }
+
  /**
   * Converts this event back to its Ledger API protobuf form. Successful and failed interface
   * views are merged into the single {@code interface_views} list, distinguished by whether the
   * message carries a view value or a view status. The optional {@code agreementText} and
   * {@code contractKey} fields are only set when present.
   */
  @SuppressWarnings("deprecation")
  public EventOuterClass.@NonNull CreatedEvent toProto() {
    EventOuterClass.CreatedEvent.Builder builder =
        EventOuterClass.CreatedEvent.newBuilder()
            .setContractId(this.getContractId())
            .setCreateArguments(this.getArguments().toProtoRecord())
            .setCreatedEventBlob(createdEventBlob)
            .addAllInterfaceViews(
                Stream.concat(
                        toProtoInterfaceViews(
                            interfaceViews, (b, dr) -> b.setViewValue(dr.toProtoRecord())),
                        toProtoInterfaceViews(
                            failedInterfaceViews, (b, status) -> b.setViewStatus(status)))
                    .collect(Collectors.toUnmodifiableList()))
            .setEventId(this.getEventId())
            .setTemplateId(this.getTemplateId().toProto())
            .addAllWitnessParties(this.getWitnessParties())
            .addAllSignatories(this.getSignatories())
            .addAllObservers(this.getObservers())
            .setCreatedAt(
                com.google.protobuf.Timestamp.newBuilder()
                    .setSeconds(this.createdAt.getEpochSecond())
                    .setNanos(this.createdAt.getNano())
                    .build());
    agreementText.ifPresent(a -> builder.setAgreementText(StringValue.of(a)));
    contractKey.ifPresent(a -> builder.setContractKey(a.toProto()));
    return builder.build();
  }
+
+  private static  Stream toProtoInterfaceViews(
+      Map views,
+      BiFunction
+          addV) {
+    return views.entrySet().stream()
+        .map(
+            e ->
+                addV.apply(
+                        EventOuterClass.InterfaceView.newBuilder()
+                            .setInterfaceId(e.getKey().toProto()),
+                        e.getValue())
+                    .build());
+  }
+
  /**
   * Builds a {@link CreatedEvent} from its Ledger API protobuf form. Interface views are
   * partitioned by whether a view value is present: messages with a value populate {@code
   * interfaceViews}, the remainder (carrying a status) populate {@code failedInterfaceViews}.
   */
  @SuppressWarnings("deprecation")
  public static CreatedEvent fromProto(EventOuterClass.CreatedEvent createdEvent) {
    var splitInterfaceViews =
        createdEvent.getInterfaceViewsList().stream()
            .collect(Collectors.partitioningBy(EventOuterClass.InterfaceView::hasViewValue));
    return new CreatedEvent(
        createdEvent.getWitnessPartiesList(),
        createdEvent.getEventId(),
        Identifier.fromProto(createdEvent.getTemplateId()),
        createdEvent.getContractId(),
        DamlRecord.fromProto(createdEvent.getCreateArguments()),
        createdEvent.getCreatedEventBlob(),
        splitInterfaceViews.get(true).stream()
            .collect(
                Collectors.toUnmodifiableMap(
                    iv -> Identifier.fromProto(iv.getInterfaceId()),
                    iv -> DamlRecord.fromProto(iv.getViewValue()))),
        splitInterfaceViews.get(false).stream()
            .collect(
                Collectors.toUnmodifiableMap(
                    iv -> Identifier.fromProto(iv.getInterfaceId()),
                    EventOuterClass.InterfaceView::getViewStatus)),
        createdEvent.hasAgreementText()
            ? Optional.of(createdEvent.getAgreementText().getValue())
            : Optional.empty(),
        createdEvent.hasContractKey()
            ? Optional.of(Value.fromProto(createdEvent.getContractKey()))
            : Optional.empty(),
        createdEvent.getSignatoriesList(),
        createdEvent.getObserversList(),
        Instant.ofEpochSecond(
            createdEvent.getCreatedAt().getSeconds(), createdEvent.getCreatedAt().getNanos()));
  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DamlCollectors.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DamlCollectors.java
new file mode 100644
index 0000000000..8c642ee4d9
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DamlCollectors.java
@@ -0,0 +1,64 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import java.util.*;
+import java.util.function.Function;
+import java.util.stream.Collector;
+
+public final class DamlCollectors {
+
+  // no instantiation
+  private DamlCollectors() {}
+
+  public static  Collector, DamlList> toDamlList(Function valueMapper) {
+    return Collector.of(
+        ArrayList::new,
+        (acc, entry) -> acc.add(valueMapper.apply(entry)),
+        (left, right) -> {
+          left.addAll(right);
+          return left;
+        },
+        DamlList::fromPrivateList);
+  }
+
+  public static Collector, DamlList> toDamlList() {
+    return toDamlList(Function.identity());
+  }
+
+  public static  Collector, DamlTextMap> toDamlTextMap(
+      Function keyMapper, Function valueMapper) {
+
+    return Collector.of(
+        HashMap::new,
+        (acc, entry) -> acc.put(keyMapper.apply(entry), valueMapper.apply(entry)),
+        (left, right) -> {
+          left.putAll(right);
+          return left;
+        },
+        DamlTextMap::fromPrivateMap);
+  }
+
+  public static Collector, Map, DamlTextMap>
+      toDamlTextMap() {
+    return toDamlTextMap(Map.Entry::getKey, Map.Entry::getValue);
+  }
+
+  public static  Collector, DamlGenMap> toDamlGenMap(
+      Function keyMapper, Function valueMapper) {
+
+    return Collector.of(
+        LinkedHashMap::new,
+        (acc, entry) -> acc.put(keyMapper.apply(entry), valueMapper.apply(entry)),
+        (left, right) -> {
+          left.putAll(right);
+          return left;
+        },
+        DamlGenMap::fromPrivateMap);
+  }
+
+  public static Collector, Map, DamlGenMap> toMap() {
+    return toDamlGenMap(Map.Entry::getKey, Map.Entry::getValue);
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DamlEnum.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DamlEnum.java
new file mode 100644
index 0000000000..24af2e0969
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DamlEnum.java
@@ -0,0 +1,76 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.ValueOuterClass;
+import java.util.Objects;
+import java.util.Optional;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+public final class DamlEnum extends Value {
+
+  private final Optional enumId;
+
+  private final String constructor;
+
+  public DamlEnum(@NonNull Identifier enumId, @NonNull String constructor) {
+    this.enumId = Optional.of(enumId);
+    this.constructor = constructor;
+  }
+
+  public DamlEnum(@NonNull String constructor) {
+    this.enumId = Optional.empty();
+    this.constructor = constructor;
+  }
+
+  public static DamlEnum fromProto(ValueOuterClass.Enum value) {
+    String constructor = value.getConstructor();
+    if (value.hasEnumId()) {
+      Identifier variantId = Identifier.fromProto(value.getEnumId());
+      return new DamlEnum(variantId, constructor);
+    } else {
+      return new DamlEnum(constructor);
+    }
+  }
+
+  @NonNull
+  public Optional getEnumId() {
+    return enumId;
+  }
+
+  @NonNull
+  public String getConstructor() {
+    return constructor;
+  }
+
+  @Override
+  public ValueOuterClass.Value toProto() {
+    return ValueOuterClass.Value.newBuilder().setEnum(this.toProtoEnum()).build();
+  }
+
+  public ValueOuterClass.Enum toProtoEnum() {
+    ValueOuterClass.Enum.Builder builder = ValueOuterClass.Enum.newBuilder();
+    builder.setConstructor(this.getConstructor());
+    this.getEnumId().ifPresent(identifier -> builder.setEnumId(identifier.toProto()));
+    return builder.build();
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    DamlEnum value = (DamlEnum) o;
+    return Objects.equals(enumId, value.enumId) && Objects.equals(constructor, value.constructor);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(enumId, constructor);
+  }
+
+  @Override
+  public String toString() {
+    return "Enum{" + "variantId=" + enumId + ", constructor='" + constructor + "'}";
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DamlGenMap.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DamlGenMap.java
new file mode 100644
index 0000000000..9a82711869
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DamlGenMap.java
@@ -0,0 +1,89 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.ValueOuterClass;
+import java.util.*;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+public final class DamlGenMap extends Value {
+
+  private final Map map;
+
+  private DamlGenMap(@NonNull Map<@NonNull Value, @NonNull Value> map) {
+    this.map = map;
+  }
+
+  /** The map that is passed to this constructor must not be changed once passed. */
+  static @NonNull DamlGenMap fromPrivateMap(@NonNull Map<@NonNull Value, @NonNull Value> map) {
+    return new DamlGenMap(Collections.unmodifiableMap(map));
+  }
+
+  public static DamlGenMap of(@NonNull Map<@NonNull Value, @NonNull Value> map) {
+    return fromPrivateMap(new LinkedHashMap<>(map));
+  }
+
+  public Stream> stream() {
+    return map.entrySet().stream();
+  }
+
+  public @NonNull  Map<@NonNull K, @NonNull V> toMap(
+      @NonNull Function<@NonNull Value, @NonNull K> keyMapper,
+      @NonNull Function<@NonNull Value, @NonNull V> valueMapper) {
+    return stream()
+        .collect(
+            Collectors.toMap(
+                e -> keyMapper.apply(e.getKey()),
+                e -> valueMapper.apply(e.getValue()),
+                (left, right) -> right,
+                LinkedHashMap::new));
+  }
+
+  public @NonNull  Map<@NonNull V, @NonNull V> toMap(
+      @NonNull Function<@NonNull Value, @NonNull V> valueMapper) {
+    return toMap(valueMapper, valueMapper);
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    DamlGenMap other = (DamlGenMap) o;
+    return Objects.equals(map, other.map);
+  }
+
+  @Override
+  public int hashCode() {
+    return map.hashCode();
+  }
+
+  @Override
+  public @NonNull String toString() {
+    StringJoiner sj = new StringJoiner(", ", "GenMap{", "}");
+    map.forEach((key, value) -> sj.add(key.toString() + " -> " + value.toString()));
+    return sj.toString();
+  }
+
+  @Override
+  public ValueOuterClass.Value toProto() {
+    ValueOuterClass.GenMap.Builder mb = ValueOuterClass.GenMap.newBuilder();
+    map.forEach(
+        (key, value) ->
+            mb.addEntries(
+                ValueOuterClass.GenMap.Entry.newBuilder()
+                    .setKey(key.toProto())
+                    .setValue(value.toProto())));
+    return ValueOuterClass.Value.newBuilder().setGenMap(mb).build();
+  }
+
+  public static @NonNull DamlGenMap fromProto(ValueOuterClass.GenMap map) {
+    return map.getEntriesList().stream()
+        .collect(
+            DamlCollectors.toDamlGenMap(
+                entry -> fromProto(entry.getKey()), entry -> fromProto(entry.getValue())));
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DamlList.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DamlList.java
new file mode 100644
index 0000000000..cea4f1b419
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DamlList.java
@@ -0,0 +1,87 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.ValueOuterClass;
+import java.util.*;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+public final class DamlList extends Value {
+
+  private List values;
+
+  private DamlList() {}
+
+  /** The list that is passed to this constructor must not be change once passed. */
+  static @NonNull DamlList fromPrivateList(@NonNull List<@NonNull Value> values) {
+    DamlList damlList = new DamlList();
+    damlList.values = Collections.unmodifiableList(values);
+    return damlList;
+  }
+
+  public static DamlList of(@NonNull List<@NonNull Value> values) {
+    return fromPrivateList(new ArrayList<>(values));
+  }
+
+  public static DamlList of(@NonNull Value... values) {
+    return fromPrivateList(Arrays.asList(values));
+  }
+
+  @Deprecated // use DamlList:of
+  public DamlList(@NonNull List<@NonNull Value> values) {
+    this.values = values;
+  }
+
+  @Deprecated // use DamlMap:of
+  public DamlList(@NonNull Value... values) {
+    this(Arrays.asList(values));
+  }
+
+  @Deprecated // use DamlMap::stream or DamlMap::toListf
+  public @NonNull List<@NonNull Value> getValues() {
+    return toList(Function.identity());
+  }
+
+  public @NonNull Stream stream() {
+    return values.stream();
+  }
+
+  public @NonNull  List toList(Function valueMapper) {
+    return stream().map(valueMapper).collect(Collectors.toList());
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    DamlList list = (DamlList) o;
+    return Objects.equals(values, list.values);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(values);
+  }
+
+  @Override
+  public String toString() {
+    return "DamlList{" + "values=" + values + '}';
+  }
+
+  @Override
+  public ValueOuterClass.Value toProto() {
+    ValueOuterClass.List.Builder builder = ValueOuterClass.List.newBuilder();
+    for (Value value : this.values) {
+      builder.addElements(value.toProto());
+    }
+    return ValueOuterClass.Value.newBuilder().setList(builder.build()).build();
+  }
+
+  public static @NonNull DamlList fromProto(ValueOuterClass.List list) {
+    return list.getElementsList().stream().collect(DamlCollectors.toDamlList(Value::fromProto));
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DamlMap.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DamlMap.java
new file mode 100644
index 0000000000..da13834682
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DamlMap.java
@@ -0,0 +1,17 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import java.util.*;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
// FIXME When removing this after the deprecation period is over, make DamlTextMap final
/** @deprecated Use {@link DamlTextMap} instead. */
@Deprecated
public final class DamlMap extends DamlTextMap {

  // Wraps an unmodifiable defensive copy, so later changes to the caller's map are not reflected.
  public DamlMap(Map<@NonNull String, @NonNull Value> value) {
    super(Collections.unmodifiableMap(new HashMap<>(value)));
  }
}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DamlOptional.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DamlOptional.java
new file mode 100644
index 0000000000..58c6692a57
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DamlOptional.java
@@ -0,0 +1,80 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.ValueOuterClass;
+import java.util.*;
+import java.util.function.Function;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+public final class DamlOptional extends Value {
+
+  public static DamlOptional EMPTY = new DamlOptional(Optional.empty());
+
+  private final Value value;
+
+  DamlOptional(Value value) {
+    this.value = value;
+  }
+
+  @Deprecated // use DamlOptional.of
+  public DamlOptional(Optional<@NonNull Value> value) {
+    this(value.orElse(null));
+  }
+
+  public static DamlOptional of(@NonNull Optional<@NonNull Value> value) {
+    if (value.isPresent()) return new DamlOptional((value.get()));
+    else return EMPTY;
+  }
+
+  public static DamlOptional of(Value value) {
+    return new DamlOptional(value);
+  }
+
+  public java.util.Optional getValue() {
+    return java.util.Optional.ofNullable(value);
+  }
+
+  public @NonNull  Optional toOptional(Function<@NonNull Value, @NonNull V> valueMapper) {
+    return (value == null) ? Optional.empty() : Optional.of(valueMapper.apply(value));
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    DamlOptional optional = (DamlOptional) o;
+    return Objects.equals(value, optional.value);
+  }
+
+  public boolean isEmpty() {
+    return value == null;
+  }
+
+  @Override
+  public int hashCode() {
+    return (value == null) ? 0 : value.hashCode();
+  }
+
+  @Override
+  public @NonNull String toString() {
+    return "Optional{" + "value=" + value + '}';
+  }
+
+  @Deprecated // use DamlOptional::EMPTY
+  public static @NonNull DamlOptional empty() {
+    return EMPTY;
+  }
+
+  @Override
+  public ValueOuterClass.Value toProto() {
+    ValueOuterClass.Optional.Builder ob = ValueOuterClass.Optional.newBuilder();
+    if (value != null) ob.setValue(value.toProto());
+    return ValueOuterClass.Value.newBuilder().setOptional(ob.build()).build();
+  }
+
+  public static DamlOptional fromProto(ValueOuterClass.Optional optional) {
+    return (optional.hasValue()) ? new DamlOptional(fromProto(optional.getValue())) : EMPTY;
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DamlRecord.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DamlRecord.java
new file mode 100644
index 0000000000..6c665b074f
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DamlRecord.java
@@ -0,0 +1,181 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.ValueOuterClass;
+import java.util.*;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+public class DamlRecord extends Value {
+
+  private final Optional recordId;
+
+  private final Map fieldsMap;
+
+  private final List fields;
+
+  public DamlRecord(@NonNull Identifier recordId, @NonNull Field... fields) {
+    this(recordId, Arrays.asList(fields));
+  }
+
+  public DamlRecord(@NonNull Field... fields) {
+    this(Arrays.asList(fields));
+  }
+
+  public DamlRecord(@NonNull Identifier recordId, @NonNull List<@NonNull Field> fields) {
+    this(Optional.of(recordId), fields, fieldsListToHashMap(fields));
+  }
+
+  public DamlRecord(@NonNull List<@NonNull Field> fields) {
+    this(Optional.empty(), fields, fieldsListToHashMap(fields));
+  }
+
+  public DamlRecord(
+      @NonNull Optional recordId,
+      @NonNull List<@NonNull Field> fields,
+      Map fieldsMap) {
+    this.recordId = recordId;
+    this.fields = fields;
+    this.fieldsMap = fieldsMap;
+  }
+
+  private static Map fieldsListToHashMap(@NonNull List<@NonNull Field> fields) {
+    if (fields.isEmpty() || !fields.get(0).getLabel().isPresent()) {
+      return Collections.emptyMap();
+    } else {
+      HashMap fieldsMap = new HashMap<>(fields.size());
+      for (Field field : fields) {
+        fieldsMap.put(field.getLabel().get(), field.getValue());
+      }
+      return fieldsMap;
+    }
+  }
+
+  @NonNull
+  public static DamlRecord fromProto(ValueOuterClass.Record record) {
+    ArrayList fields = new ArrayList<>(record.getFieldsCount());
+    HashMap fieldsMap = new HashMap<>(record.getFieldsCount());
+    for (ValueOuterClass.RecordField recordField : record.getFieldsList()) {
+      Field field = Field.fromProto(recordField);
+      fields.add(field);
+      if (field.getLabel().isPresent()) {
+        fieldsMap.put(field.getLabel().get(), field.getValue());
+      }
+    }
+    if (record.hasRecordId()) {
+      Identifier recordId = Identifier.fromProto(record.getRecordId());
+      return new DamlRecord(Optional.of(recordId), fields, fieldsMap);
+    } else {
+      return new DamlRecord(Optional.empty(), fields, fieldsMap);
+    }
+  }
+
+  @Override
+  public ValueOuterClass.Value toProto() {
+    return ValueOuterClass.Value.newBuilder().setRecord(this.toProtoRecord()).build();
+  }
+
+  public ValueOuterClass.Record toProtoRecord() {
+    ValueOuterClass.Record.Builder recordBuilder = ValueOuterClass.Record.newBuilder();
+    this.recordId.ifPresent(recordId -> recordBuilder.setRecordId(recordId.toProto()));
+    for (Field field : this.fields) {
+      recordBuilder.addFields(field.toProto());
+    }
+    return recordBuilder.build();
+  }
+
+  @NonNull
+  public Optional getRecordId() {
+    return recordId;
+  }
+
+  @NonNull
+  public List getFields() {
+    return fields;
+  }
+
+  /** @return the Map of this DamlRecord fields containing the records that have the label */
+  @NonNull
+  public Map<@NonNull String, @NonNull Value> getFieldsMap() {
+    return fieldsMap;
+  }
+
+  @Override
+  public String toString() {
+    return "DamlRecord{" + "recordId=" + recordId + ", fields=" + fields + '}';
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    DamlRecord record = (DamlRecord) o;
+    return Objects.equals(recordId, record.recordId) && Objects.equals(fields, record.fields);
+  }
+
+  @Override
+  public int hashCode() {
+
+    return Objects.hash(recordId, fields);
+  }
+
+  public static class Field {
+
+    private final Optional label;
+
+    private final Value value;
+
+    public Field(@NonNull String label, @NonNull Value value) {
+      this.label = Optional.of(label);
+      this.value = value;
+    }
+
+    public Field(@NonNull Value value) {
+      this.label = Optional.empty();
+      this.value = value;
+    }
+
+    @NonNull
+    public Optional getLabel() {
+      return label;
+    }
+
+    @NonNull
+    public Value getValue() {
+      return value;
+    }
+
+    public static Field fromProto(ValueOuterClass.RecordField field) {
+      String label = field.getLabel();
+      Value value = Value.fromProto(field.getValue());
+      return label.isEmpty() ? new Field(value) : new Field(label, value);
+    }
+
+    public ValueOuterClass.RecordField toProto() {
+      ValueOuterClass.RecordField.Builder builder = ValueOuterClass.RecordField.newBuilder();
+      this.label.ifPresent(builder::setLabel);
+      builder.setValue(this.value.toProto());
+      return builder.build();
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) return true;
+      if (o == null || getClass() != o.getClass()) return false;
+      Field field = (Field) o;
+      return Objects.equals(label, field.label) && Objects.equals(value, field.value);
+    }
+
+    @Override
+    public int hashCode() {
+
+      return Objects.hash(label, value);
+    }
+
+    @Override
+    public String toString() {
+      return "Field{" + "label=" + label + ", value=" + value + '}';
+    }
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DamlTextMap.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DamlTextMap.java
new file mode 100644
index 0000000000..092723f9b2
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DamlTextMap.java
@@ -0,0 +1,90 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.ValueOuterClass;
+import java.util.*;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+public class DamlTextMap extends Value {
+
+  private final Map map;
+
+  DamlTextMap(Map map) {
+    this.map = map;
+  }
+
+  /** The map that is passed to this constructor must not be changed once passed. */
+  static @NonNull DamlTextMap fromPrivateMap(Map<@NonNull String, @NonNull Value> value) {
+    return new DamlTextMap(Collections.unmodifiableMap(value));
+  }
+
+  private static @NonNull DamlTextMap EMPTY = fromPrivateMap(Collections.emptyMap());
+
+  public static DamlTextMap of(@NonNull Map<@NonNull String, @NonNull Value> value) {
+    return fromPrivateMap(new HashMap<>(value));
+  }
+
+  @Deprecated // use DamlTextMap::toMap or DamlTextMap::stream
+  public final @NonNull Map<@NonNull String, @NonNull Value> getMap() {
+    return toMap(Function.identity());
+  }
+
+  public Stream> stream() {
+    return map.entrySet().stream();
+  }
+
+  public final @NonNull  Map toMap(
+      @NonNull Function<@NonNull String, @NonNull K> keyMapper,
+      @NonNull Function<@NonNull Value, @NonNull V> valueMapper) {
+    return stream()
+        .collect(
+            Collectors.toMap(
+                e -> keyMapper.apply(e.getKey()), e -> valueMapper.apply(e.getValue())));
+  }
+
+  public final @NonNull  Map<@NonNull String, @NonNull V> toMap(
+      @NonNull Function<@NonNull Value, @NonNull V> valueMapper) {
+    return toMap(Function.identity(), valueMapper);
+  }
+
+  @Override
+  public final boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    DamlTextMap other = (DamlTextMap) o;
+    return Objects.equals(map, other.map);
+  }
+
+  @Override
+  public final int hashCode() {
+    return map.hashCode();
+  }
+
+  @Override
+  public final @NonNull String toString() {
+    StringJoiner sj = new StringJoiner(", ", "TextMap{", "}");
+    map.forEach((key, value) -> sj.add(key + "->" + value.toString()));
+    return sj.toString();
+  }
+
+  @Override
+  public final ValueOuterClass.Value toProto() {
+    ValueOuterClass.Map.Builder mb = ValueOuterClass.Map.newBuilder();
+    map.forEach(
+        (k, v) ->
+            mb.addEntries(ValueOuterClass.Map.Entry.newBuilder().setKey(k).setValue(v.toProto())));
+    return ValueOuterClass.Value.newBuilder().setMap(mb).build();
+  }
+
+  public static @NonNull DamlTextMap fromProto(ValueOuterClass.Map map) {
+    return map.getEntriesList().stream()
+        .collect(
+            DamlCollectors.toDamlTextMap(
+                ValueOuterClass.Map.Entry::getKey, entry -> fromProto(entry.getValue())));
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Date.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Date.java
new file mode 100644
index 0000000000..c46d9ef1df
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Date.java
@@ -0,0 +1,47 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.ValueOuterClass;
+import java.time.LocalDate;
+import java.util.Objects;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+public final class Date extends Value {
+
+  private final LocalDate value;
+
+  public Date(int value) {
+    this.value = LocalDate.ofEpochDay(value);
+  }
+
+  @Override
+  public ValueOuterClass.Value toProto() {
+    return ValueOuterClass.Value.newBuilder().setDate((int) this.value.toEpochDay()).build();
+  }
+
+  @NonNull
+  public LocalDate getValue() {
+    return value;
+  }
+
+  @Override
+  public String toString() {
+    return "Date{" + "value=" + value + '}';
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    Date date = (Date) o;
+    return Objects.equals(value, date.value);
+  }
+
+  @Override
+  public int hashCode() {
+
+    return Objects.hash(value);
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Decimal.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Decimal.java
new file mode 100644
index 0000000000..3cda2d15de
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Decimal.java
@@ -0,0 +1,16 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import java.math.BigDecimal;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+// FIXME When removing this after the deprecation period is over, make Numeric final
+/** @deprecated Use {@link Numeric} instead. */
+@Deprecated
+public final class Decimal extends Numeric {
+  /**
+   * Wraps the given decimal; behaves exactly like {@link Numeric}.
+   *
+   * @param value the numeric value to represent
+   */
+  public Decimal(@NonNull BigDecimal value) {
+    super(value);
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DeleteUserRequest.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DeleteUserRequest.java
new file mode 100644
index 0000000000..025128fbbf
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DeleteUserRequest.java
@@ -0,0 +1,45 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.admin.UserManagementServiceOuterClass;
+import java.util.Objects;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+/** A request to delete the user with the given id. */
+public final class DeleteUserRequest {
+
+  private final String userId;
+
+  public DeleteUserRequest(@NonNull String userId) {
+    this.userId = userId;
+  }
+
+  /** The id of the user to delete. */
+  public String getId() {
+    return userId;
+  }
+
+  /** Converts this request to its Ledger API protobuf representation. */
+  public UserManagementServiceOuterClass.DeleteUserRequest toProto() {
+    return UserManagementServiceOuterClass.DeleteUserRequest.newBuilder()
+        .setUserId(userId)
+        .build();
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (o == this) {
+      return true;
+    }
+    if (o == null || o.getClass() != getClass()) {
+      return false;
+    }
+    return Objects.equals(userId, ((DeleteUserRequest) o).userId);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(userId);
+  }
+
+  @Override
+  public String toString() {
+    return "DeleteUserRequest{userId='" + userId + "'}";
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DeleteUserResponse.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DeleteUserResponse.java
new file mode 100644
index 0000000000..b411d04067
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DeleteUserResponse.java
@@ -0,0 +1,24 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.admin.UserManagementServiceOuterClass;
+
+/** The (empty) response to a {@link DeleteUserRequest}. */
+public final class DeleteUserResponse {
+
+  // The response message carries no data, so a single shared instance suffices.
+  private static final DeleteUserResponse INSTANCE = new DeleteUserResponse();
+
+  private DeleteUserResponse() {}
+
+  public static DeleteUserResponse fromProto(
+      UserManagementServiceOuterClass.DeleteUserResponse response) {
+    // The protobuf message is empty; the singleton is returned unconditionally.
+    return INSTANCE;
+  }
+
+  @Override
+  public String toString() {
+    return "DeleteUserResponse{}";
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DisclosedContract.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DisclosedContract.java
new file mode 100644
index 0000000000..032d1e2096
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/DisclosedContract.java
@@ -0,0 +1,27 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.CommandsOuterClass;
+import com.google.protobuf.ByteString;
+import java.util.Objects;
+
+public final class DisclosedContract {
+  public final Identifier templateId;
+  public final String contractId;
+  public final ByteString createdEventBlob;
+
+  public DisclosedContract(Identifier templateId, String contractId, ByteString createdEventBlob) {
+    this.templateId = templateId;
+    this.contractId = contractId;
+    this.createdEventBlob = createdEventBlob;
+  }
+
+  public CommandsOuterClass.DisclosedContract toProto() {
+    return CommandsOuterClass.DisclosedContract.newBuilder()
+        .setTemplateId(this.templateId.toProto())
+        .setContractId(this.contractId)
+        .setCreatedEventBlob(this.createdEventBlob)
+        .build();
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Event.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Event.java
new file mode 100644
index 0000000000..bf7b81de11
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Event.java
@@ -0,0 +1,56 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.EventOuterClass;
+import java.util.List;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+/**
+ * This interface represents events in {@link Transaction}s.
+ *
+ * @see CreatedEvent
+ * @see ArchivedEvent
+ * @see Transaction
+ */
+public interface Event {
+
+  /** The parties notified of this event. */
+  @NonNull
+  List<@NonNull String> getWitnessParties();
+
+  /** The id of this event. */
+  @NonNull
+  String getEventId();
+
+  /** The template of the contract this event refers to. */
+  @NonNull
+  Identifier getTemplateId();
+
+  /** The id of the contract this event refers to. */
+  @NonNull
+  String getContractId();
+
+  /**
+   * Converts this event to its Ledger API protobuf representation.
+   *
+   * @throws RuntimeException if this instance is neither a {@link CreatedEvent} nor an {@link
+   *     ArchivedEvent}
+   */
+  default EventOuterClass.Event toProtoEvent() {
+    EventOuterClass.Event.Builder eventBuilder = EventOuterClass.Event.newBuilder();
+    if (this instanceof ArchivedEvent) {
+      ArchivedEvent event = (ArchivedEvent) this;
+      eventBuilder.setArchived(event.toProto());
+    } else if (this instanceof CreatedEvent) {
+      CreatedEvent event = (CreatedEvent) this;
+      eventBuilder.setCreated(event.toProto());
+    } else {
+      // Fix: the previous message also named ExercisedEvent, but ExercisedEvent implements
+      // TreeEvent, not Event, and was never handled here.
+      throw new RuntimeException("this should be ArchivedEvent or CreatedEvent, found " + this);
+    }
+    return eventBuilder.build();
+  }
+
+  static Event fromProtoEvent(EventOuterClass.Event event) {
+    if (event.hasCreated()) {
+      return CreatedEvent.fromProto(event.getCreated());
+    } else if (event.hasArchived()) {
+      return ArchivedEvent.fromProto(event.getArchived());
+    } else {
+      throw new UnsupportedEventTypeException(event.toString());
+    }
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/EventUtils.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/EventUtils.java
new file mode 100644
index 0000000000..cdb48f0a28
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/EventUtils.java
@@ -0,0 +1,33 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import java.util.List;
+
+/** Internal helpers for extracting specific events from transactions. */
+public class EventUtils {
+
+  private EventUtils() {}
+
+  /**
+   * Returns the only element of {@code events}, which must be a single {@link CreatedEvent}.
+   *
+   * <p>Fix: the parameter was a raw {@code List}; it is now typed.
+   *
+   * @throws IllegalArgumentException if {@code events} is not exactly one created event
+   * @hidden
+   */
+  public static CreatedEvent singleCreatedEvent(List<? extends Event> events) {
+    if (events.size() == 1 && events.get(0) instanceof CreatedEvent)
+      return (CreatedEvent) events.get(0);
+    throw new IllegalArgumentException(
+        "Expected exactly one created event from the transaction, got: " + events);
+  }
+
+  /**
+   * Returns the first {@link ExercisedEvent} among the root events of {@code txTree}.
+   *
+   * @throws IllegalArgumentException if no root event is an exercised event
+   * @hidden
+   */
+  public static ExercisedEvent firstExercisedEvent(TransactionTree txTree) {
+    var maybeExercisedEvent =
+        txTree.getRootEventIds().stream()
+            .map(eventId -> txTree.getEventsById().get(eventId))
+            .filter(e -> e instanceof ExercisedEvent)
+            .map(e -> (ExercisedEvent) e)
+            .findFirst();
+
+    return maybeExercisedEvent.orElseThrow(
+        () ->
+            new IllegalArgumentException("Expect an exercised event but not found. tx: " + txTree));
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ExerciseByKeyCommand.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ExerciseByKeyCommand.java
new file mode 100644
index 0000000000..7fb49702b1
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ExerciseByKeyCommand.java
@@ -0,0 +1,100 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.CommandsOuterClass;
+import java.util.Objects;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+/** A command to exercise a choice on a contract identified by its contract key. */
+public final class ExerciseByKeyCommand extends Command {
+
+  private final Identifier templateId;
+  private final Value contractKey;
+  private final String choice;
+  private final Value choiceArgument;
+
+  public ExerciseByKeyCommand(
+      @NonNull Identifier templateId,
+      @NonNull Value contractKey,
+      @NonNull String choice,
+      @NonNull Value choiceArgument) {
+    this.templateId = templateId;
+    this.contractKey = contractKey;
+    this.choice = choice;
+    this.choiceArgument = choiceArgument;
+  }
+
+  /** Reconstructs a command from its Ledger API protobuf representation. */
+  public static ExerciseByKeyCommand fromProto(CommandsOuterClass.ExerciseByKeyCommand command) {
+    return new ExerciseByKeyCommand(
+        Identifier.fromProto(command.getTemplateId()),
+        Value.fromProto(command.getContractKey()),
+        command.getChoice(),
+        Value.fromProto(command.getChoiceArgument()));
+  }
+
+  /** Converts this command to its Ledger API protobuf representation. */
+  public CommandsOuterClass.ExerciseByKeyCommand toProto() {
+    return CommandsOuterClass.ExerciseByKeyCommand.newBuilder()
+        .setTemplateId(templateId.toProto())
+        .setContractKey(contractKey.toProto())
+        .setChoice(choice)
+        .setChoiceArgument(choiceArgument.toProto())
+        .build();
+  }
+
+  @Override
+  public @NonNull Identifier getTemplateId() {
+    return templateId;
+  }
+
+  public @NonNull Value getContractKey() {
+    return contractKey;
+  }
+
+  public @NonNull String getChoice() {
+    return choice;
+  }
+
+  public @NonNull Value getChoiceArgument() {
+    return choiceArgument;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (o == this) {
+      return true;
+    }
+    if (o == null || o.getClass() != getClass()) {
+      return false;
+    }
+    ExerciseByKeyCommand other = (ExerciseByKeyCommand) o;
+    return Objects.equals(templateId, other.templateId)
+        && Objects.equals(contractKey, other.contractKey)
+        && Objects.equals(choice, other.choice)
+        && Objects.equals(choiceArgument, other.choiceArgument);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(templateId, contractKey, choice, choiceArgument);
+  }
+
+  @Override
+  public String toString() {
+    return "ExerciseByKeyCommand{templateId="
+        + templateId
+        + ", contractKey='"
+        + contractKey
+        + "', choice='"
+        + choice
+        + "', choiceArgument="
+        + choiceArgument
+        + "}";
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ExerciseCommand.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ExerciseCommand.java
new file mode 100644
index 0000000000..447f423d9d
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ExerciseCommand.java
@@ -0,0 +1,101 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.CommandsOuterClass;
+import java.util.Objects;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+/** A command to exercise a choice on a contract identified by its contract id. */
+public final class ExerciseCommand extends Command {
+
+  private final Identifier templateId;
+  private final String contractId;
+  private final String choice;
+  private final Value choiceArgument;
+
+  public ExerciseCommand(
+      @NonNull Identifier templateId,
+      @NonNull String contractId,
+      @NonNull String choice,
+      @NonNull Value choiceArgument) {
+    this.templateId = templateId;
+    this.contractId = contractId;
+    this.choice = choice;
+    this.choiceArgument = choiceArgument;
+  }
+
+  /** Reconstructs a command from its Ledger API protobuf representation. */
+  public static ExerciseCommand fromProto(CommandsOuterClass.ExerciseCommand command) {
+    return new ExerciseCommand(
+        Identifier.fromProto(command.getTemplateId()),
+        command.getContractId(),
+        command.getChoice(),
+        Value.fromProto(command.getChoiceArgument()));
+  }
+
+  /** Converts this command to its Ledger API protobuf representation. */
+  public CommandsOuterClass.ExerciseCommand toProto() {
+    return CommandsOuterClass.ExerciseCommand.newBuilder()
+        .setTemplateId(templateId.toProto())
+        .setContractId(contractId)
+        .setChoice(choice)
+        .setChoiceArgument(choiceArgument.toProto())
+        .build();
+  }
+
+  @Override
+  public @NonNull Identifier getTemplateId() {
+    return templateId;
+  }
+
+  public @NonNull String getContractId() {
+    return contractId;
+  }
+
+  public @NonNull String getChoice() {
+    return choice;
+  }
+
+  public @NonNull Value getChoiceArgument() {
+    return choiceArgument;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (o == this) {
+      return true;
+    }
+    if (o == null || o.getClass() != getClass()) {
+      return false;
+    }
+    ExerciseCommand other = (ExerciseCommand) o;
+    return Objects.equals(templateId, other.templateId)
+        && Objects.equals(contractId, other.contractId)
+        && Objects.equals(choice, other.choice)
+        && Objects.equals(choiceArgument, other.choiceArgument);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(templateId, contractId, choice, choiceArgument);
+  }
+
+  @Override
+  public String toString() {
+    return "ExerciseCommand{templateId="
+        + templateId
+        + ", contractId='"
+        + contractId
+        + "', choice='"
+        + choice
+        + "', choiceArgument="
+        + choiceArgument
+        + "}";
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ExercisedEvent.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ExercisedEvent.java
new file mode 100644
index 0000000000..0a81d979dc
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ExercisedEvent.java
@@ -0,0 +1,216 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.EventOuterClass;
+import java.util.List;
+import java.util.Objects;
+import java.util.Optional;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+/**
+ * An event recording the exercise of a choice on a contract, as it appears in a {@link
+ * TransactionTree}.
+ *
+ * <p>Fix: the generic type arguments on the fields and on {@code getInterfaceId} had been
+ * stripped (raw {@code List}/{@code Optional}); with a raw {@code Optional} the call {@code
+ * i.toProto()} in {@link #toProto} could not compile. They are restored here.
+ */
+public final class ExercisedEvent implements TreeEvent {
+
+  private final List<@NonNull String> witnessParties;
+
+  private final String eventId;
+
+  private final Identifier templateId;
+
+  // Presumably set when the choice was exercised through an interface — confirm.
+  private final Optional<Identifier> interfaceId;
+
+  private final String contractId;
+
+  private final String choice;
+
+  private final Value choiceArgument;
+
+  private final List<@NonNull String> actingParties;
+
+  private final boolean consuming;
+
+  private final List<@NonNull String> childEventIds;
+
+  private final Value exerciseResult;
+
+  public ExercisedEvent(
+      @NonNull List<@NonNull String> witnessParties,
+      @NonNull String eventId,
+      @NonNull Identifier templateId,
+      @NonNull Optional<Identifier> interfaceId,
+      @NonNull String contractId,
+      @NonNull String choice,
+      @NonNull Value choiceArgument,
+      @NonNull List<@NonNull String> actingParties,
+      boolean consuming,
+      @NonNull List<@NonNull String> childEventIds,
+      @NonNull Value exerciseResult) {
+    this.witnessParties = witnessParties;
+    this.eventId = eventId;
+    this.templateId = templateId;
+    this.interfaceId = interfaceId;
+    this.contractId = contractId;
+    this.choice = choice;
+    this.choiceArgument = choiceArgument;
+    this.actingParties = actingParties;
+    this.consuming = consuming;
+    this.childEventIds = childEventIds;
+    this.exerciseResult = exerciseResult;
+  }
+
+  @NonNull
+  @Override
+  public List<@NonNull String> getWitnessParties() {
+    return witnessParties;
+  }
+
+  @NonNull
+  @Override
+  public String getEventId() {
+    return eventId;
+  }
+
+  @NonNull
+  @Override
+  public Identifier getTemplateId() {
+    return templateId;
+  }
+
+  @NonNull
+  public Optional<Identifier> getInterfaceId() {
+    return interfaceId;
+  }
+
+  @NonNull
+  @Override
+  public String getContractId() {
+    return contractId;
+  }
+
+  @NonNull
+  public String getChoice() {
+    return choice;
+  }
+
+  @NonNull
+  public List<@NonNull String> getChildEventIds() {
+    return childEventIds;
+  }
+
+  @NonNull
+  public Value getChoiceArgument() {
+    return choiceArgument;
+  }
+
+  public @NonNull List<@NonNull String> getActingParties() {
+    return actingParties;
+  }
+
+  public boolean isConsuming() {
+    return consuming;
+  }
+
+  @NonNull
+  public Value getExerciseResult() {
+    return exerciseResult;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    ExercisedEvent that = (ExercisedEvent) o;
+    return consuming == that.consuming
+        && Objects.equals(witnessParties, that.witnessParties)
+        && Objects.equals(eventId, that.eventId)
+        && Objects.equals(templateId, that.templateId)
+        && Objects.equals(interfaceId, that.interfaceId)
+        && Objects.equals(contractId, that.contractId)
+        && Objects.equals(choice, that.choice)
+        && Objects.equals(choiceArgument, that.choiceArgument)
+        && Objects.equals(actingParties, that.actingParties)
+        && Objects.equals(childEventIds, that.childEventIds)
+        && Objects.equals(exerciseResult, that.exerciseResult);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(
+        witnessParties,
+        eventId,
+        templateId,
+        interfaceId,
+        contractId,
+        choice,
+        choiceArgument,
+        actingParties,
+        childEventIds,
+        consuming,
+        exerciseResult);
+  }
+
+  @Override
+  public String toString() {
+    return "ExercisedEvent{"
+        + "witnessParties="
+        + witnessParties
+        + ", eventId='"
+        + eventId
+        + '\''
+        + ", templateId="
+        + templateId
+        + ", interfaceId="
+        + interfaceId
+        + ", contractId='"
+        + contractId
+        + '\''
+        + ", choice='"
+        + choice
+        + '\''
+        + ", choiceArgument="
+        + choiceArgument
+        + ", actingParties="
+        + actingParties
+        + ", consuming="
+        + consuming
+        + ", childEventIds="
+        + childEventIds
+        + ", exerciseResult="
+        + exerciseResult
+        + '}';
+  }
+
+  /** Converts this event to its Ledger API protobuf representation. */
+  public EventOuterClass.@NonNull ExercisedEvent toProto() {
+    EventOuterClass.ExercisedEvent.Builder builder = EventOuterClass.ExercisedEvent.newBuilder();
+    builder.setEventId(getEventId());
+    builder.setChoice(getChoice());
+    builder.setChoiceArgument(getChoiceArgument().toProto());
+    builder.setConsuming(isConsuming());
+    builder.setContractId(getContractId());
+    builder.setTemplateId(getTemplateId().toProto());
+    interfaceId.ifPresent(i -> builder.setInterfaceId(i.toProto()));
+    builder.addAllActingParties(getActingParties());
+    builder.addAllWitnessParties(getWitnessParties());
+    builder.addAllChildEventIds(getChildEventIds());
+    builder.setExerciseResult(getExerciseResult().toProto());
+    return builder.build();
+  }
+
+  /** Reconstructs an event from its Ledger API protobuf representation. */
+  public static ExercisedEvent fromProto(EventOuterClass.ExercisedEvent exercisedEvent) {
+    return new ExercisedEvent(
+        exercisedEvent.getWitnessPartiesList(),
+        exercisedEvent.getEventId(),
+        Identifier.fromProto(exercisedEvent.getTemplateId()),
+        exercisedEvent.hasInterfaceId()
+            ? Optional.of(Identifier.fromProto(exercisedEvent.getInterfaceId()))
+            : Optional.empty(),
+        exercisedEvent.getContractId(),
+        exercisedEvent.getChoice(),
+        Value.fromProto(exercisedEvent.getChoiceArgument()),
+        exercisedEvent.getActingPartiesList(),
+        exercisedEvent.getConsuming(),
+        exercisedEvent.getChildEventIdsList(),
+        Value.fromProto(exercisedEvent.getExerciseResult()));
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Filter.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Filter.java
new file mode 100644
index 0000000000..23cd4c2949
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Filter.java
@@ -0,0 +1,97 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.TransactionFilterOuterClass;
+
+/** A per-party filter: either an {@link InclusiveFilter} or the catch-all {@code NoFilter}. */
+public abstract class Filter {
+
+  public static Filter fromProto(TransactionFilterOuterClass.Filters filters) {
+    if (filters.hasInclusive()) {
+      return InclusiveFilter.fromProto(filters.getInclusive());
+    } else {
+      return NoFilter.instance;
+    }
+  }
+
+  public abstract TransactionFilterOuterClass.Filters toProto();
+
+  /**
+   * Settings for including an interface in {@link InclusiveFilter}. There are four possible values:
+   * {@link #HIDE_VIEW_HIDE_CREATED_EVENT_BLOB} and {@link #INCLUDE_VIEW_HIDE_CREATED_EVENT_BLOB}
+   * and {@link #HIDE_VIEW_INCLUDE_CREATED_EVENT_BLOB} and {@link
+   * #INCLUDE_VIEW_INCLUDE_CREATED_EVENT_BLOB}.
+   */
+  public enum Interface {
+    HIDE_VIEW_HIDE_CREATED_EVENT_BLOB(false, false),
+    INCLUDE_VIEW_HIDE_CREATED_EVENT_BLOB(true, false),
+    HIDE_VIEW_INCLUDE_CREATED_EVENT_BLOB(false, true),
+    INCLUDE_VIEW_INCLUDE_CREATED_EVENT_BLOB(true, true);
+
+    public final boolean includeInterfaceView;
+    public final boolean includeCreatedEventBlob;
+
+    Interface(boolean includeInterfaceView, boolean includeCreatedEventBlob) {
+      this.includeInterfaceView = includeInterfaceView;
+      this.includeCreatedEventBlob = includeCreatedEventBlob;
+    }
+
+    /** Maps a pair of flags to the enum constant carrying exactly those flags. */
+    private static Interface includeInterfaceView(
+        boolean includeInterfaceView, boolean includeCreatedEventBlob) {
+      if (!includeInterfaceView && !includeCreatedEventBlob)
+        return HIDE_VIEW_HIDE_CREATED_EVENT_BLOB;
+      else if (includeInterfaceView && !includeCreatedEventBlob)
+        return INCLUDE_VIEW_HIDE_CREATED_EVENT_BLOB;
+      else if (!includeInterfaceView) return HIDE_VIEW_INCLUDE_CREATED_EVENT_BLOB;
+      else return INCLUDE_VIEW_INCLUDE_CREATED_EVENT_BLOB;
+    }
+
+    public TransactionFilterOuterClass.InterfaceFilter toProto(Identifier interfaceId) {
+      return TransactionFilterOuterClass.InterfaceFilter.newBuilder()
+          .setInterfaceId(interfaceId.toProto())
+          .setIncludeInterfaceView(includeInterfaceView)
+          // Fix: this flag was previously dropped on serialization even though fromProto
+          // reads it, so includeCreatedEventBlob did not survive a to/from round trip.
+          .setIncludeCreatedEventBlob(includeCreatedEventBlob)
+          .build();
+    }
+
+    static Interface fromProto(TransactionFilterOuterClass.InterfaceFilter proto) {
+      return includeInterfaceView(
+          proto.getIncludeInterfaceView(), proto.getIncludeCreatedEventBlob());
+    }
+
+    /** Union of the two settings: a view/blob is included if either side includes it. */
+    Interface merge(Interface other) {
+      return includeInterfaceView(
+          includeInterfaceView || other.includeInterfaceView,
+          includeCreatedEventBlob || other.includeCreatedEventBlob);
+    }
+  }
+
+  /** Settings for including a template's created-event blob in {@link InclusiveFilter}. */
+  public enum Template {
+    INCLUDE_CREATED_EVENT_BLOB(true),
+    HIDE_CREATED_EVENT_BLOB(false);
+
+    public final boolean includeCreatedEventBlob;
+
+    Template(boolean includeCreatedEventBlob) {
+      this.includeCreatedEventBlob = includeCreatedEventBlob;
+    }
+
+    private static Template includeCreatedEventBlob(boolean includeCreatedEventBlob) {
+      return includeCreatedEventBlob ? INCLUDE_CREATED_EVENT_BLOB : HIDE_CREATED_EVENT_BLOB;
+    }
+
+    public TransactionFilterOuterClass.TemplateFilter toProto(Identifier templateId) {
+      return TransactionFilterOuterClass.TemplateFilter.newBuilder()
+          .setTemplateId(templateId.toProto())
+          .setIncludeCreatedEventBlob(includeCreatedEventBlob)
+          .build();
+    }
+
+    static Template fromProto(TransactionFilterOuterClass.TemplateFilter proto) {
+      return includeCreatedEventBlob(proto.getIncludeCreatedEventBlob());
+    }
+
+    /** Union of the two settings: the blob is included if either side includes it. */
+    Template merge(Template other) {
+      return includeCreatedEventBlob(includeCreatedEventBlob || other.includeCreatedEventBlob);
+    }
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/FiltersByParty.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/FiltersByParty.java
new file mode 100644
index 0000000000..991061f49d
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/FiltersByParty.java
@@ -0,0 +1,71 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.TransactionFilterOuterClass;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+/**
+ * A transaction filter assigning one {@link Filter} per party.
+ *
+ * <p>Fix: the generic type arguments had been stripped (raw {@code Map}/{@code Map.Entry});
+ * with raw entries, {@code entry.getValue().toProto()} could not compile. They are restored,
+ * and the field is now {@code final}.
+ */
+public final class FiltersByParty extends TransactionFilter {
+
+  private final Map<String, Filter> partyToFilters;
+
+  @Override
+  public Set<String> getParties() {
+    return partyToFilters.keySet();
+  }
+
+  public FiltersByParty(@NonNull Map<@NonNull String, @NonNull Filter> partyToFilters) {
+    this.partyToFilters = partyToFilters;
+  }
+
+  @Override
+  public TransactionFilterOuterClass.TransactionFilter toProto() {
+    HashMap<String, TransactionFilterOuterClass.Filters> protoFilters =
+        new HashMap<>(this.partyToFilters.size());
+    for (Map.Entry<String, Filter> entry : this.partyToFilters.entrySet()) {
+      protoFilters.put(entry.getKey(), entry.getValue().toProto());
+    }
+    return TransactionFilterOuterClass.TransactionFilter.newBuilder()
+        .putAllFiltersByParty(protoFilters)
+        .build();
+  }
+
+  /** Reconstructs the per-party filters from their Ledger API protobuf representation. */
+  public static FiltersByParty fromProto(
+      TransactionFilterOuterClass.TransactionFilter transactionFilter) {
+    Map<String, TransactionFilterOuterClass.Filters> partyToFilters =
+        transactionFilter.getFiltersByPartyMap();
+    HashMap<String, Filter> converted = new HashMap<>(partyToFilters.size());
+    for (Map.Entry<String, TransactionFilterOuterClass.Filters> entry :
+        partyToFilters.entrySet()) {
+      converted.put(entry.getKey(), Filter.fromProto(entry.getValue()));
+    }
+    return new FiltersByParty(converted);
+  }
+
+  public Map<String, Filter> getPartyToFilters() {
+    return partyToFilters;
+  }
+
+  @Override
+  public String toString() {
+    return "FiltersByParty{" + "partyToFilters=" + partyToFilters + '}';
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    FiltersByParty that = (FiltersByParty) o;
+    return Objects.equals(partyToFilters, that.partyToFilters);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(partyToFilters);
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetActiveContractsRequest.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetActiveContractsRequest.java
new file mode 100644
index 0000000000..04243d1aba
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetActiveContractsRequest.java
@@ -0,0 +1,85 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.ActiveContractsServiceOuterClass;
+import java.util.Objects;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+/** A request for the active-contracts snapshot of a ledger. */
+public final class GetActiveContractsRequest {
+
+  private final String ledgerId;
+
+  private final TransactionFilter transactionFilter;
+
+  private final boolean verbose;
+
+  /**
+   * @param ledgerId the id of the ledger to query
+   * @param transactionFilter the per-party filters selecting the contracts to return
+   * @param verbose the verbose flag forwarded in the request
+   */
+  public GetActiveContractsRequest(
+      @NonNull String ledgerId,
+      @NonNull TransactionFilter transactionFilter,
+      // Fix: @NonNull removed here — primitives cannot be null, the annotation was invalid.
+      boolean verbose) {
+    this.ledgerId = ledgerId;
+    this.transactionFilter = transactionFilter;
+    this.verbose = verbose;
+  }
+
+  /** Reconstructs a request from its Ledger API protobuf representation. */
+  public static GetActiveContractsRequest fromProto(
+      ActiveContractsServiceOuterClass.GetActiveContractsRequest request) {
+    String ledgerId = request.getLedgerId();
+    TransactionFilter filters = TransactionFilter.fromProto(request.getFilter());
+    boolean verbose = request.getVerbose();
+    return new GetActiveContractsRequest(ledgerId, filters, verbose);
+  }
+
+  /** Converts this request to its Ledger API protobuf representation. */
+  public ActiveContractsServiceOuterClass.GetActiveContractsRequest toProto() {
+    return ActiveContractsServiceOuterClass.GetActiveContractsRequest.newBuilder()
+        .setLedgerId(this.ledgerId)
+        .setFilter(this.transactionFilter.toProto())
+        .setVerbose(this.verbose)
+        .build();
+  }
+
+  @NonNull
+  public String getLedgerId() {
+    return ledgerId;
+  }
+
+  @NonNull
+  public TransactionFilter getTransactionFilter() {
+    return transactionFilter;
+  }
+
+  public boolean isVerbose() {
+    return verbose;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    GetActiveContractsRequest that = (GetActiveContractsRequest) o;
+    return verbose == that.verbose
+        && Objects.equals(ledgerId, that.ledgerId)
+        && Objects.equals(transactionFilter, that.transactionFilter);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(ledgerId, transactionFilter, verbose);
+  }
+
+  @Override
+  public String toString() {
+    return "GetActiveContractsRequest{"
+        + "ledgerId='"
+        + ledgerId
+        + '\''
+        + ", transactionFilter="
+        + transactionFilter
+        + ", verbose="
+        + verbose
+        + '}';
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetActiveContractsResponse.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetActiveContractsResponse.java
new file mode 100644
index 0000000000..578ffdf35d
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetActiveContractsResponse.java
@@ -0,0 +1,90 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.ActiveContractsServiceOuterClass;
+import java.util.List;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.stream.Collectors;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+public final class GetActiveContractsResponse implements WorkflowEvent {
+
+  private final String offset;
+
+  private final java.util.List activeContracts;
+
+  private final String workflowId;
+
+  public GetActiveContractsResponse(
+      @NonNull String offset, @NonNull List activeContracts, String workflowId) {
+    this.offset = offset;
+    this.activeContracts = activeContracts;
+    this.workflowId = workflowId;
+  }
+
+  public static GetActiveContractsResponse fromProto(
+      ActiveContractsServiceOuterClass.GetActiveContractsResponse response) {
+    List events =
+        response.getActiveContractsList().stream()
+            .map(CreatedEvent::fromProto)
+            .collect(Collectors.toList());
+    return new GetActiveContractsResponse(response.getOffset(), events, response.getWorkflowId());
+  }
+
+  public ActiveContractsServiceOuterClass.GetActiveContractsResponse toProto() {
+    return ActiveContractsServiceOuterClass.GetActiveContractsResponse.newBuilder()
+        .setOffset(this.offset)
+        .addAllActiveContracts(
+            this.activeContracts.stream().map(CreatedEvent::toProto).collect(Collectors.toList()))
+        .setWorkflowId(this.workflowId)
+        .build();
+  }
+
+  @NonNull
+  public Optional getOffset() {
+    // Empty string indicates that the field is not present in the protobuf.
+    return Optional.of(offset).filter(off -> !offset.equals(""));
+  }
+
+  @NonNull
+  public List<@NonNull CreatedEvent> getCreatedEvents() {
+    return activeContracts;
+  }
+
+  @NonNull
+  public String getWorkflowId() {
+    return workflowId;
+  }
+
+  @Override
+  public String toString() {
+    return "GetActiveContractsResponse{"
+        + "offset='"
+        + offset
+        + '\''
+        + ", activeContracts="
+        + activeContracts
+        + ", workflowId="
+        + workflowId
+        + '}';
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    GetActiveContractsResponse that = (GetActiveContractsResponse) o;
+    return Objects.equals(offset, that.offset)
+        && Objects.equals(activeContracts, that.activeContracts)
+        && Objects.equals(workflowId, that.workflowId);
+  }
+
+  @Override
+  public int hashCode() {
+
+    return Objects.hash(offset, activeContracts, workflowId);
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetEventsByContractIdResponse.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetEventsByContractIdResponse.java
new file mode 100644
index 0000000000..e12e39a931
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetEventsByContractIdResponse.java
@@ -0,0 +1,37 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import java.util.Optional;
+
+public final class GetEventsByContractIdResponse {
+  private final Optional createEvent;
+
+  private final Optional archiveEvent;
+
+  public GetEventsByContractIdResponse(
+      Optional createEvent, Optional archiveEvent) {
+    this.createEvent = createEvent;
+    this.archiveEvent = archiveEvent;
+  }
+
+  public Optional getCreateEvent() {
+    return createEvent;
+  }
+
+  public Optional getArchiveEvent() {
+    return archiveEvent;
+  }
+
+  public static GetEventsByContractIdResponse fromProto(
+      com.daml.ledger.api.v1.EventQueryServiceOuterClass.GetEventsByContractIdResponse response) {
+    return new GetEventsByContractIdResponse(
+        response.hasCreateEvent()
+            ? Optional.of(CreatedEvent.fromProto(response.getCreateEvent()))
+            : Optional.empty(),
+        response.hasArchiveEvent()
+            ? Optional.of(ArchivedEvent.fromProto(response.getArchiveEvent()))
+            : Optional.empty());
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetEventsByContractKeyResponse.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetEventsByContractKeyResponse.java
new file mode 100644
index 0000000000..d56a61af97
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetEventsByContractKeyResponse.java
@@ -0,0 +1,47 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import java.util.Optional;
+
+public final class GetEventsByContractKeyResponse {
+  private final Optional createEvent;
+  private final Optional archiveEvent;
+  private final Optional continuationToken;
+
+  public GetEventsByContractKeyResponse(
+      Optional createEvent,
+      Optional archiveEvent,
+      Optional continuationToken) {
+    this.createEvent = createEvent;
+    this.archiveEvent = archiveEvent;
+    this.continuationToken = continuationToken;
+  }
+
+  public Optional getCreateEvent() {
+    return createEvent;
+  }
+
+  public Optional getArchiveEvent() {
+    return archiveEvent;
+  }
+
+  public Optional getContinuationToken() {
+    return continuationToken;
+  }
+
+  public static GetEventsByContractKeyResponse fromProto(
+      com.daml.ledger.api.v1.EventQueryServiceOuterClass.GetEventsByContractKeyResponse response) {
+    return new GetEventsByContractKeyResponse(
+        response.hasCreateEvent()
+            ? Optional.of(CreatedEvent.fromProto(response.getCreateEvent()))
+            : Optional.empty(),
+        response.hasArchiveEvent()
+            ? Optional.of(ArchivedEvent.fromProto(response.getArchiveEvent()))
+            : Optional.empty(),
+        response.getContinuationToken().isEmpty()
+            ? Optional.of(response.getContinuationToken())
+            : Optional.empty());
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetFlatTransactionResponse.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetFlatTransactionResponse.java
new file mode 100644
index 0000000000..8b913578c2
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetFlatTransactionResponse.java
@@ -0,0 +1,51 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.TransactionServiceOuterClass;
+import java.util.Objects;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+public final class GetFlatTransactionResponse {
+
+  private final Transaction transaction;
+
+  public GetFlatTransactionResponse(@NonNull Transaction transaction) {
+    this.transaction = transaction;
+  }
+
+  public static GetFlatTransactionResponse fromProto(
+      TransactionServiceOuterClass.GetFlatTransactionResponse response) {
+    return new GetFlatTransactionResponse(Transaction.fromProto(response.getTransaction()));
+  }
+
+  public TransactionServiceOuterClass.GetFlatTransactionResponse toProto() {
+    return TransactionServiceOuterClass.GetFlatTransactionResponse.newBuilder()
+        .setTransaction(this.transaction.toProto())
+        .build();
+  }
+
+  public Transaction getTransaction() {
+    return transaction;
+  }
+
+  @Override
+  public String toString() {
+    return "GetFlatTransactionResponse{" + "transaction=" + transaction + '}';
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    GetFlatTransactionResponse that = (GetFlatTransactionResponse) o;
+    return Objects.equals(transaction, that.transaction);
+  }
+
+  @Override
+  public int hashCode() {
+
+    return Objects.hash(transaction);
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetLedgerEndResponse.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetLedgerEndResponse.java
new file mode 100644
index 0000000000..b157f28cfc
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetLedgerEndResponse.java
@@ -0,0 +1,52 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.TransactionServiceOuterClass;
+import java.util.Objects;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+public final class GetLedgerEndResponse {
+
+  private final LedgerOffset offset;
+
+  public GetLedgerEndResponse(@NonNull LedgerOffset offset) {
+    this.offset = offset;
+  }
+
+  public static GetLedgerEndResponse fromProto(
+      TransactionServiceOuterClass.GetLedgerEndResponse response) {
+    return new GetLedgerEndResponse(LedgerOffset.fromProto(response.getOffset()));
+  }
+
+  public TransactionServiceOuterClass.GetLedgerEndResponse toProto() {
+    return TransactionServiceOuterClass.GetLedgerEndResponse.newBuilder()
+        .setOffset(this.offset.toProto())
+        .build();
+  }
+
+  @NonNull
+  public LedgerOffset getOffset() {
+    return offset;
+  }
+
+  @Override
+  public String toString() {
+    return "GetLedgerEndResponse{" + "offset=" + offset + '}';
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    GetLedgerEndResponse that = (GetLedgerEndResponse) o;
+    return Objects.equals(offset, that.offset);
+  }
+
+  @Override
+  public int hashCode() {
+
+    return Objects.hash(offset);
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetPackageResponse.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetPackageResponse.java
new file mode 100644
index 0000000000..b402ca8e5a
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetPackageResponse.java
@@ -0,0 +1,64 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.google.protobuf.ByteString;
+import java.util.EnumSet;
+import java.util.Map;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+public final class GetPackageResponse {
+
+  // Clone of the PackageServiceOuterClass.HashFunction enumeration
+  public enum HashFunction {
+    SHA256(0),
+    UNRECOGNIZED(-1),
+    ;
+
+    private final int value;
+
+    private static Map valueToEnumMap =
+        EnumSet.allOf(GetPackageResponse.HashFunction.class).stream()
+            .collect(Collectors.toMap(e -> e.value, Function.identity()));
+
+    private HashFunction(int value) {
+      this.value = value;
+    }
+
+    public static GetPackageResponse.HashFunction valueOf(int value) {
+      return valueToEnumMap.getOrDefault(value, UNRECOGNIZED);
+    }
+  }
+
+  private final HashFunction hashFunction;
+  private final String hash;
+  private final ByteString archivePayload;
+
+  public GetPackageResponse(
+      HashFunction hashFunction, @NonNull String hash, @NonNull ByteString archivePayload) {
+    this.hashFunction = hashFunction;
+    this.hash = hash;
+    this.archivePayload = archivePayload;
+  }
+
+  public HashFunction getHashFunction() {
+    return hashFunction;
+  }
+
+  public String getHash() {
+    return hash;
+  }
+
+  public byte[] getArchivePayload() {
+    return archivePayload.toByteArray();
+  }
+
+  public static GetPackageResponse fromProto(
+      com.daml.ledger.api.v1.PackageServiceOuterClass.GetPackageResponse p) {
+    return new GetPackageResponse(
+        HashFunction.valueOf(p.getHashFunctionValue()), p.getHash(), p.getArchivePayload());
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetPackageStatusResponse.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetPackageStatusResponse.java
new file mode 100644
index 0000000000..eb238eac1f
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetPackageStatusResponse.java
@@ -0,0 +1,49 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import java.util.EnumSet;
+import java.util.Map;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+public final class GetPackageStatusResponse {
+
+  // Clone of the PackageServiceOuterClass.PackageStatus enumeration
+  public enum PackageStatus {
+    UNKNOWN(0),
+    REGISTERED(1),
+    UNRECOGNIZED(-1),
+    ;
+
+    private final int value;
+
+    private static Map valueToEnumMap =
+        EnumSet.allOf(PackageStatus.class).stream()
+            .collect(Collectors.toMap(e -> e.value, Function.identity()));
+
+    private PackageStatus(int value) {
+      this.value = value;
+    }
+
+    public static PackageStatus valueOf(int value) {
+      return valueToEnumMap.getOrDefault(value, UNRECOGNIZED);
+    }
+  }
+
+  private final PackageStatus packageStatus;
+
+  public GetPackageStatusResponse(PackageStatus packageStatus) {
+    this.packageStatus = packageStatus;
+  }
+
+  public PackageStatus getPackageStatusValue() {
+    return packageStatus;
+  }
+
+  public static GetPackageStatusResponse fromProto(
+      com.daml.ledger.api.v1.PackageServiceOuterClass.GetPackageStatusResponse p) {
+    return new GetPackageStatusResponse(PackageStatus.valueOf(p.getPackageStatusValue()));
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetTransactionResponse.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetTransactionResponse.java
new file mode 100644
index 0000000000..ba23e62af0
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetTransactionResponse.java
@@ -0,0 +1,51 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.TransactionServiceOuterClass;
+import java.util.Objects;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+public final class GetTransactionResponse {
+
+  private final TransactionTree transaction;
+
+  public GetTransactionResponse(@NonNull TransactionTree transaction) {
+    this.transaction = transaction;
+  }
+
+  public static GetTransactionResponse fromProto(
+      TransactionServiceOuterClass.GetTransactionResponse response) {
+    return new GetTransactionResponse(TransactionTree.fromProto(response.getTransaction()));
+  }
+
+  public TransactionServiceOuterClass.GetTransactionResponse toProto() {
+    return TransactionServiceOuterClass.GetTransactionResponse.newBuilder()
+        .setTransaction(this.transaction.toProto())
+        .build();
+  }
+
+  public TransactionTree getTransaction() {
+    return transaction;
+  }
+
+  @Override
+  public String toString() {
+    return "GetTransactionResponse{" + "transaction=" + transaction + '}';
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    GetTransactionResponse that = (GetTransactionResponse) o;
+    return Objects.equals(transaction, that.transaction);
+  }
+
+  @Override
+  public int hashCode() {
+
+    return Objects.hash(transaction);
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetTransactionTreesResponse.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetTransactionTreesResponse.java
new file mode 100644
index 0000000000..604ad4f996
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetTransactionTreesResponse.java
@@ -0,0 +1,64 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.TransactionOuterClass;
+import com.daml.ledger.api.v1.TransactionServiceOuterClass;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+public final class GetTransactionTreesResponse {
+
+  private final List transactions;
+
+  public GetTransactionTreesResponse(@NonNull List<@NonNull TransactionTree> transactions) {
+    this.transactions = transactions;
+  }
+
+  public static GetTransactionTreesResponse fromProto(
+      TransactionServiceOuterClass.GetTransactionTreesResponse response) {
+    ArrayList transactionTrees = new ArrayList<>(response.getTransactionsCount());
+    for (TransactionOuterClass.TransactionTree transactionTree : response.getTransactionsList()) {
+      transactionTrees.add(TransactionTree.fromProto(transactionTree));
+    }
+    return new GetTransactionTreesResponse(transactionTrees);
+  }
+
+  public TransactionServiceOuterClass.GetTransactionTreesResponse toProto() {
+    ArrayList transactionTrees =
+        new ArrayList<>(this.transactions.size());
+    for (TransactionTree transactionTree : this.transactions) {
+      transactionTrees.add(transactionTree.toProto());
+    }
+    return TransactionServiceOuterClass.GetTransactionTreesResponse.newBuilder()
+        .addAllTransactions(transactionTrees)
+        .build();
+  }
+
+  @NonNull
+  public List<@NonNull TransactionTree> getTransactions() {
+    return transactions;
+  }
+
+  @Override
+  public String toString() {
+    return "GetTransactionTreesResponse{" + "transactions=" + transactions + '}';
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    GetTransactionTreesResponse that = (GetTransactionTreesResponse) o;
+    return Objects.equals(transactions, that.transactions);
+  }
+
+  @Override
+  public int hashCode() {
+
+    return Objects.hash(transactions);
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetTransactionsRequest.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetTransactionsRequest.java
new file mode 100644
index 0000000000..63f8e61bbe
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetTransactionsRequest.java
@@ -0,0 +1,71 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.TransactionServiceOuterClass;
+import java.util.Optional;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+public final class GetTransactionsRequest {
+
+  private final String ledgerId;
+
+  private final LedgerOffset begin;
+
+  private final Optional end;
+
+  private final TransactionFilter filter;
+
+  private final boolean verbose;
+
+  public GetTransactionsRequest(
+      @NonNull String ledgerId,
+      @NonNull LedgerOffset begin,
+      @NonNull LedgerOffset end,
+      @NonNull TransactionFilter filter,
+      boolean verbose) {
+    this.ledgerId = ledgerId;
+    this.begin = begin;
+    this.end = Optional.of(end);
+    this.filter = filter;
+    this.verbose = verbose;
+  }
+
+  public GetTransactionsRequest(
+      @NonNull String ledgerId,
+      @NonNull LedgerOffset begin,
+      @NonNull TransactionFilter filter,
+      boolean verbose) {
+    this.ledgerId = ledgerId;
+    this.begin = begin;
+    this.end = Optional.empty();
+    this.filter = filter;
+    this.verbose = verbose;
+  }
+
+  public static GetTransactionsRequest fromProto(
+      TransactionServiceOuterClass.GetTransactionsRequest request) {
+    String ledgerId = request.getLedgerId();
+    LedgerOffset begin = LedgerOffset.fromProto(request.getBegin());
+    TransactionFilter filter = TransactionFilter.fromProto(request.getFilter());
+    boolean verbose = request.getVerbose();
+    if (request.hasEnd()) {
+      LedgerOffset end = LedgerOffset.fromProto(request.getEnd());
+      return new GetTransactionsRequest(ledgerId, begin, end, filter, verbose);
+    } else {
+      return new GetTransactionsRequest(ledgerId, begin, filter, verbose);
+    }
+  }
+
+  public TransactionServiceOuterClass.GetTransactionsRequest toProto() {
+    TransactionServiceOuterClass.GetTransactionsRequest.Builder builder =
+        TransactionServiceOuterClass.GetTransactionsRequest.newBuilder();
+    builder.setLedgerId(this.ledgerId);
+    builder.setBegin(this.begin.toProto());
+    this.end.ifPresent(end -> builder.setEnd(end.toProto()));
+    builder.setFilter(this.filter.toProto());
+    builder.setVerbose(this.verbose);
+    return builder.build();
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetTransactionsResponse.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetTransactionsResponse.java
new file mode 100644
index 0000000000..b0e730905a
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetTransactionsResponse.java
@@ -0,0 +1,64 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.TransactionOuterClass;
+import com.daml.ledger.api.v1.TransactionServiceOuterClass;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+public final class GetTransactionsResponse {
+
+  private final List transactions;
+
+  public GetTransactionsResponse(@NonNull List<@NonNull Transaction> transactions) {
+    this.transactions = transactions;
+  }
+
+  public static GetTransactionsResponse fromProto(
+      TransactionServiceOuterClass.GetTransactionsResponse response) {
+    ArrayList transactions = new ArrayList<>(response.getTransactionsCount());
+    for (TransactionOuterClass.Transaction transaction : response.getTransactionsList()) {
+      transactions.add(Transaction.fromProto(transaction));
+    }
+    return new GetTransactionsResponse(transactions);
+  }
+
+  public TransactionServiceOuterClass.GetTransactionsResponse toProto() {
+    ArrayList transactions =
+        new ArrayList<>(this.transactions.size());
+    for (Transaction transaction : this.transactions) {
+      transactions.add(transaction.toProto());
+    }
+    return TransactionServiceOuterClass.GetTransactionsResponse.newBuilder()
+        .addAllTransactions(transactions)
+        .build();
+  }
+
+  @NonNull
+  public List<@NonNull Transaction> getTransactions() {
+    return transactions;
+  }
+
+  @Override
+  public String toString() {
+    return "GetTransactionsResponse{" + "transactions=" + transactions + '}';
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    GetTransactionsResponse that = (GetTransactionsResponse) o;
+    return Objects.equals(transactions, that.transactions);
+  }
+
+  @Override
+  public int hashCode() {
+
+    return Objects.hash(transactions);
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetUserRequest.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetUserRequest.java
new file mode 100644
index 0000000000..91a6fcbb0b
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetUserRequest.java
@@ -0,0 +1,45 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.admin.UserManagementServiceOuterClass;
+import java.util.Objects;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+public final class GetUserRequest {
+
+  private final String userId;
+
+  public GetUserRequest(@NonNull String userId) {
+    this.userId = userId;
+  }
+
+  public String getId() {
+    return userId;
+  }
+
+  @Override
+  public String toString() {
+    return "GetUserRequest{" + "userId='" + userId + '\'' + '}';
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    GetUserRequest that = (GetUserRequest) o;
+    return Objects.equals(userId, that.userId);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(userId);
+  }
+
+  public UserManagementServiceOuterClass.GetUserRequest toProto() {
+    return UserManagementServiceOuterClass.GetUserRequest.newBuilder()
+        .setUserId(this.userId)
+        .build();
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetUserResponse.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetUserResponse.java
new file mode 100644
index 0000000000..aa3b18794a
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetUserResponse.java
@@ -0,0 +1,48 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.admin.UserManagementServiceOuterClass;
+import java.util.Objects;
+
+public final class GetUserResponse {
+
+  private final User user;
+
+  public GetUserResponse(User user) {
+    this.user = user;
+  }
+
+  public User getUser() {
+    return user;
+  }
+
+  public static GetUserResponse fromProto(UserManagementServiceOuterClass.GetUserResponse proto) {
+    return new GetUserResponse(User.fromProto(proto.getUser()));
+  }
+
+  public UserManagementServiceOuterClass.GetUserResponse toProto() {
+    return UserManagementServiceOuterClass.GetUserResponse.newBuilder()
+        .setUser(this.user.toProto())
+        .build();
+  }
+
+  @Override
+  public String toString() {
+    return "GetUserResponse{" + "user=" + user + '}';
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    GetUserResponse that = (GetUserResponse) o;
+    return user.equals(that.user);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(user);
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GrantUserRightsRequest.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GrantUserRightsRequest.java
new file mode 100644
index 0000000000..21d24f4574
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GrantUserRightsRequest.java
@@ -0,0 +1,57 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.admin.UserManagementServiceOuterClass;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Objects;
+import java.util.stream.Collectors;
+
+public final class GrantUserRightsRequest {
+
+  private final String userId;
+  private final List rights;
+
+  public GrantUserRightsRequest(String userId, User.Right right, User.Right... rights) {
+    this.userId = userId;
+    this.rights = new ArrayList<>(rights.length + 1);
+    this.rights.add(right);
+    this.rights.addAll(Arrays.asList(rights));
+  }
+
+  public String getUserId() {
+    return userId;
+  }
+
+  public List getRights() {
+    return new ArrayList<>(rights);
+  }
+
+  public UserManagementServiceOuterClass.GrantUserRightsRequest toProto() {
+    return UserManagementServiceOuterClass.GrantUserRightsRequest.newBuilder()
+        .setUserId(this.userId)
+        .addAllRights(this.rights.stream().map(User.Right::toProto).collect(Collectors.toList()))
+        .build();
+  }
+
+  @Override
+  public String toString() {
+    return "GrantUserRightsRequest{" + "userId=" + userId + ", rights=" + rights + '}';
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    GrantUserRightsRequest that = (GrantUserRightsRequest) o;
+    return userId.equals(that.userId) && rights.equals(that.rights);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(userId, rights);
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GrantUserRightsResponse.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GrantUserRightsResponse.java
new file mode 100644
index 0000000000..af899a9282
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GrantUserRightsResponse.java
@@ -0,0 +1,50 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.admin.UserManagementServiceOuterClass;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+import java.util.stream.Collectors;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+public final class GrantUserRightsResponse {
+
+  private final List newlyGrantedRights;
+
+  public GrantUserRightsResponse(@NonNull List newlyGrantedRights) {
+    this.newlyGrantedRights = new ArrayList<>(newlyGrantedRights);
+  }
+
+  public List getNewlyGrantedRights() {
+    return new ArrayList<>(this.newlyGrantedRights);
+  }
+
+  public static GrantUserRightsResponse fromProto(
+      UserManagementServiceOuterClass.GrantUserRightsResponse proto) {
+    return new GrantUserRightsResponse(
+        proto.getNewlyGrantedRightsList().stream()
+            .map(User.Right::fromProto)
+            .collect(Collectors.toList()));
+  }
+
+  @Override
+  public String toString() {
+    return "GrantUserRightsResponse{" + "newlyGrantedRights=" + newlyGrantedRights + '}';
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    GrantUserRightsResponse that = (GrantUserRightsResponse) o;
+    return Objects.equals(newlyGrantedRights, that.newlyGrantedRights);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(newlyGrantedRights);
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Identifier.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Identifier.java
new file mode 100644
index 0000000000..8cc86fee3e
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Identifier.java
@@ -0,0 +1,115 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.ValueOuterClass;
+import java.util.Objects;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+public final class Identifier {
+
+  private final String packageId;
+  private final String moduleName;
+  private final String entityName;
+
+  /**
+   * This constructor is deprecated in favor of {@link Identifier#Identifier(String, String,
+   * String)}
+   */
+  @Deprecated
+  public Identifier(@NonNull String packageId, @NonNull String name) {
+    this.packageId = packageId;
+    int lastDot = name.lastIndexOf('.');
+    if (lastDot <= 0) {
+      // The module component of the name must be at least 1 character long.
+      // if no '.' is found or it is on the first position, then the name is not a valid identifier.
+      throw new IllegalArgumentException(
+          String.format(
+              "Identifier name [%s] has wrong format. Dot-separated module and entity name"
+                  + " expected (e.g.: Foo.Bar)",
+              name));
+    }
+    this.moduleName = name.substring(0, lastDot);
+    this.entityName = name.substring(lastDot + 1);
+  }
+
+  public Identifier(
+      @NonNull String packageId, @NonNull String moduleName, @NonNull String entityName) {
+    this.packageId = packageId;
+    this.moduleName = moduleName;
+    this.entityName = entityName;
+  }
+
+  @NonNull
+  public static Identifier fromProto(ValueOuterClass.Identifier identifier) {
+    if (!identifier.getModuleName().isEmpty() && !identifier.getEntityName().isEmpty()) {
+      return new Identifier(
+          identifier.getPackageId(), identifier.getModuleName(), identifier.getEntityName());
+    } else {
+      throw new IllegalArgumentException(
+          String.format(
+              "Invalid identifier [%s]: both module_name and entity_name must be set.",
+              identifier));
+    }
+  }
+
+  public ValueOuterClass.Identifier toProto() {
+    return ValueOuterClass.Identifier.newBuilder()
+        .setPackageId(this.packageId)
+        .setModuleName(this.moduleName)
+        .setEntityName(this.entityName)
+        .build();
+  }
+
+  @NonNull
+  public String getPackageId() {
+    return packageId;
+  }
+
+  @NonNull
+  @Deprecated
+  public String getName() {
+    return moduleName.concat(".").concat(entityName);
+  }
+
+  @NonNull
+  public String getModuleName() {
+    return moduleName;
+  }
+
+  @NonNull
+  public String getEntityName() {
+    return entityName;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    Identifier that = (Identifier) o;
+    return Objects.equals(packageId, that.packageId)
+        && Objects.equals(moduleName, that.moduleName)
+        && Objects.equals(entityName, that.entityName);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(packageId, moduleName, entityName);
+  }
+
+  @Override
+  public String toString() {
+    return "Identifier{"
+        + "packageId='"
+        + packageId
+        + '\''
+        + ", moduleName='"
+        + moduleName
+        + '\''
+        + ", entityName='"
+        + entityName
+        + '\''
+        + '}';
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/InclusiveFilter.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/InclusiveFilter.java
new file mode 100644
index 0000000000..c8e58a1d0f
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/InclusiveFilter.java
@@ -0,0 +1,156 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.TransactionFilterOuterClass;
+import com.daml.ledger.api.v1.ValueOuterClass;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+public final class InclusiveFilter extends Filter {
+
+  private Set templateIds;
+  private Map<@NonNull Identifier, Filter.@NonNull Interface> interfaceFilters;
+  private Map<@NonNull Identifier, Filter.@NonNull Template> templateFilters;
+
+  private InclusiveFilter(
+      @NonNull Set<@NonNull Identifier> templateIds,
+      @NonNull Map<@NonNull Identifier, Filter.@NonNull Interface> interfaceFilters,
+      @NonNull Map<@NonNull Identifier, Filter.@NonNull Template> templateFilters) {
+    this.templateIds = templateIds;
+    this.interfaceFilters = interfaceFilters;
+    this.templateFilters = templateFilters;
+  }
+
+  public InclusiveFilter(
+      @NonNull Map<@NonNull Identifier, Filter.@NonNull Interface> interfaceFilters,
+      @NonNull Map<@NonNull Identifier, Filter.@NonNull Template> templateFilters) {
+    this(Collections.emptySet(), interfaceFilters, templateFilters);
+  }
+
+  /**
+   * @deprecated Use {@link #ofTemplateIds} instead; {@code templateIds} must not include interface
+   *     IDs. Since Daml 2.4.0
+   */
+  @Deprecated
+  public InclusiveFilter(@NonNull Set<@NonNull Identifier> templateIds) {
+    this(templateIds, Collections.emptyMap());
+  }
+
+  /**
+   * @deprecated Use the constructor with {@link #templateFilters} instead of IDs. Since Daml 2.8.0
+   */
+  @Deprecated
+  public InclusiveFilter(
+      @NonNull Set<@NonNull Identifier> templateIds,
+      @NonNull Map<@NonNull Identifier, Filter.@NonNull Interface> interfaceIds) {
+    this(templateIds, interfaceIds, Collections.emptyMap());
+  }
+
+  public static InclusiveFilter ofTemplateIds(@NonNull Set<@NonNull Identifier> templateIds) {
+    return new InclusiveFilter(
+        Collections.emptyMap(),
+        templateIds.stream()
+            .collect(
+                Collectors.toUnmodifiableMap(
+                    Function.identity(), tId -> Template.HIDE_CREATED_EVENT_BLOB)));
+  }
+
+  @NonNull
+  public Set<@NonNull Identifier> getTemplateIds() {
+    return templateIds;
+  }
+
+  @NonNull
+  public Map<@NonNull Identifier, Filter.@NonNull Interface> getInterfaceFilters() {
+    return interfaceFilters;
+  }
+
+  @NonNull
+  public Map<@NonNull Identifier, Filter.@NonNull Template> getTemplateFilters() {
+    return templateFilters;
+  }
+
+  @SuppressWarnings("deprecation")
+  @Override
+  public TransactionFilterOuterClass.Filters toProto() {
+    ArrayList templateIds = new ArrayList<>(this.templateIds.size());
+    for (Identifier identifier : this.templateIds) {
+      templateIds.add(identifier.toProto());
+    }
+    TransactionFilterOuterClass.InclusiveFilters inclusiveFilter =
+        TransactionFilterOuterClass.InclusiveFilters.newBuilder()
+            .addAllTemplateIds(templateIds)
+            .addAllInterfaceFilters(
+                interfaceFilters.entrySet().stream()
+                    .map(idFilt -> idFilt.getValue().toProto(idFilt.getKey()))
+                    .collect(Collectors.toUnmodifiableList()))
+            .addAllTemplateFilters(
+                templateFilters.entrySet().stream()
+                    .map(
+                        templateFilter ->
+                            templateFilter.getValue().toProto(templateFilter.getKey()))
+                    .collect(Collectors.toUnmodifiableList()))
+            .build();
+    return TransactionFilterOuterClass.Filters.newBuilder().setInclusive(inclusiveFilter).build();
+  }
+
+  @SuppressWarnings("deprecation")
+  public static InclusiveFilter fromProto(
+      TransactionFilterOuterClass.InclusiveFilters inclusiveFilters) {
+    HashSet templateIds = new HashSet<>(inclusiveFilters.getTemplateIdsCount());
+    for (ValueOuterClass.Identifier templateId : inclusiveFilters.getTemplateIdsList()) {
+      templateIds.add(Identifier.fromProto(templateId));
+    }
+    var interfaceIds =
+        inclusiveFilters.getInterfaceFiltersList().stream()
+            .collect(
+                Collectors.toUnmodifiableMap(
+                    ifFilt -> Identifier.fromProto(ifFilt.getInterfaceId()),
+                    Filter.Interface::fromProto,
+                    Filter.Interface::merge));
+    var templateFilters =
+        inclusiveFilters.getTemplateFiltersList().stream()
+            .collect(
+                Collectors.toUnmodifiableMap(
+                    templateFilter -> Identifier.fromProto(templateFilter.getTemplateId()),
+                    Filter.Template::fromProto,
+                    Filter.Template::merge));
+    return new InclusiveFilter(templateIds, interfaceIds, templateFilters);
+  }
+
+  @Override
+  public String toString() {
+    return "InclusiveFilter{"
+        + "templateIds="
+        + templateIds
+        + ", interfaceFilters="
+        + interfaceFilters
+        + ", templateFilters="
+        + templateFilters
+        + '}';
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    InclusiveFilter that = (InclusiveFilter) o;
+    return Objects.equals(templateIds, that.templateIds)
+        && Objects.equals(interfaceFilters, that.interfaceFilters)
+        && Objects.equals(templateFilters, that.templateFilters);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(templateIds, interfaceFilters, templateFilters);
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Int64.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Int64.java
new file mode 100644
index 0000000000..5de32e2358
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Int64.java
@@ -0,0 +1,44 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.ValueOuterClass;
+import java.util.Objects;
+
+public final class Int64 extends Value {
+
+  private long value;
+
+  public Int64(long int64) {
+    this.value = int64;
+  }
+
+  public long getValue() {
+    return value;
+  }
+
+  @Override
+  public ValueOuterClass.Value toProto() {
+    return ValueOuterClass.Value.newBuilder().setInt64(this.value).build();
+  }
+
+  @Override
+  public String toString() {
+    return "Int64{" + "value=" + value + '}';
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    Int64 int64 = (Int64) o;
+    return value == int64.value;
+  }
+
+  @Override
+  public int hashCode() {
+
+    return Objects.hash(value);
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/LedgerOffset.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/LedgerOffset.java
new file mode 100644
index 0000000000..1f7849b013
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/LedgerOffset.java
@@ -0,0 +1,126 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.LedgerOffsetOuterClass;
+import java.util.Objects;
+
+public abstract class LedgerOffset {
+
+  public static final class LedgerBegin extends LedgerOffset {
+    static LedgerBegin instance = new LedgerBegin();
+
+    private LedgerBegin() {}
+
+    public static LedgerBegin getInstance() {
+      return instance;
+    }
+
+    @Override
+    public String toString() {
+      return "LedgerOffset.Begin";
+    }
+  }
+
+  public static final class LedgerEnd extends LedgerOffset {
+    static LedgerEnd instance = new LedgerEnd();
+
+    private LedgerEnd() {}
+
+    public static LedgerEnd getInstance() {
+      return instance;
+    }
+
+    @Override
+    public String toString() {
+      return "LedgerOffset.End";
+    }
+  }
+
+  public static final class Absolute extends LedgerOffset {
+    private final String offset;
+
+    public Absolute(String offset) {
+      this.offset = offset;
+    }
+
+    public String getOffset() {
+      return offset;
+    }
+
+    @Override
+    public String toString() {
+      return "LedgerOffset.Absolute(" + offset + ')';
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) return true;
+      if (o == null || getClass() != o.getClass()) return false;
+      Absolute absolute = (Absolute) o;
+      return Objects.equals(offset, absolute.offset);
+    }
+
+    @Override
+    public int hashCode() {
+
+      return Objects.hash(offset);
+    }
+  }
+
+  public static LedgerOffset fromProto(LedgerOffsetOuterClass.LedgerOffset ledgerOffset) {
+    switch (ledgerOffset.getValueCase()) {
+      case ABSOLUTE:
+        return new Absolute(ledgerOffset.getAbsolute());
+      case BOUNDARY:
+        switch (ledgerOffset.getBoundary()) {
+          case LEDGER_BEGIN:
+            return LedgerBegin.instance;
+          case LEDGER_END:
+            return LedgerEnd.instance;
+          case UNRECOGNIZED:
+          default:
+            throw new LedgerBoundaryUnrecognized(ledgerOffset.getBoundary());
+        }
+      case VALUE_NOT_SET:
+      default:
+        throw new LedgerBoundaryUnset(ledgerOffset);
+    }
+  }
+
+  public final LedgerOffsetOuterClass.LedgerOffset toProto() {
+    if (this instanceof LedgerBegin) {
+      return LedgerOffsetOuterClass.LedgerOffset.newBuilder()
+          .setBoundary(LedgerOffsetOuterClass.LedgerOffset.LedgerBoundary.LEDGER_BEGIN)
+          .build();
+    } else if (this instanceof LedgerEnd) {
+      return LedgerOffsetOuterClass.LedgerOffset.newBuilder()
+          .setBoundary(LedgerOffsetOuterClass.LedgerOffset.LedgerBoundary.LEDGER_END)
+          .build();
+    } else if (this instanceof Absolute) {
+      Absolute absolute = (Absolute) this;
+      return LedgerOffsetOuterClass.LedgerOffset.newBuilder().setAbsolute(absolute.offset).build();
+    } else {
+      throw new LedgerOffsetUnknown(this);
+    }
+  }
+}
+
+class LedgerBoundaryUnrecognized extends RuntimeException {
+  public LedgerBoundaryUnrecognized(LedgerOffsetOuterClass.LedgerOffset.LedgerBoundary boundary) {
+    super("Ledger Boundary unknown " + boundary.toString());
+  }
+}
+
+class LedgerBoundaryUnset extends RuntimeException {
+  public LedgerBoundaryUnset(LedgerOffsetOuterClass.LedgerOffset offset) {
+    super("Ledger Offset unset " + offset.toString());
+  }
+}
+
+class LedgerOffsetUnknown extends RuntimeException {
+  public LedgerOffsetUnknown(LedgerOffset offset) {
+    super("Ledger offset unkwnown " + offset.toString());
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ListUserRightsRequest.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ListUserRightsRequest.java
new file mode 100644
index 0000000000..a5abccf815
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ListUserRightsRequest.java
@@ -0,0 +1,45 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.admin.UserManagementServiceOuterClass;
+import java.util.Objects;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+public final class ListUserRightsRequest {
+
+  private final String userId;
+
+  public ListUserRightsRequest(@NonNull String userId) {
+    this.userId = userId;
+  }
+
+  public String getId() {
+    return userId;
+  }
+
+  @Override
+  public String toString() {
+    return "ListUserRightsRequest{" + "userId='" + userId + '\'' + '}';
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    ListUserRightsRequest that = (ListUserRightsRequest) o;
+    return Objects.equals(userId, that.userId);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(userId);
+  }
+
+  public UserManagementServiceOuterClass.ListUserRightsRequest toProto() {
+    return UserManagementServiceOuterClass.ListUserRightsRequest.newBuilder()
+        .setUserId(this.userId)
+        .build();
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ListUserRightsResponse.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ListUserRightsResponse.java
new file mode 100644
index 0000000000..8ca0d01112
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ListUserRightsResponse.java
@@ -0,0 +1,48 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.admin.UserManagementServiceOuterClass;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+import java.util.stream.Collectors;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+public final class ListUserRightsResponse {
+
+  private final List rights;
+
+  public ListUserRightsResponse(@NonNull List rights) {
+    this.rights = new ArrayList<>(rights);
+  }
+
+  public List getRights() {
+    return new ArrayList<>(this.rights);
+  }
+
+  public static ListUserRightsResponse fromProto(
+      UserManagementServiceOuterClass.ListUserRightsResponse proto) {
+    return new ListUserRightsResponse(
+        proto.getRightsList().stream().map(User.Right::fromProto).collect(Collectors.toList()));
+  }
+
+  @Override
+  public String toString() {
+    return "ListUserRightsResponse{" + "rights=" + rights + '}';
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    ListUserRightsResponse that = (ListUserRightsResponse) o;
+    return Objects.equals(rights, that.rights);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(rights);
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ListUsersRequest.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ListUsersRequest.java
new file mode 100644
index 0000000000..9a95d796b3
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ListUsersRequest.java
@@ -0,0 +1,55 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.admin.UserManagementServiceOuterClass;
+import java.util.Objects;
+import java.util.Optional;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+public final class ListUsersRequest {
+
+  private final Optional pageToken;
+
+  private final Integer pageSize;
+
+  public ListUsersRequest(@NonNull Optional pageToken, @NonNull Integer pageSize) {
+    this.pageToken = pageToken;
+    this.pageSize = pageSize;
+  }
+
+  public Optional getPageToken() {
+    return pageToken;
+  }
+
+  public Integer getPageSize() {
+    return pageSize;
+  }
+
+  @Override
+  public String toString() {
+    return "ListUsersRequest{" + "pageToken=" + pageToken + ", pageSize=" + pageSize + '}';
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    ListUsersRequest that = (ListUsersRequest) o;
+    return Objects.equals(pageToken, that.pageToken) && Objects.equals(pageSize, that.pageSize);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(pageToken, pageSize);
+  }
+
+  public UserManagementServiceOuterClass.ListUsersRequest toProto() {
+    UserManagementServiceOuterClass.ListUsersRequest.Builder builder =
+        UserManagementServiceOuterClass.ListUsersRequest.newBuilder();
+    pageToken.ifPresent(builder::setPageToken);
+    builder.setPageSize(pageSize);
+    return builder.build();
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ListUsersResponse.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ListUsersResponse.java
new file mode 100644
index 0000000000..dfe9b00097
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/ListUsersResponse.java
@@ -0,0 +1,48 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.admin.UserManagementServiceOuterClass;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+import java.util.stream.Collectors;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+public final class ListUsersResponse {
+
+  private final List users;
+
+  public ListUsersResponse(@NonNull List users) {
+    this.users = new ArrayList<>(users);
+  }
+
+  public List getUsers() {
+    return new ArrayList<>(this.users);
+  }
+
+  public static ListUsersResponse fromProto(
+      UserManagementServiceOuterClass.ListUsersResponse proto) {
+    return new ListUsersResponse(
+        proto.getUsersList().stream().map(User::fromProto).collect(Collectors.toList()));
+  }
+
+  @Override
+  public String toString() {
+    return "ListUsersResponse{" + "users=" + users + '}';
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    ListUsersResponse that = (ListUsersResponse) o;
+    return Objects.equals(users, that.users);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(users);
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/NoFilter.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/NoFilter.java
new file mode 100644
index 0000000000..36d8c6dea1
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/NoFilter.java
@@ -0,0 +1,18 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.TransactionFilterOuterClass;
+
/**
 * The wildcard filter: serializes to an empty {@code Filters} message, i.e. no template or
 * interface restriction for the party it is attached to.
 */
public final class NoFilter extends Filter {

  // Stateless, so a single shared instance suffices.
  public static final NoFilter instance = new NoFilter();

  private NoFilter() {}

  @Override
  public TransactionFilterOuterClass.Filters toProto() {
    return TransactionFilterOuterClass.Filters.getDefaultInstance();
  }
}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Numeric.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Numeric.java
new file mode 100644
index 0000000000..29ad995f06
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Numeric.java
@@ -0,0 +1,51 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.ValueOuterClass;
+import java.math.BigDecimal;
+import java.util.Objects;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+public class Numeric extends Value {
+
+  private final BigDecimal value;
+
+  public Numeric(@NonNull BigDecimal value) {
+    this.value = value;
+  }
+
+  public static Numeric fromProto(String numeric) {
+    return new Numeric(new BigDecimal(numeric));
+  }
+
+  @Override
+  public ValueOuterClass.Value toProto() {
+    return ValueOuterClass.Value.newBuilder().setNumeric(this.value.toPlainString()).build();
+  }
+
+  @NonNull
+  public BigDecimal getValue() {
+    return value;
+  }
+
+  @Override
+  public String toString() {
+    return "Numeric{" + "value=" + value + '}';
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    Numeric numeric = (Numeric) o;
+    return Objects.equals(value, numeric.value);
+  }
+
+  @Override
+  public int hashCode() {
+
+    return Objects.hash(value);
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Party.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Party.java
new file mode 100644
index 0000000000..3023bc4877
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Party.java
@@ -0,0 +1,46 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.ValueOuterClass;
+import java.util.Objects;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+public final class Party extends Value {
+
+  private final String value;
+
+  public Party(@NonNull String value) {
+    this.value = value;
+  }
+
+  @Override
+  public ValueOuterClass.Value toProto() {
+    return ValueOuterClass.Value.newBuilder().setParty(this.value).build();
+  }
+
+  @NonNull
+  public String getValue() {
+    return value;
+  }
+
+  @Override
+  public String toString() {
+    return "Party{" + "value='" + value + '\'' + '}';
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    Party party = (Party) o;
+    return Objects.equals(value, party.value);
+  }
+
+  @Override
+  public int hashCode() {
+
+    return Objects.hash(value);
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Record.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Record.java
new file mode 100644
index 0000000000..c9766bdfcf
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Record.java
@@ -0,0 +1,87 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import static java.util.Collections.unmodifiableList;
+
+import com.daml.ledger.api.v1.ValueOuterClass;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+// FIXME When removing this after the deprecation period is over, make DamlRecord final
+/** @deprecated Use {@link DamlRecord} instead. */
+@Deprecated
+public final class Record extends DamlRecord {
+
+  public Record(@NonNull Identifier recordId, @NonNull Field... fields) {
+    this(recordId, Arrays.asList(fields));
+  }
+
+  public Record(@NonNull Field... fields) {
+    super(Arrays.asList(fields));
+  }
+
+  public Record(@NonNull Identifier recordId, @NonNull List<@NonNull Field> fields) {
+    super(recordId, unmodifiableList(fields));
+  }
+
+  public Record(@NonNull List<@NonNull Field> fields) {
+    super(unmodifiableList(fields));
+  }
+
+  public Record(
+      @NonNull Optional recordId,
+      @NonNull List<@NonNull Field> fields,
+      Map fieldsMap) {
+    super(recordId, unmodifiableList(fields), fieldsMap);
+  }
+
+  /** @deprecated Use {@link DamlRecord#fromProto(ValueOuterClass.Record)} instead */
+  @Deprecated
+  @NonNull
+  public static Record fromProto(ValueOuterClass.Record record) {
+    ArrayList fields = new ArrayList<>(record.getFieldsCount());
+    HashMap fieldsMap = new HashMap<>(record.getFieldsCount());
+    for (ValueOuterClass.RecordField recordField : record.getFieldsList()) {
+      Field field = Field.fromProto(recordField);
+      fields.add(field);
+      if (field.getLabel().isPresent()) {
+        fieldsMap.put(field.getLabel().get(), field.getValue());
+      }
+    }
+    if (record.hasRecordId()) {
+      Identifier recordId = Identifier.fromProto(record.getRecordId());
+      return new Record(Optional.of(recordId), fields, fieldsMap);
+    } else {
+      return new Record(Optional.empty(), fields, fieldsMap);
+    }
+  }
+
+  // FIXME When removing this after the deprecation period is over, make DamlTextMap.Field final
+  /** @deprecated Use {@link DamlRecord.Field} instead. */
+  @Deprecated
+  public static final class Field extends DamlRecord.Field {
+
+    public Field(@NonNull String label, @NonNull Value value) {
+      super(label, value);
+    }
+
+    public Field(@NonNull Value value) {
+      super(value);
+    }
+
+    /** @deprecated Use {@link DamlRecord.Field#fromProto(ValueOuterClass.Record)} instead */
+    @Deprecated
+    public static Field fromProto(ValueOuterClass.RecordField field) {
+      String label = field.getLabel();
+      Value value = Value.fromProto(field.getValue());
+      return label.isEmpty() ? new Field(value) : new Field(label, value);
+    }
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/RevokeUserRightsRequest.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/RevokeUserRightsRequest.java
new file mode 100644
index 0000000000..ac99daac94
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/RevokeUserRightsRequest.java
@@ -0,0 +1,57 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.admin.UserManagementServiceOuterClass;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Objects;
+import java.util.stream.Collectors;
+
+public final class RevokeUserRightsRequest {
+
+  private final String userId;
+  private final List rights;
+
+  public RevokeUserRightsRequest(String userId, User.Right right, User.Right... rights) {
+    this.userId = userId;
+    this.rights = new ArrayList<>(rights.length + 1);
+    this.rights.add(right);
+    this.rights.addAll(Arrays.asList(rights));
+  }
+
+  public String getUserId() {
+    return userId;
+  }
+
+  public List getRights() {
+    return new ArrayList<>(rights);
+  }
+
+  public UserManagementServiceOuterClass.RevokeUserRightsRequest toProto() {
+    return UserManagementServiceOuterClass.RevokeUserRightsRequest.newBuilder()
+        .setUserId(this.userId)
+        .addAllRights(this.rights.stream().map(User.Right::toProto).collect(Collectors.toList()))
+        .build();
+  }
+
+  @Override
+  public String toString() {
+    return "RevokeUserRightsRequest{" + "userId=" + userId + ", rights=" + rights + '}';
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    RevokeUserRightsRequest that = (RevokeUserRightsRequest) o;
+    return userId.equals(that.userId) && rights.equals(that.rights);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(userId, rights);
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/RevokeUserRightsResponse.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/RevokeUserRightsResponse.java
new file mode 100644
index 0000000000..79fd2d3545
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/RevokeUserRightsResponse.java
@@ -0,0 +1,50 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.admin.UserManagementServiceOuterClass;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+import java.util.stream.Collectors;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+public final class RevokeUserRightsResponse {
+
+  private final List newlyRevokedRights;
+
+  public RevokeUserRightsResponse(@NonNull List newlyRevokedRights) {
+    this.newlyRevokedRights = new ArrayList<>(newlyRevokedRights);
+  }
+
+  public List getNewlyRevokedRights() {
+    return new ArrayList<>(this.newlyRevokedRights);
+  }
+
+  public static RevokeUserRightsResponse fromProto(
+      UserManagementServiceOuterClass.RevokeUserRightsResponse proto) {
+    return new RevokeUserRightsResponse(
+        proto.getNewlyRevokedRightsList().stream()
+            .map(User.Right::fromProto)
+            .collect(Collectors.toList()));
+  }
+
+  @Override
+  public String toString() {
+    return "RevokeUserRightsResponse{" + "newlyRevokedRights=" + newlyRevokedRights + '}';
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    RevokeUserRightsResponse that = (RevokeUserRightsResponse) o;
+    return Objects.equals(newlyRevokedRights, that.newlyRevokedRights);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(newlyRevokedRights);
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/SubmitAndWaitRequest.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/SubmitAndWaitRequest.java
new file mode 100644
index 0000000000..fb7d970a2f
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/SubmitAndWaitRequest.java
@@ -0,0 +1,146 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.CommandServiceOuterClass;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.List;
+import java.util.Optional;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+public final class SubmitAndWaitRequest {
+
+  public static CommandServiceOuterClass.SubmitAndWaitRequest toProto(
+      @NonNull String ledgerId, @NonNull CommandsSubmission submission) {
+    return CommandServiceOuterClass.SubmitAndWaitRequest.newBuilder()
+        .setCommands(SubmitCommandsRequest.toProto(ledgerId, submission))
+        .build();
+  }
+
+  public static CommandServiceOuterClass.SubmitAndWaitRequest toProto(
+      @NonNull String ledgerId,
+      @NonNull String submissionId,
+      @NonNull CommandsSubmission submission) {
+    return CommandServiceOuterClass.SubmitAndWaitRequest.newBuilder()
+        .setCommands(SubmitCommandsRequest.toProto(ledgerId, submissionId, submission))
+        .build();
+  }
+
+  /** @deprecated since 2.5. Please use {@link #toProto(String, CommandsSubmission)} */
+  @Deprecated
+  public static CommandServiceOuterClass.SubmitAndWaitRequest toProto(
+      @NonNull String ledgerId,
+      @NonNull String workflowId,
+      @NonNull String applicationId,
+      @NonNull String commandId,
+      @NonNull String party,
+      @NonNull Optional minLedgerTimeAbsolute,
+      @NonNull Optional minLedgerTimeRelative,
+      @NonNull Optional deduplicationTime,
+      @NonNull List<@NonNull Command> commands) {
+    return CommandServiceOuterClass.SubmitAndWaitRequest.newBuilder()
+        .setCommands(
+            SubmitCommandsRequest.toProto(
+                ledgerId,
+                workflowId,
+                applicationId,
+                commandId,
+                party,
+                minLedgerTimeAbsolute,
+                minLedgerTimeRelative,
+                deduplicationTime,
+                commands))
+        .build();
+  }
+
+  /** @deprecated since 2.5. Please use {@link #toProto(String, String, CommandsSubmission)} */
+  @Deprecated
+  public static CommandServiceOuterClass.SubmitAndWaitRequest toProto(
+      @NonNull String ledgerId,
+      @NonNull String workflowId,
+      @NonNull String applicationId,
+      @NonNull String commandId,
+      @NonNull String submissionId,
+      @NonNull String party,
+      @NonNull Optional minLedgerTimeAbsolute,
+      @NonNull Optional minLedgerTimeRelative,
+      @NonNull Optional deduplicationTime,
+      @NonNull List<@NonNull Command> commands) {
+    return CommandServiceOuterClass.SubmitAndWaitRequest.newBuilder()
+        .setCommands(
+            SubmitCommandsRequest.toProto(
+                ledgerId,
+                workflowId,
+                applicationId,
+                commandId,
+                submissionId,
+                party,
+                minLedgerTimeAbsolute,
+                minLedgerTimeRelative,
+                deduplicationTime,
+                commands))
+        .build();
+  }
+
+  /** @deprecated since 2.5. Please use {@link #toProto(String, CommandsSubmission)} */
+  @Deprecated
+  public static CommandServiceOuterClass.SubmitAndWaitRequest toProto(
+      @NonNull String ledgerId,
+      @NonNull String workflowId,
+      @NonNull String applicationId,
+      @NonNull String commandId,
+      @NonNull List<@NonNull String> actAs,
+      @NonNull List<@NonNull String> readAs,
+      @NonNull Optional minLedgerTimeAbsolute,
+      @NonNull Optional minLedgerTimeRelative,
+      @NonNull Optional deduplicationTime,
+      @NonNull List<@NonNull Command> commands) {
+    return CommandServiceOuterClass.SubmitAndWaitRequest.newBuilder()
+        .setCommands(
+            SubmitCommandsRequest.toProto(
+                ledgerId,
+                workflowId,
+                applicationId,
+                commandId,
+                actAs,
+                readAs,
+                minLedgerTimeAbsolute,
+                minLedgerTimeRelative,
+                deduplicationTime,
+                commands))
+        .build();
+  }
+
+  /** @deprecated since 2.5. Please use {@link #toProto(String, String, CommandsSubmission)} */
+  @Deprecated
+  public static CommandServiceOuterClass.SubmitAndWaitRequest toProto(
+      @NonNull String ledgerId,
+      @NonNull String workflowId,
+      @NonNull String applicationId,
+      @NonNull String commandId,
+      @NonNull String submissionId,
+      @NonNull List<@NonNull String> actAs,
+      @NonNull List<@NonNull String> readAs,
+      @NonNull Optional minLedgerTimeAbsolute,
+      @NonNull Optional minLedgerTimeRelative,
+      @NonNull Optional deduplicationTime,
+      @NonNull List<@NonNull Command> commands) {
+    return CommandServiceOuterClass.SubmitAndWaitRequest.newBuilder()
+        .setCommands(
+            SubmitCommandsRequest.toProto(
+                ledgerId,
+                workflowId,
+                applicationId,
+                commandId,
+                submissionId,
+                actAs,
+                readAs,
+                minLedgerTimeAbsolute,
+                minLedgerTimeRelative,
+                deduplicationTime,
+                commands))
+        .build();
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/SubmitCommandsRequest.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/SubmitCommandsRequest.java
new file mode 100644
index 0000000000..8ad69b3104
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/SubmitCommandsRequest.java
@@ -0,0 +1,571 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import static com.daml.ledger.javaapi.data.codegen.HasCommands.toCommands;
+import static java.util.Arrays.asList;
+
+import com.daml.ledger.api.v1.CommandsOuterClass;
+import com.google.protobuf.Timestamp;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.stream.Collectors;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+public final class SubmitCommandsRequest {
+
+  private final String workflowId;
+
+  private final String applicationId;
+
+  private final String commandId;
+
+  private final String party;
+  private final List actAs;
+  private final List readAs;
+
+  private final Optional minLedgerTimeAbsolute;
+  private final Optional minLedgerTimeRelative;
+  private final Optional deduplicationTime;
+  private final Optional submissionId;
+  private final List commands;
+
+  public SubmitCommandsRequest(
+      @NonNull String workflowId,
+      @NonNull String applicationId,
+      @NonNull String commandId,
+      @NonNull String party,
+      @NonNull Optional minLedgerTimeAbsolute,
+      @NonNull Optional minLedgerTimeRelative,
+      @NonNull Optional deduplicationTime,
+      @NonNull List<@NonNull Command> commands) {
+    this(
+        workflowId,
+        applicationId,
+        commandId,
+        asList(party),
+        asList(),
+        minLedgerTimeAbsolute,
+        minLedgerTimeRelative,
+        deduplicationTime,
+        commands);
+  }
+
+  public SubmitCommandsRequest(
+      @NonNull String workflowId,
+      @NonNull String applicationId,
+      @NonNull String commandId,
+      @NonNull String submissionId,
+      @NonNull String party,
+      @NonNull Optional minLedgerTimeAbsolute,
+      @NonNull Optional minLedgerTimeRelative,
+      @NonNull Optional deduplicationTime,
+      @NonNull List<@NonNull Command> commands) {
+    this(
+        workflowId,
+        applicationId,
+        commandId,
+        asList(party),
+        asList(),
+        minLedgerTimeAbsolute,
+        minLedgerTimeRelative,
+        deduplicationTime,
+        Optional.of(submissionId),
+        commands);
+  }
+
+  public SubmitCommandsRequest(
+      @NonNull String workflowId,
+      @NonNull String applicationId,
+      @NonNull String commandId,
+      @NonNull List<@NonNull String> actAs,
+      @NonNull List<@NonNull String> readAs,
+      @NonNull Optional minLedgerTimeAbsolute,
+      @NonNull Optional minLedgerTimeRelative,
+      @NonNull Optional deduplicationTime,
+      @NonNull List<@NonNull Command> commands) {
+    this(
+        workflowId,
+        applicationId,
+        commandId,
+        actAs,
+        readAs,
+        minLedgerTimeAbsolute,
+        minLedgerTimeRelative,
+        deduplicationTime,
+        Optional.empty(),
+        commands);
+  }
+
+  public SubmitCommandsRequest(
+      @NonNull String workflowId,
+      @NonNull String applicationId,
+      @NonNull String commandId,
+      @NonNull String submissionId,
+      @NonNull List<@NonNull String> actAs,
+      @NonNull List<@NonNull String> readAs,
+      @NonNull Optional minLedgerTimeAbsolute,
+      @NonNull Optional minLedgerTimeRelative,
+      @NonNull Optional deduplicationTime,
+      @NonNull List<@NonNull Command> commands) {
+    this(
+        workflowId,
+        applicationId,
+        commandId,
+        actAs,
+        readAs,
+        minLedgerTimeAbsolute,
+        minLedgerTimeRelative,
+        deduplicationTime,
+        Optional.of(submissionId),
+        commands);
+  }
+
+  private SubmitCommandsRequest(
+      @NonNull String workflowId,
+      @NonNull String applicationId,
+      @NonNull String commandId,
+      @NonNull List<@NonNull String> actAs,
+      @NonNull List<@NonNull String> readAs,
+      @NonNull Optional minLedgerTimeAbsolute,
+      @NonNull Optional minLedgerTimeRelative,
+      @NonNull Optional deduplicationTime,
+      @NonNull Optional submissionId,
+      @NonNull List<@NonNull Command> commands) {
+    if (actAs.size() == 0) {
+      throw new IllegalArgumentException("actAs must have at least one element");
+    }
+    this.workflowId = workflowId;
+    this.applicationId = applicationId;
+    this.commandId = commandId;
+    this.party = actAs.get(0);
+    this.actAs = List.copyOf(actAs);
+    this.readAs = List.copyOf(readAs);
+    this.minLedgerTimeAbsolute = minLedgerTimeAbsolute;
+    this.minLedgerTimeRelative = minLedgerTimeRelative;
+    this.deduplicationTime = deduplicationTime;
+    this.submissionId = submissionId;
+    this.commands = commands;
+  }
+
+  public static SubmitCommandsRequest fromProto(CommandsOuterClass.Commands commands) {
+    String workflowId = commands.getWorkflowId();
+    String applicationId = commands.getApplicationId();
+    String commandId = commands.getCommandId();
+    String party = commands.getParty();
+    List actAs = commands.getActAsList();
+    List readAs = commands.getReadAsList();
+    Optional minLedgerTimeAbs =
+        commands.hasMinLedgerTimeAbs()
+            ? Optional.of(
+                Instant.ofEpochSecond(
+                    commands.getMinLedgerTimeAbs().getSeconds(),
+                    commands.getMinLedgerTimeAbs().getNanos()))
+            : Optional.empty();
+    Optional minLedgerTimeRel =
+        commands.hasMinLedgerTimeRel()
+            ? Optional.of(
+                Duration.ofSeconds(
+                    commands.getMinLedgerTimeRel().getSeconds(),
+                    commands.getMinLedgerTimeRel().getNanos()))
+            : Optional.empty();
+    Optional deduplicationPeriod = Optional.empty();
+    switch (commands.getDeduplicationPeriodCase()) {
+      case DEDUPLICATION_DURATION:
+        com.google.protobuf.Duration d = commands.getDeduplicationDuration();
+        deduplicationPeriod = Optional.of(Duration.ofSeconds(d.getSeconds(), d.getNanos()));
+        break;
+      case DEDUPLICATION_TIME:
+        @SuppressWarnings("deprecation")
+        com.google.protobuf.Duration t = commands.getDeduplicationTime();
+        deduplicationPeriod = Optional.of(Duration.ofSeconds(t.getSeconds(), t.getNanos()));
+        break;
+      case DEDUPLICATIONPERIOD_NOT_SET:
+      default:
+        // Backwards compatibility: do not throw, this field could be empty from a previous version
+    }
+    String submissionId = commands.getSubmissionId();
+    ArrayList listOfCommands = new ArrayList<>(commands.getCommandsCount());
+    for (CommandsOuterClass.Command command : commands.getCommandsList()) {
+      listOfCommands.add(Command.fromProtoCommand(command));
+    }
+    if (!actAs.contains(party)) {
+      actAs.add(0, party);
+    }
+    return new SubmitCommandsRequest(
+        workflowId,
+        applicationId,
+        commandId,
+        actAs,
+        readAs,
+        minLedgerTimeAbs,
+        minLedgerTimeRel,
+        deduplicationPeriod,
+        submissionId.isEmpty() ? Optional.empty() : Optional.of(submissionId),
+        listOfCommands);
+  }
+
+  // TODO(i15642) Refactor this to take CommmandsSubmission when deprecated methods using it below are
+  // removed
+  private static CommandsOuterClass.Commands deprecatedToProto(
+      @NonNull String ledgerId,
+      @NonNull String workflowId,
+      @NonNull String applicationId,
+      @NonNull String commandId,
+      @NonNull List<@NonNull String> actAs,
+      @NonNull List<@NonNull String> readAs,
+      @NonNull Optional minLedgerTimeAbsolute,
+      @NonNull Optional minLedgerTimeRelative,
+      @NonNull Optional deduplicationTime,
+      @NonNull Optional submissionId,
+      @NonNull List<@NonNull Command> commands) {
+    if (actAs.size() == 0) {
+      throw new IllegalArgumentException("actAs must have at least one element");
+    }
+    List commandsConverted =
+        commands.stream().map(Command::toProtoCommand).collect(Collectors.toList());
+    CommandsOuterClass.Commands.Builder builder =
+        CommandsOuterClass.Commands.newBuilder()
+            .setLedgerId(ledgerId)
+            .setWorkflowId(workflowId)
+            .setApplicationId(applicationId)
+            .setCommandId(commandId)
+            .setParty(actAs.get(0))
+            .addAllActAs(actAs)
+            .addAllReadAs(readAs)
+            .addAllCommands(commandsConverted);
+    minLedgerTimeAbsolute.ifPresent(
+        abs ->
+            builder.setMinLedgerTimeAbs(
+                Timestamp.newBuilder().setSeconds(abs.getEpochSecond()).setNanos(abs.getNano())));
+    minLedgerTimeRelative.ifPresent(
+        rel ->
+            builder.setMinLedgerTimeRel(
+                com.google.protobuf.Duration.newBuilder()
+                    .setSeconds(rel.getSeconds())
+                    .setNanos(rel.getNano())));
+    deduplicationTime.ifPresent(
+        dedup -> {
+          @SuppressWarnings("deprecation")
+          var unused =
+              builder.setDeduplicationTime(
+                  com.google.protobuf.Duration.newBuilder()
+                      .setSeconds(dedup.getSeconds())
+                      .setNanos(dedup.getNano()));
+        });
+    submissionId.ifPresent(builder::setSubmissionId);
+    return builder.build();
+  }
+
+  private static CommandsOuterClass.Commands toProto(
+      @NonNull String ledgerId,
+      @NonNull Optional submissionId,
+      @NonNull CommandsSubmission submission) {
+
+    if (submission.getActAs().size() == 0) {
+      throw new IllegalArgumentException("actAs must have at least one element");
+    }
+
+    List commands = toCommands(submission.getCommands());
+    List commandsConverted =
+        commands.stream().map(Command::toProtoCommand).collect(Collectors.toList());
+    List disclosedContracts =
+        submission.getDisclosedContracts().stream()
+            .map(DisclosedContract::toProto)
+            .collect(Collectors.toList());
+
+    CommandsOuterClass.Commands.Builder builder =
+        CommandsOuterClass.Commands.newBuilder()
+            .setLedgerId(ledgerId)
+            .setApplicationId(submission.getApplicationId())
+            .setCommandId(submission.getCommandId())
+            .setParty(submission.getActAs().get(0))
+            .addAllActAs(submission.getActAs())
+            .addAllReadAs(submission.getReadAs())
+            .addAllCommands(commandsConverted)
+            .addAllDisclosedContracts(disclosedContracts);
+
+    submission
+        .getMinLedgerTimeAbs()
+        .ifPresent(
+            abs ->
+                builder.setMinLedgerTimeAbs(
+                    Timestamp.newBuilder()
+                        .setSeconds(abs.getEpochSecond())
+                        .setNanos(abs.getNano())));
+
+    submission
+        .getMinLedgerTimeRel()
+        .ifPresent(
+            rel ->
+                builder.setMinLedgerTimeRel(
+                    com.google.protobuf.Duration.newBuilder()
+                        .setSeconds(rel.getSeconds())
+                        .setNanos(rel.getNano())));
+
+    submission
+        .getDeduplicationTime()
+        .ifPresent(
+            dedup -> {
+              @SuppressWarnings("deprecation")
+              var unused =
+                  builder.setDeduplicationTime(
+                      com.google.protobuf.Duration.newBuilder()
+                          .setSeconds(dedup.getSeconds())
+                          .setNanos(dedup.getNano()));
+            });
+
+    submission.getWorkflowId().ifPresent(builder::setWorkflowId);
+    submissionId.ifPresent(builder::setSubmissionId);
+
+    return builder.build();
+  }
+
+  public static CommandsOuterClass.Commands toProto(
+      @NonNull String ledgerId, @NonNull CommandsSubmission submission) {
+    return toProto(ledgerId, Optional.empty(), submission);
+  }
+
+  /** @deprecated since 2.5. Please use {@link #toProto(String, CommandsSubmission)} */
+  @Deprecated
+  public static CommandsOuterClass.Commands toProto(
+      @NonNull String ledgerId,
+      @NonNull String workflowId,
+      @NonNull String applicationId,
+      @NonNull String commandId,
+      @NonNull List<@NonNull String> actAs,
+      @NonNull List<@NonNull String> readAs,
+      @NonNull Optional minLedgerTimeAbsolute,
+      @NonNull Optional minLedgerTimeRelative,
+      @NonNull Optional deduplicationTime,
+      @NonNull List<@NonNull Command> commands) {
+    return deprecatedToProto(
+        ledgerId,
+        workflowId,
+        applicationId,
+        commandId,
+        actAs,
+        readAs,
+        minLedgerTimeAbsolute,
+        minLedgerTimeRelative,
+        deduplicationTime,
+        Optional.empty(),
+        commands);
+  }
+
+  public static CommandsOuterClass.Commands toProto(
+      @NonNull String ledgerId,
+      @NonNull String submissionId,
+      @NonNull CommandsSubmission submission) {
+    return toProto(ledgerId, Optional.of(submissionId), submission);
+  }
+
+  /** @deprecated since 2.5. Please use {@link #toProto(String, String, CommandsSubmission)} */
+  @Deprecated
+  public static CommandsOuterClass.Commands toProto(
+      @NonNull String ledgerId,
+      @NonNull String workflowId,
+      @NonNull String applicationId,
+      @NonNull String commandId,
+      @NonNull String submissionId,
+      @NonNull List<@NonNull String> actAs,
+      @NonNull List<@NonNull String> readAs,
+      @NonNull Optional minLedgerTimeAbsolute,
+      @NonNull Optional minLedgerTimeRelative,
+      @NonNull Optional deduplicationTime,
+      @NonNull List<@NonNull Command> commands) {
+    return deprecatedToProto(
+        ledgerId,
+        workflowId,
+        applicationId,
+        commandId,
+        actAs,
+        readAs,
+        minLedgerTimeAbsolute,
+        minLedgerTimeRelative,
+        deduplicationTime,
+        Optional.of(submissionId),
+        commands);
+  }
+
+  /** @deprecated since 2.5. Please use {@link #toProto(String, String, CommandsSubmission)} */
+  @Deprecated
+  public static CommandsOuterClass.Commands toProto(
+      @NonNull String ledgerId,
+      @NonNull String workflowId,
+      @NonNull String applicationId,
+      @NonNull String commandId,
+      @NonNull String submissionId,
+      @NonNull String party,
+      @NonNull Optional minLedgerTimeAbsolute,
+      @NonNull Optional minLedgerTimeRelative,
+      @NonNull Optional deduplicationTime,
+      @NonNull List<@NonNull Command> commands) {
+    List empty_read_as = new ArrayList<>();
+    List act_as = new ArrayList<>();
+    act_as.add(party);
+    return deprecatedToProto(
+        ledgerId,
+        workflowId,
+        applicationId,
+        commandId,
+        act_as,
+        empty_read_as,
+        minLedgerTimeAbsolute,
+        minLedgerTimeRelative,
+        deduplicationTime,
+        Optional.of(submissionId),
+        commands);
+  }
+
+  /** @deprecated since 2.5. Please use {@link #toProto(String, CommandsSubmission)} */
+  @Deprecated
+  public static CommandsOuterClass.Commands toProto(
+      @NonNull String ledgerId,
+      @NonNull String workflowId,
+      @NonNull String applicationId,
+      @NonNull String commandId,
+      @NonNull String party,
+      @NonNull Optional minLedgerTimeAbsolute,
+      @NonNull Optional minLedgerTimeRelative,
+      @NonNull Optional deduplicationTime,
+      @NonNull List<@NonNull Command> commands) {
+    List empty_read_as = new ArrayList<>();
+    List act_as = new ArrayList<>();
+    act_as.add(party);
+    return deprecatedToProto(
+        ledgerId,
+        workflowId,
+        applicationId,
+        commandId,
+        act_as,
+        empty_read_as,
+        minLedgerTimeAbsolute,
+        minLedgerTimeRelative,
+        deduplicationTime,
+        Optional.empty(),
+        commands);
+  }
+
+  @NonNull
+  public String getWorkflowId() {
+    return workflowId;
+  }
+
+  @NonNull
+  public String getApplicationId() {
+    return applicationId;
+  }
+
+  @NonNull
+  public String getCommandId() {
+    return commandId;
+  }
+
+  @NonNull
+  public String getParty() {
+    return party;
+  }
+
+  @NonNull
+  public List getActAs() {
+    return actAs;
+  }
+
+  @NonNull
+  public List getReadAs() {
+    return readAs;
+  }
+
+  @NonNull
+  public Optional getMinLedgerTimeAbsolute() {
+    return minLedgerTimeAbsolute;
+  }
+
+  @NonNull
+  public Optional getMinLedgerTimeRelative() {
+    return minLedgerTimeRelative;
+  }
+
+  @NonNull
+  public Optional getDeduplicationTime() {
+    return deduplicationTime;
+  }
+
+  @NonNull
+  public Optional getSubmissionId() {
+    return submissionId;
+  }
+
+  @NonNull
+  public List<@NonNull Command> getCommands() {
+    return commands;
+  }
+
+  @Override
+  public String toString() {
+    return "SubmitCommandsRequest{"
+        + "workflowId='"
+        + workflowId
+        + '\''
+        + ", applicationId='"
+        + applicationId
+        + '\''
+        + ", commandId='"
+        + commandId
+        + '\''
+        + ", party='"
+        + party
+        + '\''
+        + ", minLedgerTimeAbs="
+        + minLedgerTimeAbsolute
+        + ", minLedgerTimeRel="
+        + minLedgerTimeRelative
+        + ", deduplicationTime="
+        + deduplicationTime
+        + ", submissionId="
+        + submissionId
+        + ", commands="
+        + commands
+        + '}';
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    SubmitCommandsRequest submitCommandsRequest1 = (SubmitCommandsRequest) o;
+    return Objects.equals(workflowId, submitCommandsRequest1.workflowId)
+        && Objects.equals(applicationId, submitCommandsRequest1.applicationId)
+        && Objects.equals(commandId, submitCommandsRequest1.commandId)
+        && Objects.equals(party, submitCommandsRequest1.party)
+        && Objects.equals(actAs, submitCommandsRequest1.actAs)
+        && Objects.equals(readAs, submitCommandsRequest1.readAs)
+        && Objects.equals(minLedgerTimeAbsolute, submitCommandsRequest1.minLedgerTimeAbsolute)
+        && Objects.equals(minLedgerTimeRelative, submitCommandsRequest1.minLedgerTimeRelative)
+        && Objects.equals(deduplicationTime, submitCommandsRequest1.deduplicationTime)
+        && Objects.equals(submissionId, submitCommandsRequest1.submissionId)
+        && Objects.equals(commands, submitCommandsRequest1.commands);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(
+        workflowId,
+        applicationId,
+        commandId,
+        party,
+        actAs,
+        readAs,
+        minLedgerTimeAbsolute,
+        minLedgerTimeRelative,
+        deduplicationTime,
+        submissionId,
+        commands);
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/SubmitRequest.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/SubmitRequest.java
new file mode 100644
index 0000000000..956779ed97
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/SubmitRequest.java
@@ -0,0 +1,146 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.api.v1.CommandSubmissionServiceOuterClass;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.List;
+import java.util.Optional;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+public final class SubmitRequest {
+
+  public static CommandSubmissionServiceOuterClass.SubmitRequest toProto(
+      @NonNull String ledgerId, @NonNull CommandsSubmission submission) {
+    return CommandSubmissionServiceOuterClass.SubmitRequest.newBuilder()
+        .setCommands(SubmitCommandsRequest.toProto(ledgerId, submission))
+        .build();
+  }
+
+  public static CommandSubmissionServiceOuterClass.SubmitRequest toProto(
+      @NonNull String ledgerId,
+      @NonNull String submissionId,
+      @NonNull CommandsSubmission submission) {
+    return CommandSubmissionServiceOuterClass.SubmitRequest.newBuilder()
+        .setCommands(SubmitCommandsRequest.toProto(ledgerId, submissionId, submission))
+        .build();
+  }
+
+  /** @deprecated since 2.5. Please use {@link #toProto(String, CommandsSubmission)} */
+  @Deprecated
+  public static CommandSubmissionServiceOuterClass.SubmitRequest toProto(
+      @NonNull String ledgerId,
+      @NonNull String workflowId,
+      @NonNull String applicationId,
+      @NonNull String commandId,
+      @NonNull String party,
+      @NonNull Optional minLedgerTimeAbs,
+      @NonNull Optional minLedgerTimeRel,
+      @NonNull Optional deduplicationTime,
+      @NonNull List<@NonNull Command> commands) {
+    return CommandSubmissionServiceOuterClass.SubmitRequest.newBuilder()
+        .setCommands(
+            SubmitCommandsRequest.toProto(
+                ledgerId,
+                workflowId,
+                applicationId,
+                commandId,
+                party,
+                minLedgerTimeAbs,
+                minLedgerTimeRel,
+                deduplicationTime,
+                commands))
+        .build();
+  }
+
+  /** @deprecated since 2.5. Please use {@link #toProto(String, CommandsSubmission)} */
+  @Deprecated
+  public static CommandSubmissionServiceOuterClass.SubmitRequest toProto(
+      @NonNull String ledgerId,
+      @NonNull String workflowId,
+      @NonNull String applicationId,
+      @NonNull String commandId,
+      @NonNull List<@NonNull String> actAs,
+      @NonNull List<@NonNull String> readAs,
+      @NonNull Optional minLedgerTimeAbs,
+      @NonNull Optional minLedgerTimeRel,
+      @NonNull Optional deduplicationTime,
+      @NonNull List<@NonNull Command> commands) {
+    return CommandSubmissionServiceOuterClass.SubmitRequest.newBuilder()
+        .setCommands(
+            SubmitCommandsRequest.toProto(
+                ledgerId,
+                workflowId,
+                applicationId,
+                commandId,
+                actAs,
+                readAs,
+                minLedgerTimeAbs,
+                minLedgerTimeRel,
+                deduplicationTime,
+                commands))
+        .build();
+  }
+
+  /**
+   * Builds a submit request carrying the given commands on behalf of a single party, tagged with
+   * an explicit submission identifier.
+   *
+   * @param ledgerId the target ledger's identifier
+   * @param workflowId an optional workflow correlation identifier (may be empty)
+   * @param applicationId the submitting application's identifier
+   * @param commandId the unique command identifier used for deduplication
+   * @param submissionId the identifier distinguishing this particular submission attempt
+   * @param party the single party on whose behalf the commands are submitted
+   * @param minLedgerTimeAbs lower bound on the ledger time, as an absolute instant, if any
+   * @param minLedgerTimeRel lower bound on the ledger time, relative to submission, if any
+   * @param deduplicationTime the deduplication window for this command, if any
+   * @param commands the commands to submit
+   * @return the wire-format submit request
+   * @deprecated since 2.5. Please use {@link #toProto(String, String, CommandsSubmission)}
+   */
+  @Deprecated
+  public static CommandSubmissionServiceOuterClass.SubmitRequest toProto(
+      @NonNull String ledgerId,
+      @NonNull String workflowId,
+      @NonNull String applicationId,
+      @NonNull String commandId,
+      @NonNull String submissionId,
+      @NonNull String party,
+      @NonNull Optional<java.time.Instant> minLedgerTimeAbs,
+      @NonNull Optional<java.time.Duration> minLedgerTimeRel,
+      @NonNull Optional<java.time.Duration> deduplicationTime,
+      @NonNull List<@NonNull Command> commands) {
+    // Delegate the heavy lifting to SubmitCommandsRequest, which builds the inner
+    // Commands payload; this method only wraps it in a SubmitRequest envelope.
+    return CommandSubmissionServiceOuterClass.SubmitRequest.newBuilder()
+        .setCommands(
+            SubmitCommandsRequest.toProto(
+                ledgerId,
+                workflowId,
+                applicationId,
+                commandId,
+                submissionId,
+                party,
+                minLedgerTimeAbs,
+                minLedgerTimeRel,
+                deduplicationTime,
+                commands))
+        .build();
+  }
+
+  /**
+   * Builds a submit request carrying the given commands on behalf of the {@code actAs} parties,
+   * with read delegation to the {@code readAs} parties, tagged with an explicit submission
+   * identifier.
+   *
+   * @param ledgerId the target ledger's identifier
+   * @param workflowId an optional workflow correlation identifier (may be empty)
+   * @param applicationId the submitting application's identifier
+   * @param commandId the unique command identifier used for deduplication
+   * @param submissionId the identifier distinguishing this particular submission attempt
+   * @param actAs parties on whose behalf the commands are submitted
+   * @param readAs parties whose visibility is delegated for command interpretation
+   * @param minLedgerTimeAbs lower bound on the ledger time, as an absolute instant, if any
+   * @param minLedgerTimeRel lower bound on the ledger time, relative to submission, if any
+   * @param deduplicationTime the deduplication window for this command, if any
+   * @param commands the commands to submit
+   * @return the wire-format submit request
+   * @deprecated since 2.5. Please use {@link #toProto(String, String, CommandsSubmission)}
+   */
+  @Deprecated
+  public static CommandSubmissionServiceOuterClass.SubmitRequest toProto(
+      @NonNull String ledgerId,
+      @NonNull String workflowId,
+      @NonNull String applicationId,
+      @NonNull String commandId,
+      @NonNull String submissionId,
+      @NonNull List<@NonNull String> actAs,
+      @NonNull List<@NonNull String> readAs,
+      @NonNull Optional<java.time.Instant> minLedgerTimeAbs,
+      @NonNull Optional<java.time.Duration> minLedgerTimeRel,
+      @NonNull Optional<java.time.Duration> deduplicationTime,
+      @NonNull List<@NonNull Command> commands) {
+    // Delegate the heavy lifting to SubmitCommandsRequest, which builds the inner
+    // Commands payload; this method only wraps it in a SubmitRequest envelope.
+    return CommandSubmissionServiceOuterClass.SubmitRequest.newBuilder()
+        .setCommands(
+            SubmitCommandsRequest.toProto(
+                ledgerId,
+                workflowId,
+                applicationId,
+                commandId,
+                submissionId,
+                actAs,
+                readAs,
+                minLedgerTimeAbs,
+                minLedgerTimeRel,
+                deduplicationTime,
+                commands))
+        .build();
+  }
+}
diff --git a/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Template.java b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Template.java
new file mode 100644
index 0000000000..00fed1adcb
--- /dev/null
+++ b/canton-3x/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/Template.java
@@ -0,0 +1,47 @@
+// Copyright (c) 2023 Digital Asset (Switzerland) GmbH and/or its affiliates.
+// Proprietary code. All rights reserved.
+
+package com.daml.ledger.javaapi.data;
+
+import com.daml.ledger.javaapi.data.codegen.Contract;
+import com.daml.ledger.javaapi.data.codegen.ContractCompanion;
+import com.daml.ledger.javaapi.data.codegen.ContractId;
+import com.daml.ledger.javaapi.data.codegen.CreateAnd;
+import com.daml.ledger.javaapi.data.codegen.Created;
+import com.daml.ledger.javaapi.data.codegen.Update;
+
+public abstract class Template extends com.daml.ledger.javaapi.data.codegen.DamlRecord