update canton to 20240527.13372.v40019d14 (#19277)

* update canton to 20240527.13372.v40019d14

tell-slack: canton

* sync to canton @ 6da046644316b095ba529e9064c9e4bc9ed4d7e0

* fix //canton:community_domain

* fix more import issues

* Disable tests for "Skipping upgrade validation" msg until canton#19354

---------

Co-authored-by: Azure Pipelines Daml Build <support@digitalasset.com>
Co-authored-by: Paul Brauner <paul.brauner@digitalasset.com>
Co-authored-by: Dylan Thinnes <dylan.thinnes@digitalasset.com>
azure-pipelines[bot] 2024-05-28 18:32:35 +02:00 committed by GitHub
parent cca392ad54
commit 5e7a52378c
399 changed files with 9130 additions and 7653 deletions

View File

@ -834,6 +834,7 @@ scala_library(
"@maven//:io_grpc_grpc_stub",
"@maven//:io_netty_netty_handler",
"@maven//:io_opentelemetry_opentelemetry_api",
"@maven//:io_scalaland_chimney_2_13",
"@maven//:junit_junit",
"@maven//:org_apache_pekko_pekko_actor_2_13",
"@maven//:org_postgresql_postgresql",
@ -963,6 +964,7 @@ scala_library(
"//libs-scala/resources-pekko",
"//libs-scala/rs-grpc-bridge",
"//libs-scala/scala-utils",
"//libs-scala/timer-utils",
"//observability/metrics",
"//observability/tracing",
"@maven//:com_auth0_java_jwt",
@ -987,7 +989,6 @@ scala_library(
"@maven//:io_netty_netty_handler",
"@maven//:io_opentelemetry_instrumentation_opentelemetry_grpc_1_6",
"@maven//:io_opentelemetry_opentelemetry_api",
"@maven//:io_opentelemetry_opentelemetry_context",
"@maven//:io_scalaland_chimney_2_13",
"@maven//:junit_junit",
"@maven//:org_apache_pekko_pekko_actor_2_13",
@ -1225,6 +1226,7 @@ scala_library(
"@maven//:com_github_pathikrit_better_files_2_13",
"@maven//:com_github_pureconfig_pureconfig_core_2_13",
"@maven//:com_github_scopt_scopt_2_13",
"@maven//:com_google_protobuf_protobuf_java",
"@maven//:com_lihaoyi_ammonite_2_13_11",
"@maven//:com_lihaoyi_ammonite_compiler_2_13_11",
"@maven//:com_lihaoyi_ammonite_compiler_interface_2_13_11",

View File

@ -56,7 +56,7 @@ final case class StaticDomainParameters(
BinaryFileUtil.writeByteStringToFile(outputFile, toInternal.toByteString)
private[canton] def toInternal: StaticDomainParametersInternal =
StaticDomainParametersInternal.create(
StaticDomainParametersInternal(
requiredSigningKeySchemes = NonEmptyUtil.fromUnsafe(
requiredSigningKeySchemes.map(_.transformInto[DomainCrypto.SigningKeyScheme])
),
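The conversion above maps the console-level signing key schemes to the internal ones with chimney's transformInto (hence the new io_scalaland_chimney_2_13 entry in the BUILD file). A minimal, self-contained sketch of that pattern, using hypothetical ApiScheme/InternalScheme enums rather than Canton's actual types:

import io.scalaland.chimney.dsl.*

// Hypothetical stand-ins for the console-level and internal scheme enums.
sealed trait ApiScheme
object ApiScheme {
  case object Ed25519 extends ApiScheme
  case object EcDsaP256 extends ApiScheme
}

sealed trait InternalScheme
object InternalScheme {
  case object Ed25519 extends InternalScheme
  case object EcDsaP256 extends InternalScheme
}

object ChimneyExample extends App {
  // Chimney matches the subtypes of the two sealed hierarchies by name,
  // so no hand-written mapping is needed when the cases line up.
  val internal: InternalScheme = (ApiScheme.Ed25519: ApiScheme).transformInto[InternalScheme]
  println(internal) // Ed25519
}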

View File

@ -893,6 +893,7 @@ abstract class SequencerReference(
val currentActive = currentMediators.item.active
val currentObservers = currentMediators.item.observers
val current = currentActive ++ currentObservers
val serial = currentMediators.context.serial.increment
val newMediators =
(additionalActive ++ additionalObservers).filterNot(m => current.contains(m.id))
@ -910,6 +911,7 @@ abstract class SequencerReference(
active = (currentActive ++ additionalActive.map(_.id)).distinct,
observers = (currentObservers ++ additionalObservers.map(_.id)).distinct,
group = group,
serial = Some(serial),
)
.discard

View File

@ -1748,15 +1748,11 @@ class TopologyAdministrationGroup(
- "<domain-id>": the topology transaction will be directly submitted to the specified domain without
storing it locally first. This also means it will _not_ be synchronized to other domains
automatically.
filterParticipant: Filter for participants starting with the given filter string.
mustFullyAuthorize: when set to true, the proposal's previously received signatures and the signature of this node must be
filterParticipant: Filter for participants starting with the given filter string.
mustFullyAuthorize: when set to true, the proposal's previously received signatures and the signature of this node must be
sufficient to fully authorize the topology transaction. if this is not the case, the request fails.
when set to false, the proposal retains the proposal status until enough signatures are accumulated to
satisfy the mapping's authorization requirements.
serial: the expected serial this topology transaction should have. Serials must be contiguous and start at 1.
This transaction will be rejected if another fully authorized transaction with the same serial already
exists, or if there is a gap between this serial and the most recently used serial.
If None, the serial will be automatically selected by the node.
signedBy: the fingerprint of the key to be used to sign this proposal
|"""
)
@ -1771,7 +1767,6 @@ class TopologyAdministrationGroup(
synchronize: Option[NonNegativeDuration] = Some(
consoleEnvironment.commandTimeouts.bounded
),
serial: Option[PositiveInt] = None,
signedBy: Option[Fingerprint] = Some(
instance.id.fingerprint
), // TODO(#12945) don't use the instance's root namespace key by default.
@ -1788,9 +1783,13 @@ class TopologyAdministrationGroup(
"Ensure that at least one of the two parameters (adds or removes) is not empty."
)
case (_, _) =>
val newDiffPackageIds = current0 match {
case Some(value) => ((value.item.packageIds ++ adds).diff(removes)).distinct
case None => (adds.diff(removes)).distinct
val (newSerial, newDiffPackageIds) = current0 match {
case Some(value) =>
(
value.context.serial.increment,
((value.item.packageIds ++ adds).diff(removes)).distinct,
)
case None => (PositiveInt.one, (adds.diff(removes)).distinct)
}
propose(
@ -1800,7 +1799,7 @@ class TopologyAdministrationGroup(
store,
mustFullyAuthorize,
synchronize,
serial,
Some(newSerial),
signedBy,
)
}
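This hunk derives the serial instead of taking it as a parameter: the new proposal uses the stored mapping's serial plus one, or 1 when no fully authorized mapping exists yet (the removed help text notes that serials must be contiguous and start at 1). A minimal sketch of that selection rule, using a plain Int in place of Canton's PositiveInt:

object SerialExample {
  // Hypothetical simplification: Option[Int] stands in for the optional serial of the
  // currently stored topology mapping; Canton uses PositiveInt and a store query result.
  def nextSerial(currentSerial: Option[Int]): Int =
    currentSerial match {
      case Some(serial) => serial + 1 // bump the serial of the existing mapping
      case None         => 1          // first transaction for this mapping
    }

  // nextSerial(None)    == 1
  // nextSerial(Some(3)) == 4
}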
@ -2044,20 +2043,21 @@ class TopologyAdministrationGroup(
.verifyProposalConsistency(adds, removes, observerAdds, observerRemoves, updateThreshold)
.valueOr(err => throw new IllegalArgumentException(err))
def queryStore(proposals: Boolean): Option[MediatorDomainState] = expectAtMostOneResult(
list(
domainId.filterString,
group = Some(group),
operation = Some(TopologyChangeOp.Replace),
proposals = proposals,
)
).map(_.item)
def queryStore(proposals: Boolean): Option[(PositiveInt, MediatorDomainState)] =
expectAtMostOneResult(
list(
domainId.filterString,
group = Some(group),
operation = Some(TopologyChangeOp.Replace),
proposals = proposals,
)
).map(result => (result.context.serial, result.item))
val mdsO = queryStore(proposals = false)
val maybeSerialAndMediatorDomainState = queryStore(proposals = false)
MediatorGroupDeltaComputations
.verifyProposalAgainstCurrentState(
mdsO,
maybeSerialAndMediatorDomainState.map(_._2),
adds,
removes,
observerAdds,
@ -2066,15 +2066,16 @@ class TopologyAdministrationGroup(
)
.valueOr(err => throw new IllegalArgumentException(err))
val (threshold, active, observers) = mdsO match {
case Some(mds) =>
val (serial, threshold, active, observers) = maybeSerialAndMediatorDomainState match {
case Some((currentSerial, mds)) =>
(
currentSerial.increment,
mds.threshold,
mds.active.forgetNE.concat(adds).diff(removes),
mds.observers.concat(observerAdds).diff(observerRemoves),
)
case None =>
(PositiveInt.one, adds, observerAdds)
(PositiveInt.one, PositiveInt.one, adds, observerAdds)
}
propose(
@ -2087,15 +2088,18 @@ class TopologyAdministrationGroup(
synchronize = None, // no synchronize - instead rely on await below
mustFullyAuthorize = mustFullyAuthorize,
signedBy = signedBy,
serial = Some(serial),
).discard
await.foreach { timeout =>
ConsoleMacros.utils.retry_until_true(timeout) {
def areAllChangesPersisted(mds: MediatorDomainState): Boolean = {
adds.forall(mds.active.contains) && removes.forall(!mds.active.contains(_)) &&
observerAdds.forall(mds.observers.contains) && observerRemoves.forall(
!mds.observers.contains(_)
) && updateThreshold.forall(_ == mds.threshold)
def areAllChangesPersisted: ((PositiveInt, MediatorDomainState)) => Boolean = {
case (serialFound, mds) =>
serialFound == serial &&
adds.forall(mds.active.contains) && removes.forall(!mds.active.contains(_)) &&
observerAdds.forall(mds.observers.contains) && observerRemoves.forall(
!mds.observers.contains(_)
) && updateThreshold.forall(_ == mds.threshold)
}
if (mustFullyAuthorize) {

View File

@ -19,6 +19,7 @@ import com.digitalasset.canton.console.{
}
import com.digitalasset.canton.crypto.*
import com.digitalasset.canton.crypto.admin.grpc.PrivateKeyMetadata
import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
import com.digitalasset.canton.logging.NamedLoggerFactory
import com.digitalasset.canton.time.Clock
import com.digitalasset.canton.topology.store.TopologyStoreId.AuthorizedStore
@ -29,7 +30,7 @@ import com.digitalasset.canton.version.ProtocolVersion
import com.google.protobuf.ByteString
import java.time.Instant
import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.ExecutionContext
class SecretKeyAdministration(
instance: InstanceReference,
@ -529,10 +530,12 @@ class LocalSecretKeyAdministration(
)(implicit executionContext: ExecutionContext)
extends SecretKeyAdministration(instance, runner, consoleEnvironment, loggerFactory) {
private def run[V](eitherT: EitherT[Future, String, V], action: String): V = {
private def run[V](eitherT: EitherT[FutureUnlessShutdown, String, V], action: String): V = {
import TraceContext.Implicits.Empty.*
consoleEnvironment.environment.config.parameters.timeouts.processing.default
.await(action)(eitherT.value) match {
.await(action)(
eitherT.onShutdown(throw new RuntimeException("aborted due to shutdown.")).value
) match {
case Left(error) =>
throw new IllegalArgumentException(s"Problem while $action. Error: $error")
case Right(value) => value
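Much of this update mechanically widens signatures from EitherT[Future, ...] to EitherT[FutureUnlessShutdown, ...]; callers that need a plain result, like the rewritten run above, collapse the shutdown case explicitly via onShutdown. A rough, self-contained sketch of that shape using a simplified stand-in type, not Canton's actual FutureUnlessShutdown:

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.*
import scala.concurrent.{Await, Future}

// Hypothetical stand-in: a computation that either produces an outcome or is cut short by shutdown.
final case class FutureUnlessShutdownLike[A](underlying: Future[Option[A]]) {
  // Collapse the shutdown case, mirroring eitherT.onShutdown(...) in the diff above.
  def onShutdown(fallback: => A): Future[A] = underlying.map(_.getOrElse(fallback))
}

object RunExample extends App {
  val result: FutureUnlessShutdownLike[Either[String, Int]] =
    FutureUnlessShutdownLike(Future.successful(Some(Right(42))))

  val collapsed: Future[Either[String, Int]] =
    result.onShutdown(throw new RuntimeException("aborted due to shutdown."))

  Await.result(collapsed, 1.second) match {
    case Left(error)  => throw new IllegalArgumentException(s"Problem: $error")
    case Right(value) => println(value) // 42
  }
}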
@ -551,13 +554,12 @@ class LocalSecretKeyAdministration(
.toRight(
"The selected crypto provider does not support exporting of private keys."
)
.toEitherT[Future]
.toEitherT[FutureUnlessShutdown]
privateKey <- cryptoPrivateStore
.exportPrivateKey(fingerprint)
.leftMap(_.toString)
.subflatMap(_.toRight(s"no private key found for [$fingerprint]"))
.leftMap(err => s"Error retrieving private key [$fingerprint] $err")
.onShutdown(sys.error("aborted due to shutdown"))
publicKey <- crypto.cryptoPublicStore
.publicKey(fingerprint)
.leftMap(_.toString)

View File

@ -4,6 +4,7 @@
package com.digitalasset.canton
import com.digitalasset.canton.console.{HeadlessConsole, InteractiveConsole}
import com.digitalasset.canton.crypto.{Hash, HashAlgorithm, HashPurpose}
import com.digitalasset.canton.environment.Environment
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging, TracedLogger}
import com.digitalasset.canton.tracing.{NoTracing, TraceContext}
@ -168,13 +169,15 @@ object ConsoleScriptRunner extends NoTracing {
try {
for {
scriptCode <- cantonScript.read()
scriptHash = normalizedScriptHash(scriptCode)
_ = logger.info(s"Running script ${cantonScript.path} ($scriptHash)")
_ <- HeadlessConsole.run(
consoleEnvironment,
scriptCode,
cantonScript.path,
// clone error stream such that we also log the error message
// unfortunately, this means that if somebody outputs INFO to stdout,
// he will observe the error twice
// they will observe the error twice
transformer = x => x.copy(errorStream = new CopyOutputWriter(x.errorStream, logger)),
logger = logger,
)
@ -183,4 +186,13 @@ object ConsoleScriptRunner extends NoTracing {
consoleEnvironment.closeChannels()
}
}
/** Replace all line endings with \n so that the hash is the same across different platforms */
private def normalizedScriptHash(script: String): Hash = {
val normalized = script.replaceAll("\\r\\n|\\r|\\n", "\n")
Hash
.build(HashPurpose.CantonScript, HashAlgorithm.Sha256)
.add(normalized)
.finish()
}
}
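The new normalizedScriptHash keeps the logged script hash independent of platform line endings. A simplified illustration of the same normalization, using the standard MessageDigest API in place of Canton's Hash builder:

import java.nio.charset.StandardCharsets
import java.security.MessageDigest

object ScriptHashExample extends App {
  // Same normalization as above: collapse \r\n and \r to \n before hashing.
  def normalizedSha256(script: String): Array[Byte] = {
    val normalized = script.replaceAll("\\r\\n|\\r|\\n", "\n")
    MessageDigest.getInstance("SHA-256").digest(normalized.getBytes(StandardCharsets.UTF_8))
  }

  // A script saved with Windows line endings hashes the same as its Unix counterpart.
  assert(
    normalizedSha256("val x = 1\r\nval y = 2\r\n")
      .sameElements(normalizedSha256("val x = 1\nval y = 2\n"))
  )
}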

View File

@ -23,16 +23,16 @@ service SequencerService {
//
// The sequencer may or may not accept a request.
// If an incoming request is valid (i.e., it can be parsed and every field meets its documented validity conditions)
// the sequencer will accept requests in a best effort manner, provided resource limits are met.
// an honest sequencer will accept requests in a best effort manner, provided resource limits are met.
//
// For every accepted request, the sequencer assigns a unique sequencing timestamp to the request.
// The sequencer will deliver an event to the sender and to those domain members that are intended to receive at least one envelope.
// All honest sequencers of the domain will deliver an event to the sender and
// to those domain members that are intended to receive at least one envelope.
// An event for a domain member contains only those envelopes of the batch of the request
// that the member is intended to receive and
// the recipients of an envelope contain only those that the member is supposed to learn about.
// A member will receive the envelopes of the same request within a single event.
// A member will receive events ordered by sequencing timestamp.
// A member will only receive events corresponding to prior requests and no other events.
//
// If a request is accepted, the sender will receive a corresponding event, called "receipt";
// so that the sender is informed that the sequencer has accepted the request.
@ -52,6 +52,12 @@ service SequencerService {
// The sequencer will reject any further request that could otherwise be accepted.
// Consequently, events for the request are delivered only once even if the threshold is attained multiple times.
//
// If the request does not contain an aggregation rule, even honest sequencers may deliver events for the request
// more than once (with different sequencing timestamps), as malicious sequencers may replay a request internally.
// Clients need to implement appropriate deduplication, if at-most-once delivery is needed.
// Once request.max_sequencing_time has elapsed (i.e. an event with an equal or higher timestamp has been emitted),
// an honest sequencer will not emit events corresponding to the request anymore.
//
// The sequencer may reject a request, e.g., because the request is invalid or the sequencer is overloaded.
// The sequencer will indicate a rejection (independently of whether there is an aggregation rule)
// in exactly one of the following ways:
@ -59,6 +65,8 @@ service SequencerService {
// (2) asynchronously, by emitting an error in the response to SubscribeVersioned to the sender and
// possibly an empty batch to non-sender recipients
// (3) by not emitting a receipt to the sender until request.max_sequencing_time
// Note that only (2) and (3) can be trusted. A malicious sequencer may synchronously return an error and
// still accept the request internally and therefore emit events later on.
rpc SendAsyncVersioned(SendAsyncVersionedRequest) returns (SendAsyncVersionedResponse);
// Submit an unauthenticated request to the sequencer.
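The added comments spell out that, without an aggregation rule, a request may be delivered more than once (with different sequencing timestamps), so clients that need at-most-once processing have to deduplicate. A minimal client-side sketch keyed on a per-request message id; the event shape is hypothetical, not the actual protobuf schema:

import scala.collection.mutable

// Hypothetical, simplified event shape: a request id chosen by the sender plus a payload.
final case class SequencedEvent(messageId: String, payload: String)

// Remembers the ids already processed and drops replays. A real client would also prune
// an id once request.max_sequencing_time has elapsed, since an honest sequencer emits no
// further events for that request afterwards.
final class Deduplicator {
  private val seen = mutable.Set.empty[String]

  def processIfNew(event: SequencedEvent)(handle: SequencedEvent => Unit): Unit =
    if (seen.add(event.messageId)) handle(event)
}

object DeduplicationExample extends App {
  val dedup = new Deduplicator
  val events = Seq(
    SequencedEvent("req-1", "first delivery"),
    SequencedEvent("req-1", "replayed by a malicious sequencer"),
    SequencedEvent("req-2", "another request"),
  )
  events.foreach(event => dedup.processIfNew(event)(e => println(e.payload)))
  // prints "first delivery" and "another request" only
}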

View File

@ -84,13 +84,6 @@ message HealthDumpResponse {
bytes chunk = 1; // A chunk of the health dump file
}
// domain node specific extra status info
message DomainStatusInfo {
repeated string connected_participants = 1;
// optional - only set if a sequencer is being run by the domain
SequencerHealthStatus sequencer = 2;
}
// participant node specific extra status info
message ParticipantStatusInfo {
message ConnectedDomain {
@ -108,6 +101,7 @@ message SequencerNodeStatus {
// required - status of the sequencer component it is running
SequencerHealthStatus sequencer = 2;
string domain_id = 3;
SequencerAdminStatus admin = 4;
}
// status of the sequencer component
@ -118,6 +112,13 @@ message SequencerHealthStatus {
google.protobuf.StringValue details = 2;
}
// status of sequencer node administration
message SequencerAdminStatus {
// Indicates whether the sequencer node accepts administration changes; particularly relevant for HA sequencers,
// where the ability to make admin changes is determined via an active-passive model
bool accepts_admin_changes = 1;
}
message MediatorNodeStatus {
string domain_id = 1;
}

View File

@ -9,6 +9,7 @@ import "com/digitalasset/canton/crypto/v30/crypto.proto";
import "com/digitalasset/canton/protocol/v30/common.proto";
import "com/digitalasset/canton/protocol/v30/common_stable.proto";
import "com/digitalasset/canton/protocol/v30/merkle.proto";
import "com/digitalasset/canton/protocol/v30/quorum.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/wrappers.proto";
import "scalapb/scalapb.proto";
@ -56,8 +57,8 @@ message ViewCommonData {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StableProtoVersion";
com.digitalasset.canton.crypto.v30.Salt salt = 1;
repeated Informee informees = 2;
int32 threshold = 3;
repeated string informees = 2;
repeated v30.Quorum quorums = 3;
}
message Informee {
@ -92,6 +93,8 @@ message InformeeMessage {
}
message LightTransactionViewTree {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
GenTransactionTree tree = 1;
repeated bytes subview_hashes = 2;
}

View File

@ -0,0 +1,23 @@
// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
syntax = "proto3";
package com.digitalasset.canton.protocol.v30;
// Quorum
message Quorum {
// The list of confirmers that constitute a quorum. Each confirmer is represented by their index in the
// 'informees' field of the 'ViewCommonData' message.
repeated PartyIndexAndWeight party_index_and_weight = 1;
int32 threshold = 2;
}
// PartyIndexAndWeight
message PartyIndexAndWeight {
// The index of the confirming party in the 'informees' field of the 'ViewCommonData' message.
int32 index = 1;
int32 weight = 2;
}
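Each quorum therefore references confirmers by index into the informees list of ViewCommonData, assigns them weights, and carries a threshold. A hedged sketch of how such a quorum could be evaluated against a set of approving parties, with simplified types rather than Canton's validation code:

// Simplified mirrors of the protobuf messages above.
final case class PartyIndexAndWeight(index: Int, weight: Int)
final case class Quorum(partyIndexAndWeight: Seq[PartyIndexAndWeight], threshold: Int)

object QuorumExample extends App {
  // The informees of the view; confirmers are addressed by their index into this list.
  val informees = Seq("alice::ns", "bob::ns", "carol::ns")

  // Alice and Bob are confirmers with weight 1 each; a single approval suffices.
  val quorum = Quorum(Seq(PartyIndexAndWeight(0, 1), PartyIndexAndWeight(1, 1)), threshold = 1)

  // A quorum is met once the weights of the approving confirmers reach the threshold.
  def isMet(quorum: Quorum, approvals: Set[String]): Boolean = {
    val approvedWeight = quorum.partyIndexAndWeight.collect {
      case PartyIndexAndWeight(idx, weight) if approvals.contains(informees(idx)) => weight
    }.sum
    approvedWeight >= quorum.threshold
  }

  println(isMet(quorum, Set("bob::ns")))   // true
  println(isMet(quorum, Set("carol::ns"))) // false: carol is a plain informee, not a confirmer
}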

View File

@ -0,0 +1,16 @@
// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
syntax = "proto3";
package com.digitalasset.canton.protocol.v30;
import "scalapb/scalapb.proto";
message DynamicSequencingParameters {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
// Sequencing dynamic domain parameters can only be interpreted by a sequencer implementation
// and are opaque to the rest of the domain.
bytes payload = 1;
}

View File

@ -7,6 +7,7 @@ package com.digitalasset.canton.protocol.v30;
import "com/digitalasset/canton/crypto/v30/crypto.proto";
import "com/digitalasset/canton/protocol/v30/domain_parameters.proto";
import "com/digitalasset/canton/protocol/v30/sequencing_parameters.proto";
import "google/protobuf/wrappers.proto";
import "scalapb/scalapb.proto";
@ -234,6 +235,16 @@ message DomainParametersState {
DynamicDomainParameters domain_parameters = 2;
}
// which sequencing dynamic parameters are supposed to be used on the given domain; defaults are used when not set
// authorization: whoever controls the domain
// UNIQUE(domain)
message DynamicSequencingParametersState {
// domain affected by the new domain parameters
string domain = 1;
DynamicSequencingParameters sequencing_parameters = 2;
}
// which mediators and mediator groups are active on the given domain
// authorization: whoever controls the domain
// UNIQUE(domain, group)
@ -307,6 +318,7 @@ message TopologyMapping {
MediatorDomainState mediator_domain_state = 12;
SequencerDomainState sequencer_domain_state = 13;
PurgeTopologyTransaction purge_topology_txs = 14;
DynamicSequencingParametersState sequencing_dynamic_parameters_state = 15;
}
}

View File

@ -3,6 +3,7 @@
package com.digitalasset.canton.config
import com.digitalasset.canton.config.RequireTypes.NonNegativeInt
import com.digitalasset.canton.metrics.MetricsFactoryType
import com.digitalasset.canton.metrics.MetricsFactoryType.External
@ -30,6 +31,8 @@ import com.digitalasset.canton.metrics.MetricsFactoryType.External
* }}}
*
* See also the example in `EngineComputationAbortIntegrationTest`.
* @param maxCommitmentSendDelayMillis The maximum delay for sending commitments, in milliseconds. If not set,
* commitment sending is delayed by a random amount of at most the default value.
*/
final case class TestingConfigInternal(
testSequencerClientFor: Set[TestSequencerClientFor] = Set.empty,
@ -38,6 +41,7 @@ final case class TestingConfigInternal(
initializeGlobalOpenTelemetry: Boolean = true,
doNotUseCommitmentCachingFor: Set[String] = Set.empty,
reinterpretationTestHookFor: String => () => Unit = _ => () => (),
maxCommitmentSendDelayMillis: Option[NonNegativeInt] = None,
)
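A small sketch of how the new maxCommitmentSendDelayMillis cap could bound the random commitment send delay; the default below is illustrative, not the value Canton actually uses:

import scala.util.Random

object CommitmentDelayExample extends App {
  // Hypothetical default cap; the actual default is defined by the commitment processing code.
  val defaultMaxDelayMillis = 1000

  // If the testing config sets maxCommitmentSendDelayMillis, it caps the random delay;
  // otherwise the default cap applies, matching the scaladoc above.
  def commitmentSendDelayMillis(configuredMax: Option[Int]): Int = {
    val upperBound = configuredMax.getOrElse(defaultMaxDelayMillis)
    Random.nextInt(upperBound + 1) // uniform over [0, upperBound]
  }

  println(commitmentSendDelayMillis(Some(10)))
  println(commitmentSendDelayMillis(None))
}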
/** @param environmentId ID used to disambiguate tests running in parallel

View File

@ -49,7 +49,6 @@ class Crypto(
_ <- cryptoPublicStore
.storeSigningKey(publicKey, name)
.leftMap[SigningKeyGenerationError](SigningKeyGenerationError.SigningPublicStoreError)
.mapK(FutureUnlessShutdown.outcomeK)
} yield publicKey
/** Helper method to generate a new encryption key pair and store the public key in the public store as well. */
@ -66,7 +65,6 @@ class Crypto(
.leftMap[EncryptionKeyGenerationError](
EncryptionKeyGenerationError.EncryptionPublicStoreError
)
.mapK(FutureUnlessShutdown.outcomeK)
} yield publicKey
override def onClosed(): Unit =
@ -148,7 +146,9 @@ trait SyncCryptoApi {
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, SyncCryptoError, Signature]
/** Decrypts a message using the private key of the public key given as the fingerprint. */
/** Decrypts a message using the private key of the public key identified by the fingerprint
* in the AsymmetricEncrypted object.
*/
def decrypt[M](encryptedMessage: AsymmetricEncrypted[M])(
deserialize: ByteString => Either[DeserializationError, M]
)(implicit traceContext: TraceContext): EitherT[FutureUnlessShutdown, SyncCryptoError, M]

View File

@ -25,6 +25,7 @@ import com.digitalasset.canton.serialization.{
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.*
import com.digitalasset.canton.version.{
HasToByteString,
HasVersionedMessageCompanion,
HasVersionedMessageCompanionDbHelpers,
HasVersionedToByteString,
@ -35,7 +36,7 @@ import com.digitalasset.canton.version.{
import com.google.protobuf.ByteString
import slick.jdbc.GetResult
import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.ExecutionContext
/** Encryption operations that do not require access to a private key store but operate with provided keys. */
trait EncryptionOps {
@ -60,13 +61,23 @@ trait EncryptionOps {
scheme: SymmetricKeyScheme = defaultSymmetricKeyScheme,
): Either[EncryptionKeyCreationError, SymmetricKey]
/** Encrypts the given bytes using the given public key */
/** Encrypts the bytes of the serialized message using the given public key.
* The given protocol version determines the message serialization.
*/
def encryptWith[M <: HasVersionedToByteString](
message: M,
publicKey: EncryptionPublicKey,
version: ProtocolVersion,
): Either[EncryptionError, AsymmetricEncrypted[M]]
/** Encrypts the bytes of the serialized message using the given public key.
* The protocol version embedded in the message determines the message serialization.
*/
def encryptWith[M <: HasToByteString](
message: M,
publicKey: EncryptionPublicKey,
): Either[EncryptionError, AsymmetricEncrypted[M]]
/** Deterministically encrypts the given bytes using the given public key.
* This is unsafe for general use and it's only used to encrypt the decryption key of each view
*/
@ -90,13 +101,23 @@ trait EncryptionOps {
message <- decryptWithInternal(encrypted, privateKey)(deserialize)
} yield message
/** Encrypts the given message with the given symmetric key */
/** Encrypts the bytes of the serialized message using the given symmetric key.
* The given protocol version determines the message serialization.
*/
def encryptWith[M <: HasVersionedToByteString](
message: M,
symmetricKey: SymmetricKey,
version: ProtocolVersion,
): Either[EncryptionError, Encrypted[M]]
/** Encrypts the bytes of the serialized message using the given symmetric key.
* The protocol version embedded in the message determines the message serialization.
*/
def encryptWith[M <: HasToByteString](
message: M,
symmetricKey: SymmetricKey,
): Either[EncryptionError, Encrypted[M]]
/** Decrypts a message encrypted using `encryptWith` */
def decryptWith[M](encrypted: Encrypted[M], symmetricKey: SymmetricKey)(
deserialize: ByteString => Either[DeserializationError, M]
@ -149,7 +170,7 @@ trait EncryptionPrivateStoreOps extends EncryptionPrivateOps {
/** Internal method to generate and return the entire encryption key pair */
protected[crypto] def generateEncryptionKeypair(scheme: EncryptionKeyScheme)(implicit
traceContext: TraceContext
): EitherT[Future, EncryptionKeyGenerationError, EncryptionKeyPair]
): EitherT[FutureUnlessShutdown, EncryptionKeyGenerationError, EncryptionKeyPair]
override def generateEncryptionKey(
scheme: EncryptionKeyScheme = defaultEncryptionKeyScheme,
@ -158,7 +179,7 @@ trait EncryptionPrivateStoreOps extends EncryptionPrivateOps {
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, EncryptionKeyGenerationError, EncryptionPublicKey] =
for {
keypair <- generateEncryptionKeypair(scheme).mapK(FutureUnlessShutdown.outcomeK)
keypair <- generateEncryptionKeypair(scheme)
_ <- store
.storeDecryptionKey(keypair.privateKey, name)
.leftMap[EncryptionKeyGenerationError](

View File

@ -90,4 +90,5 @@ object HashPurpose {
val _SetTrafficPurchased = HashPurpose(41, "SetTrafficPurchased")
val OrderingRequestSignature = HashPurpose(42, "OrderingRequestSignature")
val TopologyMappingUniqueKey = HashPurpose(43, "TopologyMappingUniqueKey")
val CantonScript = HashPurpose(44, "CantonScriptHash")
}

View File

@ -31,7 +31,7 @@ import com.digitalasset.canton.version.{
import com.google.protobuf.ByteString
import slick.jdbc.GetResult
import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.ExecutionContext
/** Signing operations that do not require access to a private key store but operate with provided keys. */
trait SigningOps {
@ -109,7 +109,7 @@ trait SigningPrivateStoreOps extends SigningPrivateOps {
/** Internal method to generate and return the entire signing key pair */
protected[crypto] def generateSigningKeypair(scheme: SigningKeyScheme)(implicit
traceContext: TraceContext
): EitherT[Future, SigningKeyGenerationError, SigningKeyPair]
): EitherT[FutureUnlessShutdown, SigningKeyGenerationError, SigningKeyPair]
override def generateSigningKey(
scheme: SigningKeyScheme,
@ -118,7 +118,7 @@ trait SigningPrivateStoreOps extends SigningPrivateOps {
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, SigningKeyGenerationError, SigningPublicKey] =
for {
keypair <- generateSigningKeypair(scheme).mapK(FutureUnlessShutdown.outcomeK)
keypair <- generateSigningKeypair(scheme)
_ <- store
.storeSigningKey(keypair.privateKey, name)
.leftMap[SigningKeyGenerationError](SigningKeyGenerationError.SigningPrivateStoreError)
@ -541,6 +541,11 @@ object SignatureCheckError {
param("error", _.error.doubleQuoted),
)
}
final case class InvalidCryptoScheme(message: String) extends SignatureCheckError {
override def pretty: Pretty[InvalidCryptoScheme] = prettyOfClass(
unnamedParam(_.message.unquoted)
)
}
final case class InvalidKeyError(message: String) extends SignatureCheckError {
override def pretty: Pretty[InvalidKeyError] = prettyOfClass(unnamedParam(_.message.unquoted))
}

View File

@ -13,10 +13,12 @@ import cats.syntax.functor.*
import cats.syntax.parallel.*
import cats.syntax.traverse.*
import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.checked
import com.digitalasset.canton.concurrent.{FutureSupervisor, HasFutureSupervision}
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.config.{CacheConfig, CachingConfigs, ProcessingTimeout}
import com.digitalasset.canton.crypto.SignatureCheckError.{
InvalidCryptoScheme,
SignatureWithWrongKey,
SignerHasNoValidKeys,
}
@ -30,7 +32,7 @@ import com.digitalasset.canton.lifecycle.{
UnlessShutdown,
}
import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.protocol.DynamicDomainParameters
import com.digitalasset.canton.protocol.{DynamicDomainParameters, StaticDomainParameters}
import com.digitalasset.canton.serialization.DeserializationError
import com.digitalasset.canton.topology.MediatorGroup.MediatorGroupIndex
import com.digitalasset.canton.topology.*
@ -44,7 +46,6 @@ import com.digitalasset.canton.tracing.{TraceContext, TracedScaffeine}
import com.digitalasset.canton.util.FutureInstances.*
import com.digitalasset.canton.util.LoggerUtil
import com.digitalasset.canton.version.{HasVersionedToByteString, ProtocolVersion}
import com.digitalasset.canton.{DomainAlias, checked}
import com.google.protobuf.ByteString
import org.slf4j.event.Level
@ -71,19 +72,26 @@ class SyncCryptoApiProvider(
def pureCrypto: CryptoPureApi = crypto.pureCrypto
def tryForDomain(domain: DomainId, alias: Option[DomainAlias] = None): DomainSyncCryptoClient =
def tryForDomain(
domain: DomainId,
staticDomainParameters: StaticDomainParameters,
): DomainSyncCryptoClient =
new DomainSyncCryptoClient(
member,
domain,
ips.tryForDomain(domain),
crypto,
cachingConfigs,
staticDomainParameters,
timeouts,
futureSupervisor,
loggerFactory.append("domainId", domain.toString),
)
def forDomain(domain: DomainId): Option[DomainSyncCryptoClient] =
def forDomain(
domain: DomainId,
staticDomainParameters: StaticDomainParameters,
): Option[DomainSyncCryptoClient] =
for {
dips <- ips.forDomain(domain)
} yield new DomainSyncCryptoClient(
@ -92,6 +100,7 @@ class SyncCryptoApiProvider(
dips,
crypto,
cachingConfigs,
staticDomainParameters,
timeouts,
futureSupervisor,
loggerFactory,
@ -326,6 +335,7 @@ class DomainSyncCryptoClient(
val ips: DomainTopologyClient,
val crypto: Crypto,
cacheConfigs: CachingConfigs,
val staticDomainParameters: StaticDomainParameters,
override val timeouts: ProcessingTimeout,
override protected val futureSupervisor: FutureSupervisor,
override val loggerFactory: NamedLoggerFactory,
@ -365,17 +375,17 @@ class DomainSyncCryptoClient(
): FutureUnlessShutdown[DomainSnapshotSyncCryptoApi] =
ips.awaitSnapshotUS(timestamp).map(create)
private def create(snapshot: TopologySnapshot): DomainSnapshotSyncCryptoApi = {
private def create(snapshot: TopologySnapshot): DomainSnapshotSyncCryptoApi =
new DomainSnapshotSyncCryptoApi(
member,
domainId,
staticDomainParameters,
snapshot,
crypto,
implicit tc => ts => EitherT(FutureUnlessShutdown(mySigningKeyCache.get(ts))),
cacheConfigs.keyCache,
loggerFactory,
)
}
private val mySigningKeyCache =
TracedScaffeine.buildTracedAsyncFuture[CantonTimestamp, UnlessShutdown[
@ -456,6 +466,7 @@ class DomainSyncCryptoClient(
class DomainSnapshotSyncCryptoApi(
val member: Member,
val domainId: DomainId,
staticDomainParameters: StaticDomainParameters,
override val ipsSnapshot: TopologySnapshot,
val crypto: Crypto,
fetchSigningKey: TraceContext => CantonTimestamp => EitherT[
@ -523,18 +534,26 @@ class DomainSnapshotSyncCryptoApi(
val error =
if (validKeys.isEmpty)
SignerHasNoValidKeys(
s"There are no valid keys for ${signerStr_} but received message signed with ${signature.signedBy}"
s"There are no valid keys for $signerStr_ but received message signed with ${signature.signedBy}"
)
else
SignatureWithWrongKey(
s"Key ${signature.signedBy} used to generate signature is not a valid key for ${signerStr_}. Valid keys are ${validKeys.values
s"Key ${signature.signedBy} used to generate signature is not a valid key for $signerStr_. Valid keys are ${validKeys.values
.map(_.fingerprint.unwrap)}"
)
Left(error)
}
validKeys.get(signature.signedBy) match {
case Some(key) =>
crypto.pureCrypto.verifySignature(hash, key, signature)
if (staticDomainParameters.requiredSigningKeySchemes.contains(key.scheme))
crypto.pureCrypto.verifySignature(hash, key, signature)
else
Left(
InvalidCryptoScheme(
s"The signing key scheme ${key.scheme} is not part of the " +
s"required schemes: ${staticDomainParameters.requiredSigningKeySchemes}"
)
)
case None =>
signatureCheckFailed()
}
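The added branch rejects a signature outright when the key's scheme is not among the domain's requiredSigningKeySchemes, before any cryptographic verification is attempted. The same gate in isolation, with simplified stand-in types:

// Hypothetical simplifications of the key and domain parameter types.
sealed trait SigningKeyScheme
case object Ed25519 extends SigningKeyScheme
case object EcDsaP256 extends SigningKeyScheme

final case class SigningPublicKey(fingerprint: String, scheme: SigningKeyScheme)

object SchemeCheckExample {
  def checkSchemeAllowed(
      key: SigningPublicKey,
      requiredSigningKeySchemes: Set[SigningKeyScheme],
  ): Either[String, Unit] =
    Either.cond(
      requiredSigningKeySchemes.contains(key.scheme),
      (),
      s"The signing key scheme ${key.scheme} is not part of the required schemes: $requiredSigningKeySchemes",
    )

  // checkSchemeAllowed(SigningPublicKey("fp1", Ed25519), Set(Ed25519))   == Right(())
  // checkSchemeAllowed(SigningPublicKey("fp1", EcDsaP256), Set(Ed25519)) is a Left
}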
@ -676,11 +695,10 @@ class DomainSnapshotSyncCryptoApi(
override def decrypt[M](encryptedMessage: AsymmetricEncrypted[M])(
deserialize: ByteString => Either[DeserializationError, M]
)(implicit traceContext: TraceContext): EitherT[FutureUnlessShutdown, SyncCryptoError, M] = {
)(implicit traceContext: TraceContext): EitherT[FutureUnlessShutdown, SyncCryptoError, M] =
crypto.privateCrypto
.decrypt(encryptedMessage)(deserialize)
.leftMap[SyncCryptoError](err => SyncCryptoError.SyncCryptoDecryptionError(err))
}
/** Encrypts a message for the given members
*

View File

@ -25,7 +25,7 @@ import com.digitalasset.canton.version.ReleaseProtocolVersion
import com.google.common.annotations.VisibleForTesting
import scala.collection.concurrent.TrieMap
import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.ExecutionContext
/** Extends a CryptoPrivateStore with the necessary store write/read operations and is intended to be used by canton
* internal private crypto stores (e.g. [[com.digitalasset.canton.crypto.store.memory.InMemoryCryptoPrivateStore]],
@ -71,7 +71,7 @@ trait CryptoPrivateStoreExtended extends CryptoPrivateStore { this: NamedLogging
private[crypto] def deletePrivateKey(keyId: Fingerprint)(implicit
traceContext: TraceContext
): EitherT[Future, CryptoPrivateStoreError, Unit]
): EitherT[FutureUnlessShutdown, CryptoPrivateStoreError, Unit]
def storePrivateKey(key: PrivateKey, name: Option[KeyName])(implicit
traceContext: TraceContext
@ -121,7 +121,6 @@ trait CryptoPrivateStoreExtended extends CryptoPrivateStore { this: NamedLogging
deletedDecKey.foreach(decryptionKeyMap.put(keyId, _))
err
}
.mapK(FutureUnlessShutdown.outcomeK)
}
private def readAndParsePrivateKey[A <: PrivateKey, B <: PrivateKeyWithName](

View File

@ -11,6 +11,7 @@ import com.digitalasset.canton.crypto.store.db.DbCryptoPublicStore
import com.digitalasset.canton.crypto.store.memory.InMemoryCryptoPublicStore
import com.digitalasset.canton.crypto.{KeyName, *}
import com.digitalasset.canton.error.{BaseCantonError, CantonErrorGroups}
import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
import com.digitalasset.canton.logging.NamedLoggerFactory
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.resource.{DbStorage, MemoryStorage, Storage}
@ -19,7 +20,7 @@ import com.digitalasset.canton.version.ReleaseProtocolVersion
import com.google.common.annotations.VisibleForTesting
import scala.collection.concurrent.TrieMap
import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.ExecutionContext
/** Store for all public cryptographic material such as certificates or public keys. */
trait CryptoPublicStore extends AutoCloseable {
@ -33,14 +34,14 @@ trait CryptoPublicStore extends AutoCloseable {
// Write methods that the underlying store has to implement for the caching
protected def writeSigningKey(key: SigningPublicKey, name: Option[KeyName])(implicit
traceContext: TraceContext
): EitherT[Future, CryptoPublicStoreError, Unit]
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Unit]
protected def writeEncryptionKey(key: EncryptionPublicKey, name: Option[KeyName])(implicit
traceContext: TraceContext
): EitherT[Future, CryptoPublicStoreError, Unit]
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Unit]
protected[crypto] def listAllKeyFingerprints(implicit
traceContext: TraceContext
): EitherT[Future, CryptoPublicStoreError, Set[Fingerprint]] =
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Set[Fingerprint]] =
for {
signingKeys <- listSigningKeys
encryptionKeys <- listEncryptionKeys
@ -49,15 +50,15 @@ trait CryptoPublicStore extends AutoCloseable {
@VisibleForTesting
private[store] def listSigningKeys(implicit
traceContext: TraceContext
): EitherT[Future, CryptoPublicStoreError, Set[SigningPublicKeyWithName]]
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Set[SigningPublicKeyWithName]]
@VisibleForTesting
private[store] def listEncryptionKeys(implicit
traceContext: TraceContext
): EitherT[Future, CryptoPublicStoreError, Set[EncryptionPublicKeyWithName]]
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Set[EncryptionPublicKeyWithName]]
def storePublicKey(publicKey: PublicKey, name: Option[KeyName])(implicit
traceContext: TraceContext
): EitherT[Future, CryptoPublicStoreError, Unit] =
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Unit] =
(publicKey: @unchecked) match {
case sigKey: SigningPublicKey => storeSigningKey(sigKey, name)
case encKey: EncryptionPublicKey => storeEncryptionKey(encKey, name)
@ -65,12 +66,12 @@ trait CryptoPublicStore extends AutoCloseable {
def publicKey(keyId: Fingerprint)(implicit
traceContext: TraceContext
): EitherT[Future, CryptoPublicStoreError, Option[PublicKey]] =
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Option[PublicKey]] =
publicKeyWithName(keyId).map(_.map(_.publicKey))
def publicKeyWithName(keyId: Fingerprint)(implicit
traceContext: TraceContext
): EitherT[Future, CryptoPublicStoreError, Option[PublicKeyWithName]] =
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Option[PublicKeyWithName]] =
for {
sigKeyOption <- readSigningKey(keyId)
pubKeyOption <- sigKeyOption.fold(readEncryptionKey(keyId).widen[Option[PublicKeyWithName]])(
@ -80,7 +81,7 @@ trait CryptoPublicStore extends AutoCloseable {
def existsPublicKey(keyId: Fingerprint, purpose: KeyPurpose)(implicit
traceContext: TraceContext
): EitherT[Future, CryptoPublicStoreError, Boolean] =
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Boolean] =
purpose match {
case KeyPurpose.Signing => signingKey(keyId).map(_.nonEmpty)
case KeyPurpose.Encryption => encryptionKey(keyId).map(_.nonEmpty)
@ -88,22 +89,27 @@ trait CryptoPublicStore extends AutoCloseable {
def findSigningKeyIdByName(keyName: KeyName)(implicit
traceContext: TraceContext
): EitherT[Future, CryptoPublicStoreError, Option[SigningPublicKey]] =
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Option[SigningPublicKey]] =
listSigningKeys.map(_.find(_.name.contains(keyName)).map(_.publicKey))
def findSigningKeyIdByFingerprint(fingerprint: Fingerprint)(implicit
traceContext: TraceContext
): EitherT[Future, CryptoPublicStoreError, Option[SigningPublicKey]] =
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Option[SigningPublicKey]] =
listSigningKeys.map(_.find(_.publicKey.fingerprint == fingerprint).map(_.publicKey))
def findEncryptionKeyIdByName(keyName: KeyName)(implicit
traceContext: TraceContext
): EitherT[Future, CryptoPublicStoreError, Option[EncryptionPublicKey]] =
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Option[EncryptionPublicKey]] =
listEncryptionKeys.map(_.find(_.name.contains(keyName)).map(_.publicKey))
def findEncryptionKeyIdByFingerprint(fingerprint: Fingerprint)(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Option[EncryptionPublicKey]] =
listEncryptionKeys.map(_.find(_.publicKey.fingerprint == fingerprint).map(_.publicKey))
def publicKeysWithName(implicit
traceContext: TraceContext
): EitherT[Future, CryptoPublicStoreError, Set[PublicKeyWithName]] =
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Set[PublicKeyWithName]] =
for {
sigKeys <- listSigningKeys
encKeys <- listEncryptionKeys
@ -111,42 +117,42 @@ trait CryptoPublicStore extends AutoCloseable {
def signingKey(signingKeyId: Fingerprint)(implicit
traceContext: TraceContext
): EitherT[Future, CryptoPublicStoreError, Option[SigningPublicKey]] =
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Option[SigningPublicKey]] =
retrieveKeyAndUpdateCache(signingKeyMap, readSigningKey(_))(signingKeyId)
protected def readSigningKey(signingKeyId: Fingerprint)(implicit
traceContext: TraceContext
): EitherT[Future, CryptoPublicStoreError, Option[SigningPublicKeyWithName]]
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Option[SigningPublicKeyWithName]]
def signingKeys(implicit
traceContext: TraceContext
): EitherT[Future, CryptoPublicStoreError, Set[SigningPublicKey]] =
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Set[SigningPublicKey]] =
retrieveKeysAndUpdateCache(listSigningKeys, signingKeyMap)
def storeSigningKey(key: SigningPublicKey, name: Option[KeyName] = None)(implicit
traceContext: TraceContext
): EitherT[Future, CryptoPublicStoreError, Unit] =
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Unit] =
writeSigningKey(key, name).map { _ =>
val _ = signingKeyMap.put(key.id, SigningPublicKeyWithName(key, name))
}
def encryptionKey(encryptionKeyId: Fingerprint)(implicit
traceContext: TraceContext
): EitherT[Future, CryptoPublicStoreError, Option[EncryptionPublicKey]] =
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Option[EncryptionPublicKey]] =
retrieveKeyAndUpdateCache(encryptionKeyMap, readEncryptionKey(_))(encryptionKeyId)
protected def readEncryptionKey(encryptionKeyId: Fingerprint)(implicit
traceContext: TraceContext
): EitherT[Future, CryptoPublicStoreError, Option[EncryptionPublicKeyWithName]]
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Option[EncryptionPublicKeyWithName]]
def encryptionKeys(implicit
traceContext: TraceContext
): EitherT[Future, CryptoPublicStoreError, Set[EncryptionPublicKey]] =
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Set[EncryptionPublicKey]] =
retrieveKeysAndUpdateCache(listEncryptionKeys, encryptionKeyMap)
def storeEncryptionKey(key: EncryptionPublicKey, name: Option[KeyName] = None)(implicit
traceContext: TraceContext
): EitherT[Future, CryptoPublicStoreError, Unit] =
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Unit] =
writeEncryptionKey(key, name)
.map { _ =>
val _ = encryptionKeyMap.put(key.id, EncryptionPublicKeyWithName(key, name))
@ -154,8 +160,8 @@ trait CryptoPublicStore extends AutoCloseable {
private def retrieveKeyAndUpdateCache[KN <: PublicKeyWithName](
cache: TrieMap[Fingerprint, KN],
readKey: Fingerprint => EitherT[Future, CryptoPublicStoreError, Option[KN]],
)(keyId: Fingerprint): EitherT[Future, CryptoPublicStoreError, Option[KN#K]] =
readKey: Fingerprint => EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Option[KN]],
)(keyId: Fingerprint): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Option[KN#K]] =
cache.get(keyId) match {
case Some(value) => EitherT.rightT(Some(value.publicKey))
case None =>
@ -166,9 +172,9 @@ trait CryptoPublicStore extends AutoCloseable {
}
private def retrieveKeysAndUpdateCache[KN <: PublicKeyWithName](
keysFromDb: EitherT[Future, CryptoPublicStoreError, Set[KN]],
keysFromDb: EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Set[KN]],
cache: TrieMap[Fingerprint, KN],
): EitherT[Future, CryptoPublicStoreError, Set[KN#K]] =
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Set[KN#K]] =
for {
// we always rebuild the cache here just in case new keys have been added by another process
// this should not be a problem since these operations to get all keys are infrequent and typically

View File

@ -25,7 +25,7 @@ import slick.dbio.DBIOAction
import slick.jdbc.GetResult
import slick.sql.SqlAction
import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.ExecutionContext
/** Represents the data to be stored in the crypto_private_keys table.
* If wrapperKeyId is set (Some(wrapperKeyId)) then the data field is encrypted
@ -102,7 +102,7 @@ class DbCryptoPrivateStore(
private def insertKey(key: StoredPrivateKey)(implicit
traceContext: TraceContext
): EitherT[Future, CryptoPrivateStoreError, Unit] = {
): EitherT[FutureUnlessShutdown, CryptoPrivateStoreError, Unit] = {
def equalKeys(existingKey: StoredPrivateKey, newKey: StoredPrivateKey): Boolean = {
if (existingKey.wrapperKeyId.isEmpty) {
@ -119,13 +119,13 @@ class DbCryptoPrivateStore(
for {
inserted <- EitherT.right(
storage.update(insertKeyUpdate(key), functionFullName)
storage.updateUnlessShutdown(insertKeyUpdate(key), functionFullName)
)
res <-
if (inserted == 0) {
// If no key was inserted by the insert query, check that the existing value matches
storage
.querySingle(queryKey(key.id, key.purpose), functionFullName)
.querySingleUnlessShutdown(queryKey(key.id, key.purpose), functionFullName)
// If we don't find the duplicate key, it may have been concurrently deleted and we could retry to insert it.
.toRight(
CryptoPrivateStoreError
@ -133,14 +133,14 @@ class DbCryptoPrivateStore(
)
.flatMap { existingKey =>
EitherT
.cond[Future](
.cond[FutureUnlessShutdown](
equalKeys(existingKey, key),
(),
CryptoPrivateStoreError.KeyAlreadyExists(key.id, existingKey.name.map(_.unwrap)),
)
.leftWiden[CryptoPrivateStoreError]
}
} else EitherT.rightT[Future, CryptoPrivateStoreError](())
} else EitherT.rightT[FutureUnlessShutdown, CryptoPrivateStoreError](())
} yield res
}
@ -167,7 +167,7 @@ class DbCryptoPrivateStore(
)(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPrivateStoreError, Unit] =
insertKey(key).mapK(FutureUnlessShutdown.outcomeK)
insertKey(key)
@VisibleForTesting
private[canton] def listPrivateKeys(purpose: KeyPurpose, encrypted: Boolean)(implicit
@ -208,10 +208,10 @@ class DbCryptoPrivateStore(
private[crypto] def deletePrivateKey(keyId: Fingerprint)(implicit
traceContext: TraceContext
): EitherT[Future, CryptoPrivateStoreError, Unit] =
): EitherT[FutureUnlessShutdown, CryptoPrivateStoreError, Unit] =
EitherTUtil.fromFuture(
storage
.update_(deleteKey(keyId), functionFullName),
.updateUnlessShutdown_(deleteKey(keyId), functionFullName),
err => CryptoPrivateStoreError.FailedToDeleteKey(keyId, err.toString),
)
@ -234,10 +234,10 @@ class DbCryptoPrivateStore(
private[crypto] def getWrapperKeyId()(implicit
traceContext: TraceContext
): EitherT[Future, CryptoPrivateStoreError, Option[String300]] =
): EitherT[FutureUnlessShutdown, CryptoPrivateStoreError, Option[String300]] =
EitherTUtil
.fromFuture(
storage.query(
storage.queryUnlessShutdown(
{
sql"select distinct wrapper_key_id from common_crypto_private_keys"
.as[Option[String300]]

View File

@ -9,6 +9,7 @@ import com.daml.nameof.NameOf.functionFullName
import com.digitalasset.canton.config.ProcessingTimeout
import com.digitalasset.canton.crypto.*
import com.digitalasset.canton.crypto.store.*
import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
import com.digitalasset.canton.logging.NamedLoggerFactory
import com.digitalasset.canton.resource.DbStorage.DbAction
import com.digitalasset.canton.resource.{DbStorage, DbStore}
@ -17,7 +18,7 @@ import com.digitalasset.canton.util.EitherTUtil
import com.digitalasset.canton.version.ReleaseProtocolVersion
import slick.jdbc.{GetResult, SetParameter}
import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.ExecutionContext
class DbCryptoPublicStore(
override protected val storage: DbStorage,
@ -68,35 +69,39 @@ class DbCryptoPublicStore(
private def insertKey[K <: PublicKey: SetParameter, KN <: PublicKeyWithName: GetResult](
key: K,
name: Option[KeyName],
)(implicit traceContext: TraceContext): EitherT[Future, CryptoPublicStoreError, Unit] =
)(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Unit] =
for {
inserted <- EitherT.right(storage.update(insertKeyUpdate(key, name), functionFullName))
inserted <- EitherT.right(
storage.updateUnlessShutdown(insertKeyUpdate(key, name), functionFullName)
)
res <-
if (inserted == 0) {
// If no key was inserted by the insert query, check that the existing value matches
storage
.querySingle(queryKey(key.id, key.purpose), functionFullName)
.querySingleUnlessShutdown(queryKey(key.id, key.purpose), functionFullName)
.toRight(
CryptoPublicStoreError.FailedToInsertKey(key.id, "No key inserted and no key found")
)
.flatMap { existingKey =>
EitherT
.cond[Future](
.cond[FutureUnlessShutdown](
existingKey.publicKey == key && existingKey.name == name,
(),
CryptoPublicStoreError.KeyAlreadyExists(key.id, existingKey.name.map(_.unwrap)),
)
.leftWiden[CryptoPublicStoreError]
}
} else EitherT.rightT[Future, CryptoPublicStoreError](())
} else EitherT.rightT[FutureUnlessShutdown, CryptoPublicStoreError](())
} yield res
override def readSigningKey(signingKeyId: Fingerprint)(implicit
traceContext: TraceContext
): EitherT[Future, CryptoPublicStoreError, Option[SigningPublicKeyWithName]] =
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Option[SigningPublicKeyWithName]] =
EitherTUtil.fromFuture(
storage
.querySingle(
.querySingleUnlessShutdown(
queryKey[SigningPublicKeyWithName](signingKeyId, KeyPurpose.Signing),
functionFullName,
)
@ -106,10 +111,10 @@ class DbCryptoPublicStore(
override def readEncryptionKey(encryptionKeyId: Fingerprint)(implicit
traceContext: TraceContext
): EitherT[Future, CryptoPublicStoreError, Option[EncryptionPublicKeyWithName]] =
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Option[EncryptionPublicKeyWithName]] =
EitherTUtil.fromFuture(
storage
.querySingle(
.querySingleUnlessShutdown(
queryKey[EncryptionPublicKeyWithName](encryptionKeyId, KeyPurpose.Encryption),
functionFullName,
)
@ -119,29 +124,35 @@ class DbCryptoPublicStore(
override protected def writeSigningKey(key: SigningPublicKey, name: Option[KeyName])(implicit
traceContext: TraceContext
): EitherT[Future, CryptoPublicStoreError, Unit] =
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Unit] =
insertKey[SigningPublicKey, SigningPublicKeyWithName](key, name)
override protected def writeEncryptionKey(key: EncryptionPublicKey, name: Option[KeyName])(
implicit traceContext: TraceContext
): EitherT[Future, CryptoPublicStoreError, Unit] =
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Unit] =
insertKey[EncryptionPublicKey, EncryptionPublicKeyWithName](key, name)
override private[store] def listSigningKeys(implicit
traceContext: TraceContext
): EitherT[Future, CryptoPublicStoreError, Set[SigningPublicKeyWithName]] =
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Set[SigningPublicKeyWithName]] =
EitherTUtil.fromFuture(
storage.query(queryKeys[SigningPublicKeyWithName](KeyPurpose.Signing), functionFullName),
storage.queryUnlessShutdown(
queryKeys[SigningPublicKeyWithName](KeyPurpose.Signing),
functionFullName,
),
err => CryptoPublicStoreError.FailedToListKeys(err.toString),
)
override private[store] def listEncryptionKeys(implicit
traceContext: TraceContext
): EitherT[Future, CryptoPublicStoreError, Set[EncryptionPublicKeyWithName]] =
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Set[EncryptionPublicKeyWithName]] =
EitherTUtil
.fromFuture(
storage
.query(queryKeys[EncryptionPublicKeyWithName](KeyPurpose.Encryption), functionFullName),
.queryUnlessShutdown(
queryKeys[EncryptionPublicKeyWithName](KeyPurpose.Encryption),
functionFullName,
),
err => CryptoPublicStoreError.FailedToListKeys(err.toString),
)
}

View File

@ -173,7 +173,9 @@ class InMemoryCryptoPrivateStore(
private[crypto] def deletePrivateKey(
keyId: Fingerprint
)(implicit traceContext: TraceContext): EitherT[Future, CryptoPrivateStoreError, Unit] = {
)(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, CryptoPrivateStoreError, Unit] = {
storedSigningKeyMap.remove(keyId).discard
storedDecryptionKeyMap.remove(keyId).discard
EitherT.rightT(())
@ -185,7 +187,7 @@ class InMemoryCryptoPrivateStore(
newKeys
.parTraverse { newKey =>
for {
_ <- deletePrivateKey(newKey.id).mapK(FutureUnlessShutdown.outcomeK)
_ <- deletePrivateKey(newKey.id)
_ <- writePrivateKey(newKey)
} yield ()
}

View File

@ -7,11 +7,12 @@ import cats.data.EitherT
import cats.syntax.either.*
import com.digitalasset.canton.crypto.store.{CryptoPublicStore, CryptoPublicStoreError}
import com.digitalasset.canton.crypto.{KeyName, *}
import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.TrieMapUtil
import scala.collection.concurrent.TrieMap
import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.ExecutionContext
class InMemoryCryptoPublicStore(override implicit val ec: ExecutionContext)
extends CryptoPublicStore {
@ -29,7 +30,7 @@ class InMemoryCryptoPublicStore(override implicit val ec: ExecutionContext)
override protected def writeSigningKey(key: SigningPublicKey, name: Option[KeyName])(implicit
traceContext: TraceContext
): EitherT[Future, CryptoPublicStoreError, Unit] = {
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Unit] = {
TrieMapUtil
.insertIfAbsent(
storedSigningKeyMap,
@ -42,17 +43,17 @@ class InMemoryCryptoPublicStore(override implicit val ec: ExecutionContext)
override def readSigningKey(signingKeyId: Fingerprint)(implicit
traceContext: TraceContext
): EitherT[Future, CryptoPublicStoreError, Option[SigningPublicKeyWithName]] =
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Option[SigningPublicKeyWithName]] =
EitherT.rightT(storedSigningKeyMap.get(signingKeyId))
override def readEncryptionKey(encryptionKeyId: Fingerprint)(implicit
traceContext: TraceContext
): EitherT[Future, CryptoPublicStoreError, Option[EncryptionPublicKeyWithName]] =
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Option[EncryptionPublicKeyWithName]] =
EitherT.rightT(storedEncryptionKeyMap.get(encryptionKeyId))
override protected def writeEncryptionKey(key: EncryptionPublicKey, name: Option[KeyName])(
implicit traceContext: TraceContext
): EitherT[Future, CryptoPublicStoreError, Unit] = {
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Unit] = {
TrieMapUtil
.insertIfAbsent(
storedEncryptionKeyMap,
@ -65,12 +66,12 @@ class InMemoryCryptoPublicStore(override implicit val ec: ExecutionContext)
override private[store] def listSigningKeys(implicit
traceContext: TraceContext
): EitherT[Future, CryptoPublicStoreError, Set[SigningPublicKeyWithName]] =
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Set[SigningPublicKeyWithName]] =
EitherT.rightT(storedSigningKeyMap.values.toSet)
override private[store] def listEncryptionKeys(implicit
traceContext: TraceContext
): EitherT[Future, CryptoPublicStoreError, Set[EncryptionPublicKeyWithName]] =
): EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Set[EncryptionPublicKeyWithName]] =
EitherT.rightT(storedEncryptionKeyMap.values.toSet)
override def close(): Unit = ()

View File

@ -101,7 +101,7 @@ object ActionDescription extends HasProtocolVersionedCompanion[ActionDescription
case LfNodeCreate(
contractId,
_packageName,
_packageVersion,
_packageVersion,
_templateId,
_arg,
_agreementText,

View File

@ -4,8 +4,8 @@
package com.digitalasset.canton.data
import cats.syntax.either.*
import cats.syntax.functor.*
import com.digitalasset.canton.*
import com.digitalasset.canton.config.RequireTypes.NonNegativeInt
import com.digitalasset.canton.crypto.*
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.protocol.{v30, *}
@ -44,15 +44,12 @@ final case class FullInformeeTree private (tree: GenTransactionTree)(
lazy val domainId: DomainId = commonMetadata.domainId
lazy val mediator: MediatorGroupRecipient = commonMetadata.mediator
lazy val informeesAndThresholdByViewPosition: Map[ViewPosition, (Set[Informee], NonNegativeInt)] =
FullInformeeTree.viewCommonDataByViewPosition(tree).map { case (position, viewCommonData) =>
position -> ((viewCommonData.informees, viewCommonData.threshold))
}
lazy val informeesAndThresholdByViewPosition: Map[ViewPosition, ViewConfirmationParameters] =
FullInformeeTree.viewCommonDataByViewPosition(tree).fmap(_.viewConfirmationParameters)
lazy val allInformees: Set[LfPartyId] = FullInformeeTree
.viewCommonDataByViewPosition(tree)
.flatMap { case (_, viewCommonData) => viewCommonData.informees }
.map(_.party)
.flatMap { case (_, viewCommonData) => viewCommonData.viewConfirmationParameters.informees }
.toSet
lazy val transactionUuid: UUID = checked(tree.commonMetadata.tryUnwrap).uuid

View File

@ -172,8 +172,11 @@ final case class GenTransactionTree private (
} yield FullTransactionViewTree.tryCreate(genTransactionTree)
def allLightTransactionViewTrees(
protocolVersion: ProtocolVersion
): Seq[LightTransactionViewTree] =
allTransactionViewTrees.map(LightTransactionViewTree.fromTransactionViewTree)
allTransactionViewTrees.map(tvt =>
LightTransactionViewTree.fromTransactionViewTree(tvt, protocolVersion)
)
/** All lightweight transaction trees in this [[GenTransactionTree]], accompanied by their witnesses and randomness
* suitable for deriving encryption keys for encrypted view messages.
@ -190,6 +193,7 @@ final case class GenTransactionTree private (
def allLightTransactionViewTreesWithWitnessesAndSeeds(
initSeed: SecureRandomness,
hkdfOps: HkdfOps,
protocolVersion: ProtocolVersion,
): Either[HkdfError, Seq[(LightTransactionViewTree, Witnesses, SecureRandomness)]] = {
val randomnessLength = initSeed.unwrap.size
val witnessAndSeedMapE =
@ -221,7 +225,7 @@ final case class GenTransactionTree private (
witnessAndSeedMapE.map { witnessAndSeedMap =>
allTransactionViewTrees.map { tvt =>
val (witnesses, seed) = witnessAndSeedMap(tvt.viewPosition)
(LightTransactionViewTree.fromTransactionViewTree(tvt), witnesses, seed)
(LightTransactionViewTree.fromTransactionViewTree(tvt, protocolVersion), witnesses, seed)
}
}
}

View File

@ -1,97 +0,0 @@
// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package com.digitalasset.canton.data
import cats.syntax.either.*
import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt}
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.protocol.v30
import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.digitalasset.canton.{LfPartyId, ProtoDeserializationError}
/** A party that must be informed about the view.
*/
// This class is a reference example of serialization best practices.
// In particular, it demonstrates serializing a trait with different subclasses.
// The design is quite simple. It should be applied whenever possible, but it will not cover all cases.
//
// Please consult the team if you intend to change the design of serialization.
sealed trait Informee extends Product with Serializable with PrettyPrinting {
def party: LfPartyId
/** Determines how much "impact" the informee has on approving / rejecting the underlying view.
*
* Positive value: confirming party
* Zero: plain informee, who sees the underlying view, but has no impact on approving / rejecting it
*/
def weight: NonNegativeInt
/** Yields an informee resulting from adding `delta` to `weight`.
*
* If the new weight is zero, the resulting informee will be a plain informee;
 * in this case, the resulting informee will have trust level ORDINARY irrespective of the trust level of this.
*/
def withAdditionalWeight(delta: NonNegativeInt): Informee
/** Plain informees get weight 0.
* Confirming parties get their assigned (positive) weight.
*/
private[data] def toProtoV30: v30.Informee =
v30.Informee(
party = party,
weight = weight.unwrap,
)
override def pretty: Pretty[Informee] =
prettyOfString(inst => show"${inst.party}*${inst.weight}")
}
object Informee {
def create(
party: LfPartyId,
weight: NonNegativeInt,
): Informee =
if (weight == NonNegativeInt.zero) PlainInformee(party)
else ConfirmingParty(party, PositiveInt.tryCreate(weight.unwrap))
private[data] def fromProtoV30(informeeP: v30.Informee): ParsingResult[Informee] = {
val v30.Informee(partyP, weightP) = informeeP
for {
party <- LfPartyId
.fromString(partyP)
.leftMap(ProtoDeserializationError.ValueDeserializationError("party", _))
weight <- NonNegativeInt
.create(weightP)
.leftMap(err => ProtoDeserializationError.InvariantViolation(err.message))
} yield Informee.create(party, weight)
}
}
/** A party that must confirm the underlying view.
*
* @param weight determines the impact of the party on whether the view is approved.
*/
final case class ConfirmingParty(
party: LfPartyId,
partyWeight: PositiveInt,
) extends Informee {
val weight: NonNegativeInt = partyWeight.toNonNegative
def withAdditionalWeight(delta: NonNegativeInt): Informee = {
copy(partyWeight = partyWeight + delta)
}
}
/** An informee that is not a confirming party
*/
final case class PlainInformee(party: LfPartyId) extends Informee {
override val weight: NonNegativeInt = NonNegativeInt.zero
def withAdditionalWeight(delta: NonNegativeInt): Informee =
if (delta == NonNegativeInt.zero) this
else ConfirmingParty(party, PositiveInt.tryCreate(delta.unwrap))
}

View File

@ -31,8 +31,12 @@ import scala.collection.mutable
sealed abstract case class LightTransactionViewTree private[data] (
tree: GenTransactionTree,
override val subviewHashes: Seq[ViewHash],
)(
override val representativeProtocolVersion: RepresentativeProtocolVersion[
LightTransactionViewTree.type
]
) extends TransactionViewTree
with HasVersionedWrapper[LightTransactionViewTree]
with HasProtocolVersionedWrapper[LightTransactionViewTree]
with PrettyPrinting {
@tailrec
@ -71,7 +75,8 @@ sealed abstract case class LightTransactionViewTree private[data] (
} yield this
override protected def companionObj = LightTransactionViewTree
@transient override protected lazy val companionObj: LightTransactionViewTree.type =
LightTransactionViewTree
def toProtoV30: v30.LightTransactionViewTree =
v30.LightTransactionViewTree(
@ -83,16 +88,15 @@ sealed abstract case class LightTransactionViewTree private[data] (
}
object LightTransactionViewTree
extends HasVersionedMessageWithContextCompanion[
extends HasProtocolVersionedWithContextAndValidationCompanion[
LightTransactionViewTree,
(HashOps, ProtocolVersion),
HashOps,
] {
override val name: String = "LightTransactionViewTree"
val supportedProtoVersions: SupportedProtoVersions = SupportedProtoVersions(
ProtoVersion(30) -> ProtoCodec(
ProtocolVersion.v31,
supportedProtoVersion(v30.LightTransactionViewTree)(fromProtoV30),
ProtoVersion(30) -> VersionedProtoConverter(ProtocolVersion.v31)(v30.LightTransactionViewTree)(
supportedProtoVersion(_)((context, proto) => fromProtoV30(context)(proto)),
_.toProtoV30.toByteString,
)
)
@ -105,25 +109,29 @@ object LightTransactionViewTree
def tryCreate(
tree: GenTransactionTree,
subviewHashes: Seq[ViewHash],
protocolVersion: ProtocolVersion,
): LightTransactionViewTree =
create(tree, subviewHashes).valueOr(err => throw InvalidLightTransactionViewTree(err))
create(tree, subviewHashes, protocolVersionRepresentativeFor(protocolVersion)).valueOr(err =>
throw InvalidLightTransactionViewTree(err)
)
def create(
tree: GenTransactionTree,
subviewHashes: Seq[ViewHash],
representativeProtocolVersion: RepresentativeProtocolVersion[LightTransactionViewTree.type],
): Either[String, LightTransactionViewTree] =
new LightTransactionViewTree(tree, subviewHashes) {}.validated
new LightTransactionViewTree(tree, subviewHashes)(representativeProtocolVersion) {}.validated
private def fromProtoV30(
context: (HashOps, ProtocolVersion),
protoT: v30.LightTransactionViewTree,
private def fromProtoV30(context: (HashOps, ProtocolVersion))(
protoT: v30.LightTransactionViewTree
): ParsingResult[LightTransactionViewTree] =
for {
protoTree <- ProtoConverter.required("tree", protoT.tree)
tree <- GenTransactionTree.fromProtoV30(context, protoTree)
subviewHashes <- protoT.subviewHashes.traverse(ViewHash.fromProtoPrimitive)
rpv <- protocolVersionRepresentativeFor(ProtoVersion(30))
result <- LightTransactionViewTree
.create(tree, subviewHashes)
.create(tree, subviewHashes, rpv)
.leftMap(e =>
ProtoDeserializationError.InvariantViolation(s"Unable to create transaction tree: $e")
)
@ -216,13 +224,14 @@ object LightTransactionViewTree
/** Turns a full transaction view tree into a lightweight one. Not stack-safe. */
def fromTransactionViewTree(
tvt: FullTransactionViewTree
tvt: FullTransactionViewTree,
protocolVersion: ProtocolVersion,
): LightTransactionViewTree = {
val withBlindedSubviews = tvt.view.copy(subviews = tvt.view.subviews.blindFully)
val genTransactionTree =
tvt.tree.mapUnblindedRootViews(_.replace(tvt.viewHash, withBlindedSubviews))
// By definition, the view in a TransactionViewTree has all subviews unblinded
LightTransactionViewTree.tryCreate(genTransactionTree, tvt.subviewHashes)
LightTransactionViewTree.tryCreate(genTransactionTree, tvt.subviewHashes, protocolVersion)
}
}

View File

@ -28,7 +28,6 @@ object ProcessedDisclosedContract {
def apply(
templateId: Ref.Identifier,
packageName: Ref.PackageName,
packageVersion: Option[Ref.PackageVersion],
contractId: Value.ContractId,
argument: Value,
createdAt: Time.Timestamp,
@ -42,7 +41,6 @@ object ProcessedDisclosedContract {
create = Node.Create(
templateId = templateId,
packageName = packageName,
packageVersion = packageVersion,
coid = contractId,
arg = argument,
signatories = signatories,

View File

@ -0,0 +1,86 @@
// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package com.digitalasset.canton.data
import cats.syntax.traverse.*
import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt}
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.protocol.v30
import com.digitalasset.canton.serialization.ProtoConverter.{
ParsingResult,
parseNonNegativeInt,
parsePositiveInt,
}
import com.digitalasset.canton.{LfPartyId, ProtoDeserializationError}
/** A set of confirming parties and their weights plus a threshold constitutes a quorum.
*
* @param confirmers maps a party id to a weight. The weight is a positive int because
* only PlainInformees have a weight of 0.
*/
final case class Quorum(
confirmers: Map[LfPartyId, PositiveInt],
threshold: NonNegativeInt,
) extends PrettyPrinting {
override def pretty: Pretty[Quorum] = prettyOfClass(
param("confirmers", _.confirmers),
param("threshold", _.threshold),
)
private[data] def tryToProtoV30(informees: Seq[LfPartyId]): v30.Quorum =
v30.Quorum(
partyIndexAndWeight = confirmers.map { case (confirmingParty, weight) =>
v30.PartyIndexAndWeight(
index = {
val index = informees.indexOf(confirmingParty)
if (index < 0) {
/* this is only called by ViewCommonData.toProto, which itself ensures that, when it's created
* or deserialized, the informees' list contains the confirming party
*/
throw new IndexOutOfBoundsException(
s"$confirmingParty is not part of the informees list $informees"
)
}
index
},
weight = weight.unwrap,
)
}.toSeq,
threshold = threshold.unwrap,
)
}
object Quorum {
lazy val empty: Quorum = Quorum(Map.empty, NonNegativeInt.zero)
def fromProtoV30(
quorumP: v30.Quorum,
informees: Seq[LfPartyId],
): ParsingResult[Quorum] = {
val v30.Quorum(partyIndexAndWeightsP, thresholdP) = quorumP
for {
confirmers <- partyIndexAndWeightsP
.traverse { partyIndexAndWeight =>
val v30.PartyIndexAndWeight(indexP, weightP) = partyIndexAndWeight
for {
weight <- parsePositiveInt(weightP)
confirmingParty <-
Either.cond(
0 <= indexP && indexP < informees.size, {
val partyId = informees(indexP)
partyId -> weight
},
ProtoDeserializationError.OtherError(
s"Invalid index $indexP for informees list size ${informees.size}"
),
)
} yield confirmingParty
}
threshold <- parseNonNegativeInt(thresholdP)
} yield new Quorum(confirmers.toMap, threshold)
}
}
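
A minimal construction sketch, assuming the Quorum definition added above; the party identifiers, LfPartyId.assertFromString and the wrapping QuorumExample object are illustrative only:

import com.digitalasset.canton.LfPartyId
import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt}
import com.digitalasset.canton.data.Quorum

object QuorumExample {
  // Hypothetical party ids, used only for illustration.
  private val alice: LfPartyId = LfPartyId.assertFromString("alice::ns")
  private val bank: LfPartyId = LfPartyId.assertFromString("bank::ns")

  // Two confirmers with weight 1 each; this quorum is met once the
  // accumulated weight of approvals reaches the threshold of 2.
  val bothMustConfirm: Quorum = Quorum(
    confirmers = Map(alice -> PositiveInt.one, bank -> PositiveInt.one),
    threshold = NonNegativeInt.tryCreate(2),
  )
}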

View File

@ -4,12 +4,12 @@
package com.digitalasset.canton.data
import cats.syntax.either.*
import com.digitalasset.canton.WorkflowId
import com.digitalasset.canton.data.ViewPosition.MerklePathElement
import com.digitalasset.canton.protocol.{ConfirmationPolicy, RootHash, TransactionId, ViewHash}
import com.digitalasset.canton.sequencing.protocol.MediatorGroupRecipient
import com.digitalasset.canton.topology.DomainId
import com.digitalasset.canton.util.EitherUtil
import com.digitalasset.canton.{LfPartyId, WorkflowId}
import java.util.UUID
@ -57,7 +57,8 @@ trait TransactionViewTree extends ViewTree {
override lazy val viewHash: ViewHash = ViewHash.fromRootHash(view.rootHash)
override lazy val informees: Set[Informee] = view.viewCommonData.tryUnwrap.informees
override lazy val informees: Set[LfPartyId] =
view.viewCommonData.tryUnwrap.viewConfirmationParameters.informees
lazy val viewParticipantData: ViewParticipantData = view.viewParticipantData.tryUnwrap

View File

@ -36,23 +36,22 @@ import java.util.UUID
final case class TransferInViewTree(
commonData: MerkleTreeLeaf[TransferInCommonData],
view: MerkleTree[TransferInView],
)(hashOps: HashOps)
extends GenTransferViewTree[
)(
override val representativeProtocolVersion: RepresentativeProtocolVersion[
TransferInViewTree.type
],
hashOps: HashOps,
) extends GenTransferViewTree[
TransferInCommonData,
TransferInView,
TransferInViewTree,
TransferInMediatorMessage,
](commonData, view)(hashOps) {
](commonData, view)(hashOps)
with HasProtocolVersionedWrapper[TransferInViewTree] {
def submittingParticipant: ParticipantId =
commonData.tryUnwrap.submitterMetadata.submittingParticipant
override def createMediatorMessage(
blindedTree: TransferInViewTree,
submittingParticipantSignature: Signature,
): TransferInMediatorMessage =
TransferInMediatorMessage(blindedTree, submittingParticipantSignature)
override private[data] def withBlindedSubtrees(
optimizedBlindingPolicy: PartialFunction[RootHash, MerkleTree.BlindingCommand]
): MerkleTree[TransferInViewTree] = {
@ -68,43 +67,68 @@ final case class TransferInViewTree(
TransferInViewTree(
commonData,
view.doBlind(optimizedBlindingPolicy),
)(hashOps)
)(representativeProtocolVersion, hashOps)
}
protected[this] override def createMediatorMessage(
blindedTree: TransferInViewTree,
submittingParticipantSignature: Signature,
): TransferInMediatorMessage =
TransferInMediatorMessage(blindedTree, submittingParticipantSignature)
override def pretty: Pretty[TransferInViewTree] = prettyOfClass(
param("common data", _.commonData),
param("view", _.view),
)
@transient override protected lazy val companionObj: TransferInViewTree.type =
TransferInViewTree
}
object TransferInViewTree
extends HasVersionedMessageWithContextCompanion[
extends HasProtocolVersionedWithContextAndValidationWithTargetProtocolVersionCompanion[
TransferInViewTree,
(HashOps, TargetProtocolVersion),
HashOps,
] {
override val name: String = "TransferInViewTree"
val supportedProtoVersions: SupportedProtoVersions = SupportedProtoVersions(
ProtoVersion(30) -> ProtoCodec(
ProtocolVersion.v31,
supportedProtoVersion(v30.TransferViewTree)(fromProtoV30),
val supportedProtoVersions = SupportedProtoVersions(
ProtoVersion(30) -> VersionedProtoConverter(ProtocolVersion.v31)(v30.TransferViewTree)(
supportedProtoVersion(_)((context, proto) => fromProtoV30(context)(proto)),
_.toProtoV30.toByteString,
)
)
def fromProtoV30(
context: (HashOps, TargetProtocolVersion),
transferInViewTreeP: v30.TransferViewTree,
): ParsingResult[TransferInViewTree] = {
val (hashOps, targetProtocolVersion) = context
GenTransferViewTree.fromProtoV30(
TransferInCommonData.fromByteString(targetProtocolVersion.v)(
(hashOps, targetProtocolVersion)
),
TransferInView.fromByteString(targetProtocolVersion.v)(hashOps),
)((commonData, view) => new TransferInViewTree(commonData, view)(hashOps))(
transferInViewTreeP
def apply(
commonData: MerkleTreeLeaf[TransferInCommonData],
view: MerkleTree[TransferInView],
targetProtocolVersion: TargetProtocolVersion,
hashOps: HashOps,
): TransferInViewTree =
TransferInViewTree(commonData, view)(
TransferInViewTree.protocolVersionRepresentativeFor(targetProtocolVersion.v),
hashOps,
)
def fromProtoV30(context: (HashOps, TargetProtocolVersion))(
transferInViewTreeP: v30.TransferViewTree
): ParsingResult[TransferInViewTree] = {
val (hashOps, expectedProtocolVersion) = context
for {
rpv <- protocolVersionRepresentativeFor(ProtoVersion(30))
res <- GenTransferViewTree.fromProtoV30(
TransferInCommonData.fromByteString(expectedProtocolVersion.v)(
(hashOps, expectedProtocolVersion)
),
TransferInView.fromByteString(expectedProtocolVersion.v)(hashOps),
)((commonData, view) =>
TransferInViewTree(commonData, view)(
rpv,
hashOps,
)
)(transferInViewTreeP)
} yield res
}
}
@ -132,16 +156,13 @@ final case class TransferInCommonData private (
with HasProtocolVersionedWrapper[TransferInCommonData]
with ProtocolVersionedMemoizedEvidence {
@transient override protected lazy val companionObj: TransferInCommonData.type =
TransferInCommonData
override val representativeProtocolVersion
: RepresentativeProtocolVersion[TransferInCommonData.type] =
TransferInCommonData.protocolVersionRepresentativeFor(targetProtocolVersion.v)
def confirmingParties: Set[Informee] =
stakeholders.map(ConfirmingParty(_, PositiveInt.one))
@transient override protected lazy val companionObj: TransferInCommonData.type =
TransferInCommonData
protected def toProtoV30: v30.TransferInCommonData =
v30.TransferInCommonData(
salt = Some(salt.toProtoV30),
@ -152,11 +173,14 @@ final case class TransferInCommonData private (
submitterMetadata = Some(submitterMetadata.toProtoV30),
)
override def hashPurpose: HashPurpose = HashPurpose.TransferInCommonData
override protected[this] def toByteStringUnmemoized: ByteString =
super[HasProtocolVersionedWrapper].toByteString
override def hashPurpose: HashPurpose = HashPurpose.TransferInCommonData
def confirmingParties: Map[LfPartyId, PositiveInt] =
stakeholders.map(_ -> PositiveInt.one).toMap
override def pretty: Pretty[TransferInCommonData] = prettyOfClass(
param("submitter metadata", _.submitterMetadata),
param("target domain", _.targetDomain),
@ -212,8 +236,8 @@ object TransferInCommonData
uuidP,
targetMediatorP,
submitterMetadataPO,
) =
transferInCommonDataP
) = transferInCommonDataP
for {
salt <- ProtoConverter.parseRequired(Salt.fromProtoV30, "salt", saltP)
targetDomain <- TargetDomainId.fromProtoPrimitive(targetDomainP, "target_domain")
@ -226,6 +250,7 @@ object TransferInCommonData
submitterMetadata <- ProtoConverter
.required("submitter_metadata", submitterMetadataPO)
.flatMap(TransferSubmitterMetadata.fromProtoV30)
} yield TransferInCommonData(
salt,
targetDomain,
@ -233,11 +258,7 @@ object TransferInCommonData
stakeholders.toSet,
uuid,
submitterMetadata,
)(
hashOps,
targetProtocolVersion,
Some(bytes),
)
)(hashOps, targetProtocolVersion, Some(bytes))
}
}
@ -265,10 +286,13 @@ final case class TransferInView private (
with HasProtocolVersionedWrapper[TransferInView]
with ProtocolVersionedMemoizedEvidence {
override def hashPurpose: HashPurpose = HashPurpose.TransferInView
@transient override protected lazy val companionObj: TransferInView.type = TransferInView
override protected[this] def toByteStringUnmemoized: ByteString =
super[HasProtocolVersionedWrapper].toByteString
def hashPurpose: HashPurpose = HashPurpose.TransferInView
protected def toProtoV30: v30.TransferInView =
v30.TransferInView(
salt = Some(salt.toProtoV30),
@ -279,14 +303,15 @@ final case class TransferInView private (
transferCounter = transferCounter.toProtoPrimitive,
)
override protected[this] def toByteStringUnmemoized: ByteString =
super[HasProtocolVersionedWrapper].toByteString
override def pretty: Pretty[TransferInView] = prettyOfClass(
param("contract", _.contract), // TODO(#3269) this may contain confidential data
param("transfer counter", _.transferCounter),
param("creating transaction id", _.creatingTransactionId),
param("transfer out result", _.transferOutResultEvent),
param("transfer out result event", _.transferOutResultEvent),
param("source protocol version", _.sourceProtocolVersion.v),
param("transfer counter", _.transferCounter),
param(
"contract id",
_.contract.contractId,
), // do not log contract details because it contains confidential data
param("salt", _.salt),
)
}
@ -295,42 +320,6 @@ object TransferInView
extends HasMemoizedProtocolVersionedWithContextCompanion[TransferInView, HashOps] {
override val name: String = "TransferInView"
private[TransferInView] final case class CommonData(
salt: Salt,
creatingTransactionId: TransactionId,
transferOutResultEvent: DeliveredTransferOutResult,
sourceProtocolVersion: SourceProtocolVersion,
)
private[TransferInView] object CommonData {
def fromProto(
hashOps: HashOps,
saltP: Option[com.digitalasset.canton.crypto.v30.Salt],
transferOutResultEventP: ByteString,
creatingTransactionIdP: ByteString,
sourceProtocolVersion: ProtocolVersion,
): ParsingResult[CommonData] = {
for {
salt <- ProtoConverter.parseRequired(Salt.fromProtoV30, "salt", saltP)
// TransferOutResultEvent deserialization
transferOutResultEventMC <- SignedContent
.fromByteString(sourceProtocolVersion)(transferOutResultEventP)
.flatMap(
_.deserializeContent(SequencedEvent.fromByteStringOpen(hashOps, sourceProtocolVersion))
)
transferOutResultEvent <- DeliveredTransferOutResult
.create(NoOpeningErrors(transferOutResultEventMC))
.leftMap(err => OtherError(err.toString))
creatingTransactionId <- TransactionId.fromProtoPrimitive(creatingTransactionIdP)
} yield CommonData(
salt,
creatingTransactionId,
transferOutResultEvent,
SourceProtocolVersion(sourceProtocolVersion),
)
}
}
val supportedProtoVersions = SupportedProtoVersions(
ProtoVersion(30) -> VersionedProtoConverter(ProtocolVersion.v31)(v30.TransferInView)(
supportedProtoVersionMemoized(_)(fromProtoV30),
@ -373,12 +362,13 @@ object TransferInView
transferInViewP
for {
protocolVersion <- ProtocolVersion.fromProtoPrimitive(sourceProtocolVersionP)
sourceProtocolVersion = SourceProtocolVersion(protocolVersion)
commonData <- CommonData.fromProto(
hashOps,
saltP,
transferOutResultEventP,
creatingTransactionIdP,
protocolVersion,
sourceProtocolVersion,
)
contract <- ProtoConverter
.required("contract", contractP)
@ -393,6 +383,44 @@ object TransferInView
TransferCounter(transferCounterP),
)(hashOps, rpv, Some(bytes))
}
private[TransferInView] final case class CommonData(
salt: Salt,
creatingTransactionId: TransactionId,
transferOutResultEvent: DeliveredTransferOutResult,
sourceProtocolVersion: SourceProtocolVersion,
)
private[TransferInView] object CommonData {
def fromProto(
hashOps: HashOps,
saltP: Option[com.digitalasset.canton.crypto.v30.Salt],
transferOutResultEventP: ByteString,
creatingTransactionIdP: ByteString,
sourceProtocolVersion: SourceProtocolVersion,
): ParsingResult[CommonData] = {
for {
salt <- ProtoConverter.parseRequired(Salt.fromProtoV30, "salt", saltP)
// TransferOutResultEvent deserialization
transferOutResultEventMC <- SignedContent
.fromByteString(sourceProtocolVersion.v)(transferOutResultEventP)
.flatMap(
_.deserializeContent(
SequencedEvent.fromByteStringOpen(hashOps, sourceProtocolVersion.v)
)
)
transferOutResultEvent <- DeliveredTransferOutResult
.create(NoOpeningErrors(transferOutResultEventMC))
.leftMap(err => OtherError(err.toString))
creatingTransactionId <- TransactionId.fromProtoPrimitive(creatingTransactionIdP)
} yield CommonData(
salt,
creatingTransactionId,
transferOutResultEvent,
sourceProtocolVersion,
)
}
}
}
/** A fully unblinded [[TransferInViewTree]]
@ -401,7 +429,7 @@ object TransferInView
*/
final case class FullTransferInTree(tree: TransferInViewTree)
extends TransferViewTree
with HasVersionedToByteString
with HasToByteString
with PrettyPrinting {
require(tree.isFullyUnblinded, "A transfer-in request must be fully unblinded")
@ -428,26 +456,26 @@ final case class FullTransferInTree(tree: TransferInViewTree)
submittingParticipantSignature: Signature
): TransferInMediatorMessage = tree.mediatorMessage(submittingParticipantSignature)
override def domainId: DomainId = commonData.targetDomain.unwrap
def targetDomain: TargetDomainId = commonData.targetDomain
override def domainId: DomainId = commonData.targetDomain.unwrap
override def mediator: MediatorGroupRecipient = commonData.targetMediator
override def informees: Set[Informee] = commonData.confirmingParties
override def informees: Set[LfPartyId] = commonData.confirmingParties.keySet
override def toBeSigned: Option[RootHash] = Some(tree.rootHash)
override def viewHash: ViewHash = tree.viewHash
override def rootHash: RootHash = tree.rootHash
override def isTransferringParticipant(participantId: ParticipantId): Boolean =
transferOutResultEvent.unwrap.informees.contains(participantId.adminParty.toLf)
override def toByteString(version: ProtocolVersion): ByteString = tree.toByteString(version)
override def rootHash: RootHash = tree.rootHash
override def pretty: Pretty[FullTransferInTree] = prettyOfClass(unnamedParam(_.tree))
override def toByteString: ByteString = tree.toByteString
}
object FullTransferInTree {
@ -456,9 +484,7 @@ object FullTransferInTree {
targetProtocolVersion: TargetProtocolVersion,
)(bytes: ByteString): ParsingResult[FullTransferInTree] =
for {
tree <- TransferInViewTree.fromTrustedByteString((crypto, targetProtocolVersion))(
bytes
) // FIXME(i18236): validate the proto version to mitigate downgrading attacks
tree <- TransferInViewTree.fromByteString(crypto, targetProtocolVersion)(bytes)
_ <- EitherUtil.condUnitE(
tree.isFullyUnblinded,
OtherError(s"Transfer-in request ${tree.rootHash} is not fully unblinded"),

View File

@ -24,7 +24,7 @@ import com.google.protobuf.ByteString
import java.util.UUID
/** A transfer-out request embedded in a Merkle tree. The view may or may not be blinded. */
final case class TransferOutViewTree private (
final case class TransferOutViewTree(
commonData: MerkleTreeLeaf[TransferOutCommonData],
view: MerkleTree[TransferOutView],
)(
@ -38,7 +38,7 @@ final case class TransferOutViewTree private (
TransferOutViewTree,
TransferOutMediatorMessage,
](commonData, view)(hashOps)
with HasRepresentativeProtocolVersion {
with HasProtocolVersionedWrapper[TransferOutViewTree] {
def submittingParticipant: ParticipantId =
commonData.tryUnwrap.submitterMetadata.submittingParticipant
@ -77,7 +77,7 @@ final case class TransferOutViewTree private (
}
object TransferOutViewTree
extends HasProtocolVersionedWithContextAndValidationCompanion[
extends HasProtocolVersionedWithContextAndValidationWithSourceProtocolVersionCompanion[
TransferOutViewTree,
HashOps,
] {
@ -94,27 +94,26 @@ object TransferOutViewTree
def apply(
commonData: MerkleTreeLeaf[TransferOutCommonData],
view: MerkleTree[TransferOutView],
protocolVersion: ProtocolVersion,
sourceProtocolVersion: SourceProtocolVersion,
hashOps: HashOps,
): TransferOutViewTree =
TransferOutViewTree(commonData, view)(
TransferOutViewTree.protocolVersionRepresentativeFor(protocolVersion),
TransferOutViewTree.protocolVersionRepresentativeFor(sourceProtocolVersion.v),
hashOps,
)
def fromProtoV30(context: (HashOps, ProtocolVersion))(
def fromProtoV30(context: (HashOps, SourceProtocolVersion))(
transferOutViewTreeP: v30.TransferViewTree
): ParsingResult[TransferOutViewTree] = {
val (hashOps, expectedProtocolVersion) = context
val sourceProtocolVersion = SourceProtocolVersion(expectedProtocolVersion)
for {
rpv <- protocolVersionRepresentativeFor(ProtoVersion(30))
res <- GenTransferViewTree.fromProtoV30(
TransferOutCommonData.fromByteString(expectedProtocolVersion)(
(hashOps, sourceProtocolVersion)
TransferOutCommonData.fromByteString(expectedProtocolVersion.v)(
(hashOps, expectedProtocolVersion)
),
TransferOutView.fromByteString(expectedProtocolVersion)(hashOps),
TransferOutView.fromByteString(expectedProtocolVersion.v)(hashOps),
)((commonData, view) =>
TransferOutViewTree(commonData, view)(
rpv,
@ -146,7 +145,7 @@ final case class TransferOutCommonData private (
submitterMetadata: TransferSubmitterMetadata,
)(
hashOps: HashOps,
val protocolVersion: SourceProtocolVersion,
val sourceProtocolVersion: SourceProtocolVersion,
override val deserializedFrom: Option[ByteString],
) extends MerkleTreeLeaf[TransferOutCommonData](hashOps)
with HasProtocolVersionedWrapper[TransferOutCommonData]
@ -157,7 +156,7 @@ final case class TransferOutCommonData private (
override val representativeProtocolVersion
: RepresentativeProtocolVersion[TransferOutCommonData.type] =
TransferOutCommonData.protocolVersionRepresentativeFor(protocolVersion.v)
TransferOutCommonData.protocolVersionRepresentativeFor(sourceProtocolVersion.v)
protected def toProtoV30: v30.TransferOutCommonData =
v30.TransferOutCommonData(
@ -175,8 +174,8 @@ final case class TransferOutCommonData private (
override def hashPurpose: HashPurpose = HashPurpose.TransferOutCommonData
def confirmingParties: Set[Informee] =
(stakeholders ++ adminParties).map(ConfirmingParty(_, PositiveInt.one))
def confirmingParties: Map[LfPartyId, PositiveInt] =
(stakeholders ++ adminParties).map(_ -> PositiveInt.one).toMap
override def pretty: Pretty[TransferOutCommonData] = prettyOfClass(
param("submitter metadata", _.submitterMetadata),
@ -211,7 +210,7 @@ object TransferOutCommonData
adminParties: Set[LfPartyId],
uuid: UUID,
submitterMetadata: TransferSubmitterMetadata,
protocolVersion: SourceProtocolVersion,
sourceProtocolVersion: SourceProtocolVersion,
): TransferOutCommonData = TransferOutCommonData(
salt,
sourceDomain,
@ -220,7 +219,7 @@ object TransferOutCommonData
adminParties,
uuid,
submitterMetadata,
)(hashOps, protocolVersion, None)
)(hashOps, sourceProtocolVersion, None)
private[this] def fromProtoV30(
context: (HashOps, SourceProtocolVersion),
@ -268,9 +267,8 @@ object TransferOutCommonData
/** Aggregates the data of a transfer-out request that is only sent to the involved participants
*/
/** @param salt The salt used to blind the Merkle hash.
* @param submitterMetadata Metadata of the submitter
* @param creatingTransactionId Id of the transaction that created the contract
* @param contract Contract being transferred
* @param creatingTransactionId Id of the transaction that created the contract
* @param targetDomain The domain to which the contract is transferred.
* @param targetTimeProof The sequenced event from the target domain whose timestamp defines
* the baseline for measuring time periods on the target domain
@ -278,8 +276,8 @@ object TransferOutCommonData
*/
final case class TransferOutView private (
override val salt: Salt,
creatingTransactionId: TransactionId,
contract: SerializableContract,
creatingTransactionId: TransactionId,
targetDomain: TargetDomainId,
targetTimeProof: TimeProof,
targetProtocolVersion: TargetProtocolVersion,
@ -314,12 +312,16 @@ final case class TransferOutView private (
)
override def pretty: Pretty[TransferOutView] = prettyOfClass(
param("creating transaction id", _.creatingTransactionId),
param("template id", _.templateId),
param("creatingTransactionId", _.creatingTransactionId),
param("contract", _.contract),
param("target domain", _.targetDomain),
param("target time proof", _.targetTimeProof),
param("target protocol version", _.targetProtocolVersion.v),
param("transfer counter", _.transferCounter),
param(
"contract id",
_.contract.contractId,
), // do not log contract details because it contains confidential data
param("salt", _.salt),
)
}
@ -337,8 +339,8 @@ object TransferOutView
def create(hashOps: HashOps)(
salt: Salt,
creatingTransactionId: TransactionId,
contract: SerializableContract,
creatingTransactionId: TransactionId,
targetDomain: TargetDomainId,
targetTimeProof: TimeProof,
sourceProtocolVersion: SourceProtocolVersion,
@ -347,8 +349,8 @@ object TransferOutView
): TransferOutView =
TransferOutView(
salt,
creatingTransactionId,
contract,
creatingTransactionId,
targetDomain,
targetTimeProof,
targetProtocolVersion,
@ -382,8 +384,8 @@ object TransferOutView
rpv <- protocolVersionRepresentativeFor(ProtoVersion(30))
} yield TransferOutView(
salt,
creatingTransactionId,
contract,
creatingTransactionId,
TargetDomainId(targetDomain),
targetTimeProof,
TargetProtocolVersion(targetProtocolVersion),
@ -403,7 +405,7 @@ object TransferOutView
*/
final case class FullTransferOutTree(tree: TransferOutViewTree)
extends TransferViewTree
with HasVersionedToByteString
with HasToByteString
with PrettyPrinting {
require(tree.isFullyUnblinded, "A transfer-out request must be fully unblinded")
@ -413,6 +415,7 @@ final case class FullTransferOutTree(tree: TransferOutViewTree)
def submitterMetadata: TransferSubmitterMetadata = commonData.submitterMetadata
def submitter: LfPartyId = submitterMetadata.submitter
def workflowId: Option[LfWorkflowId] = submitterMetadata.workflowId
def stakeholders: Set[LfPartyId] = commonData.stakeholders
@ -429,18 +432,17 @@ final case class FullTransferOutTree(tree: TransferOutViewTree)
def targetDomain: TargetDomainId = view.targetDomain
def targetDomainPV: TargetProtocolVersion = view.targetProtocolVersion
def targetTimeProof: TimeProof = view.targetTimeProof
def mediatorMessage(submittingParticipantSignature: Signature): TransferOutMediatorMessage =
tree.mediatorMessage(submittingParticipantSignature)
def mediatorMessage(
submittingParticipantSignature: Signature
): TransferOutMediatorMessage = tree.mediatorMessage(submittingParticipantSignature)
override def domainId: DomainId = sourceDomain.unwrap
override def mediator: MediatorGroupRecipient = commonData.sourceMediator
override def informees: Set[Informee] = commonData.confirmingParties
override def informees: Set[LfPartyId] = commonData.confirmingParties.keySet
override def toBeSigned: Option[RootHash] = Some(tree.rootHash)
@ -453,7 +455,7 @@ final case class FullTransferOutTree(tree: TransferOutViewTree)
override def pretty: Pretty[FullTransferOutTree] = prettyOfClass(unnamedParam(_.tree))
override def toByteString(version: ProtocolVersion): ByteString = tree.toByteString(version)
override def toByteString: ByteString = tree.toByteString
}
object FullTransferOutTree {
@ -462,7 +464,7 @@ object FullTransferOutTree {
sourceProtocolVersion: SourceProtocolVersion,
)(bytes: ByteString): ParsingResult[FullTransferOutTree] =
for {
tree <- TransferOutViewTree.fromByteString(crypto, sourceProtocolVersion.v)(bytes)
tree <- TransferOutViewTree.fromByteString(crypto, sourceProtocolVersion)(bytes)
_ <- EitherUtil.condUnitE(
tree.isFullyUnblinded,
OtherError(s"Transfer-out request ${tree.rootHash} is not fully unblinded"),

View File

@ -5,21 +5,21 @@ package com.digitalasset.canton.data
import cats.syntax.either.*
import cats.syntax.traverse.*
import com.digitalasset.canton.LfPartyId
import com.digitalasset.canton.ProtoDeserializationError.InvariantViolation
import com.digitalasset.canton.config.RequireTypes.NonNegativeInt
import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt}
import com.digitalasset.canton.crypto.*
import com.digitalasset.canton.logging.pretty.Pretty
import com.digitalasset.canton.data.ViewConfirmationParameters.InvalidViewConfirmationParameters
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.protocol.{ConfirmationPolicy, v30}
import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.digitalasset.canton.serialization.{ProtoConverter, ProtocolVersionedMemoizedEvidence}
import com.digitalasset.canton.util.NoCopy
import com.digitalasset.canton.version.*
import com.google.common.annotations.VisibleForTesting
import com.google.protobuf.ByteString
/** Information concerning every '''member''' involved in processing the underlying view.
*
* @param threshold If the sum of the weights of the parties approving the view attains the threshold,
* the view is considered approved.
*/
// This class is a reference example of serialization best practices, demonstrating:
// - memoized serialization, which is required if we need to compute a signature or cryptographic hash of a class
@ -30,8 +30,7 @@ import com.google.protobuf.ByteString
//
// Optional parameters are strongly discouraged, as each parameter needs to be consciously set in a production context.
final case class ViewCommonData private (
informees: Set[Informee],
threshold: NonNegativeInt,
viewConfirmationParameters: ViewConfirmationParameters,
salt: Salt,
)(
hashOps: HashOps,
@ -53,14 +52,21 @@ final case class ViewCommonData private (
@transient override protected lazy val companionObj: ViewCommonData.type = ViewCommonData
// Ensures the invariants related to default values hold
validateInstance().valueOr(err => throw InvalidViewConfirmationParameters(err))
// We use named parameters, because then the code remains correct even when the ProtoBuf code generator
// changes the order of parameters.
def toProtoV30: v30.ViewCommonData =
def toProtoV30: v30.ViewCommonData = {
val informees = viewConfirmationParameters.informees.toSeq
v30.ViewCommonData(
informees = informees.map(_.toProtoV30).toSeq,
threshold = threshold.unwrap,
informees = informees,
quorums = viewConfirmationParameters.quorums.map(
_.tryToProtoV30(informees)
),
salt = Some(salt.toProtoV30),
)
}
// When serializing the class to an anonymous binary format, we serialize it to an UntypedVersionedMessage version of the
// corresponding Protobuf message
@ -69,18 +75,20 @@ final case class ViewCommonData private (
override val hashPurpose: HashPurpose = HashPurpose.ViewCommonData
override def pretty: Pretty[ViewCommonData] = prettyOfClass(
param("informees", _.informees),
param("threshold", _.threshold),
param("view confirmation parameters", _.viewConfirmationParameters),
param("salt", _.salt),
)
@VisibleForTesting
def copy(
informees: Set[Informee] = this.informees,
threshold: NonNegativeInt = this.threshold,
viewConfirmationParameters: ViewConfirmationParameters = this.viewConfirmationParameters,
salt: Salt = this.salt,
): ViewCommonData =
ViewCommonData(informees, threshold, salt)(hashOps, representativeProtocolVersion, None)
ViewCommonData(viewConfirmationParameters, salt)(
hashOps,
representativeProtocolVersion,
None,
)
}
object ViewCommonData
@ -106,40 +114,139 @@ object ViewCommonData
// to not confuse the Idea compiler by overloading "apply".
// (This is not a problem with this particular class, but it has been a problem with other classes.)
def create(hashOps: HashOps)(
informees: Set[Informee],
threshold: NonNegativeInt,
viewConfirmationParameters: ViewConfirmationParameters,
salt: Salt,
protocolVersion: ProtocolVersion,
): Either[InvalidViewConfirmationParameters, ViewCommonData] =
Either
.catchOnly[InvalidViewConfirmationParameters] {
// The deserializedFrom field is set to "None" as this is for creating "fresh" instances.
new ViewCommonData(viewConfirmationParameters, salt)(
hashOps,
protocolVersionRepresentativeFor(protocolVersion),
None,
)
}
def tryCreate(hashOps: HashOps)(
viewConfirmationParameters: ViewConfirmationParameters,
salt: Salt,
protocolVersion: ProtocolVersion,
): ViewCommonData =
// The deserializedFrom field is set to "None" as this is for creating "fresh" instances.
new ViewCommonData(informees, threshold, salt)(
hashOps,
protocolVersionRepresentativeFor(protocolVersion),
None,
)
create(hashOps)(viewConfirmationParameters, salt, protocolVersion)
.valueOr(err => throw err)
private def fromProtoV30(
context: (HashOps, ConfirmationPolicy),
viewCommonDataP: v30.ViewCommonData,
)(bytes: ByteString): ParsingResult[ViewCommonData] = {
val (hashOps, _confirmationPolicy) = context
// TODO(#19152): remove confirmation policy
val (hashOps, _) = context
for {
informees <- viewCommonDataP.informees.traverse(Informee.fromProtoV30)
informees <- viewCommonDataP.informees.traverse(informee =>
ProtoConverter.parseLfPartyId(informee)
)
salt <- ProtoConverter
.parseRequired(Salt.fromProtoV30, "salt", viewCommonDataP.salt)
.leftMap(_.inField("salt"))
threshold <- NonNegativeInt
.create(viewCommonDataP.threshold)
.leftMap(InvariantViolation.toProtoDeserializationError)
.leftMap(_.inField("threshold"))
quorums <- viewCommonDataP.quorums.traverse(Quorum.fromProtoV30(_, informees))
rpv <- protocolVersionRepresentativeFor(ProtoVersion(30))
} yield new ViewCommonData(informees.toSet, threshold, salt)(
viewConfirmationParameters <- ViewConfirmationParameters.create(informees.toSet, quorums)
} yield new ViewCommonData(viewConfirmationParameters, salt)(
hashOps,
rpv,
Some(bytes),
)
}
}
/** Stores the information necessary to confirm a view.
 *
 * @param informees ids of all parties that must be informed of this view.
 * @param quorums multiple quorums, each consisting of a set of confirmers with weights plus a
 *                threshold, that need to be met for the view to be approved. During
 *                deserialization we make sure that the parties listed in the quorums are
 *                informees of the view.
*/
final case class ViewConfirmationParameters private (
informees: Set[LfPartyId],
quorums: Seq[Quorum],
) extends PrettyPrinting
with NoCopy {
override def pretty: Pretty[ViewConfirmationParameters] = prettyOfClass(
param("informees", _.informees),
param("quorums", _.quorums),
)
lazy val confirmers: Set[LfPartyId] = quorums.flatMap { _.confirmers.keys }.toSet
}
object ViewConfirmationParameters {
/** Indicates an attempt to create an invalid [[ViewConfirmationParameters]]. */
final case class InvalidViewConfirmationParameters(message: String)
extends RuntimeException(message)
/** Creates a [[ViewConfirmationParameters]] with a single quorum consisting of all confirming parties and a given threshold.
*/
def create(
informees: Map[LfPartyId, NonNegativeInt],
threshold: NonNegativeInt,
): ViewConfirmationParameters =
ViewConfirmationParameters(
informees.keySet,
Seq(
Quorum(
informees
.filter { case (_, weight) => weight.unwrap > 0 }
.map { case (partyId, weight) => partyId -> PositiveInt.tryCreate(weight.unwrap) },
threshold,
)
),
)
/** Creates a [[ViewConfirmationParameters]] where all informees are confirmers and
* includes a single quorum consisting of all confirming parties and a given threshold.
*/
def createOnlyWithConfirmers(
confirmers: Map[LfPartyId, PositiveInt],
threshold: NonNegativeInt,
): ViewConfirmationParameters =
ViewConfirmationParameters(
confirmers.keySet,
Seq(
Quorum(
confirmers,
threshold,
)
),
)
/** There can be multiple quorums/thresholds, so we need to make sure that each quorum's confirmers
* are present in the list of informees.
*/
def create(
informees: Set[LfPartyId],
quorums: Seq[Quorum],
): Either[InvariantViolation, ViewConfirmationParameters] = {
val allConfirmers = quorums.flatMap(_.confirmers.keys)
val notAnInformee = allConfirmers.filterNot(informees.contains)
Either.cond(
notAnInformee.isEmpty,
ViewConfirmationParameters(informees, quorums),
InvariantViolation(s"confirming parties $notAnInformee are not in the list of informees"),
)
}
def tryCreate(
informees: Set[LfPartyId],
quorums: Seq[Quorum],
): ViewConfirmationParameters =
create(informees, quorums).valueOr(err => throw InvalidViewConfirmationParameters(err.toString))
/** Extracts all confirming parties' distinct IDs from the list of quorums */
def confirmersIdsFromQuorums(quorums: Seq[Quorum]): Set[LfPartyId] =
quorums.flatMap(_.confirmers.keySet).toSet
}
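
A small sketch of the invariant enforced by create, assuming the definitions above; the party identifiers and the example object are placeholders:

import com.digitalasset.canton.LfPartyId
import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt}
import com.digitalasset.canton.data.{Quorum, ViewConfirmationParameters}

object ViewConfirmationParametersExample {
  private val signatory: LfPartyId = LfPartyId.assertFromString("signatory::ns")
  private val observer: LfPartyId = LfPartyId.assertFromString("observer::ns")

  // The observer is an informee but not a confirmer: it appears in the
  // informee set but in no quorum, so the invariant holds.
  val valid = ViewConfirmationParameters.create(
    Set(signatory, observer),
    Seq(Quorum(Map(signatory -> PositiveInt.one), NonNegativeInt.one)),
  ) // Right(...)

  // A confirmer that is not an informee violates the invariant.
  val invalid = ViewConfirmationParameters.create(
    Set(observer),
    Seq(Quorum(Map(signatory -> PositiveInt.one), NonNegativeInt.one)),
  ) // Left(InvariantViolation(...))
}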

View File

@ -3,6 +3,7 @@
package com.digitalasset.canton.data
import com.digitalasset.canton.LfPartyId
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.protocol.{RootHash, ViewHash}
import com.digitalasset.canton.sequencing.protocol.MediatorGroupRecipient
@ -12,7 +13,7 @@ import com.digitalasset.canton.topology.{DomainId, ParticipantId}
trait ViewTree extends PrettyPrinting {
/** The informees of the view in the tree */
def informees: Set[Informee]
def informees: Set[LfPartyId]
/** Return the hash whose signature is to be included in the [[com.digitalasset.canton.protocol.messages.EncryptedViewMessage]] */
def toBeSigned: Option[RootHash]

View File

@ -7,14 +7,14 @@ import com.digitalasset.canton.ProtoDeserializationError.{FieldNotSet, ValueConv
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.protocol.{RequestProcessor, v30}
import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.digitalasset.canton.version.HasVersionedToByteString
import com.digitalasset.canton.version.HasToByteString
/** Reifies the subclasses of [[ViewTree]] as values */
// This trait does not extend ProtoSerializable because v0.EncryptedViewMessage.ViewType is an enum, not a message.
sealed trait ViewType extends Product with Serializable with PrettyPrinting {
/** The subclass of [[ViewTree]] that is reified. */
type View <: ViewTree with HasVersionedToByteString
type View <: ViewTree with HasToByteString
type FullView <: ViewTree
@ -53,7 +53,7 @@ object ViewType {
type TransactionViewType = TransactionViewType.type
sealed trait TransferViewType extends ViewType {
type View <: TransferViewTree with HasVersionedToByteString
type View <: TransferViewTree with HasToByteString
type FullView = View
override type ViewSubmitterMetadata = TransferSubmitterMetadata
}

View File

@ -6,6 +6,7 @@ package com.digitalasset.canton.data
import cats.data.EitherT
import cats.syntax.foldable.*
import com.daml.nonempty.{NonEmpty, NonEmptyUtil}
import com.digitalasset.canton.LfPartyId
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.sequencing.protocol.{
MemberRecipient,
@ -24,10 +25,10 @@ import scala.concurrent.{ExecutionContext, Future}
 * By convention, the order is: the view's informees are at the head of the list, then the parent view's informees,
* then the grandparent's, etc.
*/
final case class Witnesses(unwrap: NonEmpty[Seq[Set[Informee]]]) {
final case class Witnesses(unwrap: NonEmpty[Seq[Set[LfPartyId]]]) {
import Witnesses.*
def prepend(informees: Set[Informee]) = Witnesses(informees +: unwrap)
def prepend(informees: Set[LfPartyId]): Witnesses = Witnesses(informees +: unwrap)
/** Derive a recipient tree that mirrors the given hierarchy of witnesses. */
def toRecipients(
@ -39,7 +40,7 @@ final case class Witnesses(unwrap: NonEmpty[Seq[Set[Informee]]]) {
for {
recipientsList <- unwrap.forgetNE.foldLeftM(Seq.empty[RecipientsTree]) {
(children, informees) =>
val parties = informees.map(_.party).toList
val parties = informees.toList
for {
informeeParticipants <- EitherT
.right[InvalidWitnesses](
@ -82,7 +83,7 @@ final case class Witnesses(unwrap: NonEmpty[Seq[Set[Informee]]]) {
recipients = Recipients(NonEmptyUtil.fromUnsafe(recipientsList))
} yield recipients
def flatten: Set[Informee] = unwrap.foldLeft(Set.empty[Informee])(_ union _)
def flatten: Set[LfPartyId] = unwrap.foldLeft(Set.empty[LfPartyId])(_ union _)
}
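
A hedged sketch of the head-first convention, assuming the Witnesses definition above and the usual NonEmpty(Seq, ...) constructor; party identifiers are placeholders:

import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.LfPartyId
import com.digitalasset.canton.data.Witnesses

object WitnessesExample {
  private val alice: LfPartyId = LfPartyId.assertFromString("alice::ns")
  private val bob: LfPartyId = LfPartyId.assertFromString("bob::ns")

  // Witnesses of a root view: only its own informees.
  val rootWitnesses: Witnesses = Witnesses(NonEmpty(Seq, Set(alice, bob)))

  // Descending into a subview prepends the subview's informees, keeping the
  // convention that the current view's informees sit at the head of the list.
  val subviewWitnesses: Witnesses = rootWitnesses.prepend(Set(alice))
  // subviewWitnesses.flatten == Set(alice, bob)
}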

View File

@ -153,6 +153,11 @@ object FutureUnlessShutdownImpl {
FutureUnlessShutdown(unwrap.transformWith(Instance.unsubst[K](f)))
}
def andThen[B](pf: PartialFunction[Try[UnlessShutdown[A]], B])(implicit
executor: ExecutionContext
): FutureUnlessShutdown[A] =
FutureUnlessShutdown(unwrap.andThen(pf))
/** Analog to [[scala.concurrent.Future]].onComplete */
def onComplete[B](f: Try[UnlessShutdown[A]] => Unit)(implicit ec: ExecutionContext): Unit =
unwrap.onComplete(f)
@ -165,6 +170,11 @@ object FutureUnlessShutdownImpl {
def onShutdown[B >: A](f: => B)(implicit ec: ExecutionContext): Future[B] =
unwrap.map(_.onShutdown(f))
@throws[AbortedDueToShutdownException]("if a shutdown signal has been received.")
def failOnShutdownToAbortException(action: String)(implicit ec: ExecutionContext): Future[A] =
failOnShutdownTo(AbortedDueToShutdownException(action))
/** consider using [[failOnShutdownToAbortException]] unless you need a specific exception. */
def failOnShutdownTo(t: => Throwable)(implicit ec: ExecutionContext): Future[A] = {
unwrap.flatMap {
case UnlessShutdown.Outcome(result) => Future.successful(result)
@ -344,6 +354,11 @@ object FutureUnlessShutdownImpl {
implicit class EitherTOnShutdownSyntax[A, B](
private val eitherT: EitherT[FutureUnlessShutdown, A, B]
) extends AnyVal {
def failOnShutdownTo[C >: A, D >: B](t: => Throwable)(implicit
ec: ExecutionContext
): EitherT[Future, C, D] =
EitherT(eitherT.value.failOnShutdownTo(t))
def onShutdown[C >: A, D >: B](f: => Either[C, D])(implicit
ec: ExecutionContext
): EitherT[Future, C, D] =
@ -365,4 +380,8 @@ object FutureUnlessShutdownImpl {
def future[T](timer: Timer, future: => FutureUnlessShutdown[T]): FutureUnlessShutdown[T] =
FutureUnlessShutdown(timed.future(timer, future.unwrap))
}
final case class AbortedDueToShutdownException(action: String)
extends RuntimeException(s"'$action' was aborted due to shutdown.")
}
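
A hedged usage sketch of the new failOnShutdownToAbortException helper; the com.digitalasset.canton.lifecycle import path and FutureUnlessShutdown.pure are assumed from the existing API, and the computation is a placeholder:

import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
import scala.concurrent.{ExecutionContext, Future}

object FailOnShutdownExample {
  // Placeholder computation standing in for real shutdown-aware work.
  def readValue(): FutureUnlessShutdown[Int] = FutureUnlessShutdown.pure(42)

  // Converts the shutdown case into AbortedDueToShutdownException("read value"),
  // so callers dealing in plain Futures do not have to handle UnlessShutdown.
  def readValueOrAbort()(implicit ec: ExecutionContext): Future[Int] =
    readValue().failOnShutdownToAbortException("read value")
}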

View File

@ -6,17 +6,19 @@ package com.digitalasset.canton.protocol
import cats.Order
import com.digitalasset.canton.LfPartyId
import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt}
import com.digitalasset.canton.data.{ConfirmingParty, Informee, PlainInformee}
import com.digitalasset.canton.data.{Quorum, ViewConfirmationParameters}
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.serialization.{
DefaultDeserializationError,
DeserializationError,
DeterministicEncoding,
}
import com.digitalasset.canton.topology.ParticipantId
import com.digitalasset.canton.topology.client.TopologySnapshot
import com.digitalasset.canton.topology.transaction.ParticipantAttributes
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.LfTransactionUtil
import com.google.common.annotations.VisibleForTesting
import com.google.protobuf.ByteString
import scala.concurrent.{ExecutionContext, Future}
@ -27,46 +29,54 @@ sealed trait ConfirmationPolicy extends Product with Serializable with PrettyPri
def toProtoPrimitive: ByteString = DeterministicEncoding.encodeString(name)
def informeesAndThreshold(actionNode: LfActionNode, topologySnapshot: TopologySnapshot)(implicit
/** Returns informees, participants hosting those informees,
* and corresponding threshold for a given action node.
*/
def informeesParticipantsAndThreshold(
actionNode: LfActionNode,
topologySnapshot: TopologySnapshot,
)(implicit
ec: ExecutionContext,
tc: TraceContext,
): Future[(Set[Informee], NonNegativeInt)]
): Future[(Map[LfPartyId, (Set[ParticipantId], NonNegativeInt)], NonNegativeInt)]
/** The minimum threshold for views of requests with this policy.
* The mediator checks that all views have at least the given threshold.
/** Returns informees and corresponding threshold for a given action node.
*/
def minimumThreshold(informees: Set[Informee]): NonNegativeInt = NonNegativeInt.one
protected def additionalWeightOfSubmittingAdminParty(
informees: Set[Informee],
adminParty: LfPartyId,
): NonNegativeInt =
informees
.collectFirst { case ConfirmingParty(`adminParty`, _) => NonNegativeInt.zero }
.getOrElse(NonNegativeInt.one)
@VisibleForTesting
def informeesAndThreshold(
actionNode: LfActionNode,
topologySnapshot: TopologySnapshot,
)(implicit
ec: ExecutionContext,
tc: TraceContext,
): Future[(Map[LfPartyId, NonNegativeInt], NonNegativeInt)]
/** This method adds an additional quorum with the submitting admin party with threshold 1, thus making sure
* that the submitting admin party has to confirm the view for it to be accepted.
*/
def withSubmittingAdminParty(
submittingAdminPartyO: Option[LfPartyId]
)(informees: Set[Informee], threshold: NonNegativeInt): (Set[Informee], NonNegativeInt) =
)(viewConfirmationParameters: ViewConfirmationParameters): ViewConfirmationParameters =
submittingAdminPartyO match {
case Some(submittingAdminParty) =>
val oldSubmittingInformee = informees
.find(_.party == submittingAdminParty)
.getOrElse(PlainInformee(submittingAdminParty))
val additionalWeight =
additionalWeightOfSubmittingAdminParty(
informees,
submittingAdminParty,
val newQuorum = Quorum(
Map(submittingAdminParty -> PositiveInt.one),
NonNegativeInt.one,
)
if (viewConfirmationParameters.quorums.contains(newQuorum))
viewConfirmationParameters
else {
val newQuorumList = viewConfirmationParameters.quorums :+ newQuorum
/* We are using tryCreate() because we are sure that the new confirmer is in the list of informees, since
* it is added at the same time.
*/
ViewConfirmationParameters.tryCreate(
viewConfirmationParameters.informees + submittingAdminParty,
newQuorumList,
)
val newSubmittingInformee =
oldSubmittingInformee.withAdditionalWeight(additionalWeight)
val newInformees = informees - oldSubmittingInformee + newSubmittingInformee
val newThreshold = threshold + additionalWeight
newInformees -> newThreshold
case None => informees -> threshold
}
case None => viewConfirmationParameters
}
override def pretty: Pretty[ConfirmationPolicy] = prettyOfObject[ConfirmationPolicy]
@ -76,27 +86,24 @@ object ConfirmationPolicy {
private val havingConfirmer: ParticipantAttributes => Boolean = _.permission.canConfirm
private def toInformeesAndThreshold(
confirmingParties: Set[LfPartyId],
plainInformees: Set[LfPartyId],
): (Set[Informee], NonNegativeInt) = {
// We make sure that the threshold is at least 1 so that a transaction is not vacuously approved if the confirming parties are empty.
val threshold = NonNegativeInt.tryCreate(Math.max(confirmingParties.size, 1))
val informees =
confirmingParties.map(ConfirmingParty(_, PositiveInt.one): Informee) ++
plainInformees.map(PlainInformee)
(informees, threshold)
}
case object Signatory extends ConfirmationPolicy {
override val name = "Signatory"
protected override val index: Int = 0
override def informeesAndThreshold(node: LfActionNode, topologySnapshot: TopologySnapshot)(
implicit
ec: ExecutionContext,
tc: TraceContext,
): Future[(Set[Informee], NonNegativeInt)] = {
private def toInformeesAndThreshold(
confirmingParties: Set[LfPartyId],
plainInformees: Set[LfPartyId],
): (Map[LfPartyId, NonNegativeInt], NonNegativeInt) = {
val threshold = NonNegativeInt.tryCreate(confirmingParties.size)
val informees =
confirmingParties.map(_ -> NonNegativeInt.one) ++
plainInformees.map(_ -> NonNegativeInt.zero)
(informees.toMap, threshold)
}
private def getPlainInformeesAndConfirmingParties(
node: LfActionNode
): (Set[LfPartyId], Set[LfPartyId]) = {
val confirmingParties =
LfTransactionUtil.signatoriesOrMaintainers(node) | LfTransactionUtil.actingParties(node)
require(
@ -104,10 +111,49 @@ object ConfirmationPolicy {
"There must be at least one confirming party, as every node must have at least one signatory.",
)
val plainInformees = node.informeesOfNode -- confirmingParties
Future.successful(
toInformeesAndThreshold(confirmingParties, plainInformees)
)
(plainInformees, confirmingParties)
}
override def informeesParticipantsAndThreshold(
node: LfActionNode,
topologySnapshot: TopologySnapshot,
)(implicit
ec: ExecutionContext,
traceContext: TraceContext,
): Future[
(Map[LfPartyId, (Set[ParticipantId], NonNegativeInt)], NonNegativeInt)
] = {
val (plainInformees, confirmingParties) = getPlainInformeesAndConfirmingParties(node)
val threshold = NonNegativeInt.tryCreate(confirmingParties.size)
val informees = plainInformees ++ confirmingParties
topologySnapshot
.activeParticipantsOfPartiesWithAttributes(informees.toSeq)
.map(informeesMap =>
informeesMap.map { case (partyId, attributes) =>
// confirming party
if (confirmingParties.contains(partyId))
partyId -> (attributes.keySet, NonNegativeInt.one)
// plain informee
else partyId -> (attributes.keySet, NonNegativeInt.zero)
}
)
.map(informeesMap => (informeesMap, threshold))
}
override def informeesAndThreshold(
actionNode: LfActionNode,
topologySnapshot: TopologySnapshot,
)(implicit
ec: ExecutionContext,
tc: TraceContext,
): Future[(Map[LfPartyId, NonNegativeInt], NonNegativeInt)] =
Future.successful({
val (plainInformees, confirmingParties) = getPlainInformeesAndConfirmingParties(actionNode)
toInformeesAndThreshold(confirmingParties, plainInformees)
})
}
val values: Seq[ConfirmationPolicy] = Seq[ConfirmationPolicy](Signatory)
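
A sketch of the new quorum-based withSubmittingAdminParty behaviour under the Signatory policy, assuming the definitions above; party identifiers are placeholders:

import com.digitalasset.canton.LfPartyId
import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt}
import com.digitalasset.canton.data.ViewConfirmationParameters
import com.digitalasset.canton.protocol.ConfirmationPolicy

object SubmittingAdminPartyExample {
  private val signatory: LfPartyId = LfPartyId.assertFromString("signatory::ns")
  private val adminParty: LfPartyId = LfPartyId.assertFromString("participant::admin")

  // A single quorum requiring the signatory to confirm.
  private val base: ViewConfirmationParameters =
    ViewConfirmationParameters.createOnlyWithConfirmers(
      Map(signatory -> PositiveInt.one),
      NonNegativeInt.one,
    )

  // Appends a second quorum {adminParty -> 1, threshold 1}, so the submitting
  // participant's admin party must also confirm the view.
  val withAdmin: ViewConfirmationParameters =
    ConfirmationPolicy.Signatory.withSubmittingAdminParty(Some(adminParty))(base)
}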

View File

@ -69,7 +69,7 @@ object DomainParameters {
 * participant skips in catch-up mode, and the number of catch-up intervals
 * a participant should lag behind in order to enter catch-up mode.
*/
final case class StaticDomainParameters private (
final case class StaticDomainParameters(
requiredSigningKeySchemes: NonEmpty[Set[SigningKeyScheme]],
requiredEncryptionKeySchemes: NonEmpty[Set[EncryptionKeyScheme]],
requiredSymmetricKeySchemes: NonEmpty[Set[SymmetricKeyScheme]],
@ -82,9 +82,6 @@ final case class StaticDomainParameters private (
StaticDomainParameters.type
] = StaticDomainParameters.protocolVersionRepresentativeFor(protocolVersion)
// Ensures the invariants related to default values hold
validateInstance().valueOr(err => throw new IllegalArgumentException(err))
@transient override protected lazy val companionObj: StaticDomainParameters.type =
StaticDomainParameters
@ -116,22 +113,6 @@ object StaticDomainParameters
override def name: String = "static domain parameters"
def create(
requiredSigningKeySchemes: NonEmpty[Set[SigningKeyScheme]],
requiredEncryptionKeySchemes: NonEmpty[Set[EncryptionKeyScheme]],
requiredSymmetricKeySchemes: NonEmpty[Set[SymmetricKeyScheme]],
requiredHashAlgorithms: NonEmpty[Set[HashAlgorithm]],
requiredCryptoKeyFormats: NonEmpty[Set[CryptoKeyFormat]],
protocolVersion: ProtocolVersion,
): StaticDomainParameters = StaticDomainParameters(
requiredSigningKeySchemes = requiredSigningKeySchemes,
requiredEncryptionKeySchemes = requiredEncryptionKeySchemes,
requiredSymmetricKeySchemes = requiredSymmetricKeySchemes,
requiredHashAlgorithms = requiredHashAlgorithms,
requiredCryptoKeyFormats = requiredCryptoKeyFormats,
protocolVersion = protocolVersion,
)
private def requiredKeySchemes[P, A](
field: String,
content: Seq[P],
@ -196,6 +177,10 @@ object StaticDomainParameters
*/
sealed trait OnboardingRestriction extends Product with Serializable {
def toProtoV30: v30.OnboardingRestriction
def isLocked: Boolean
def isRestricted: Boolean
final def isOpen: Boolean = !isLocked
final def isUnrestricted: Boolean = !isRestricted
}
object OnboardingRestriction {
def fromProtoV30(
@ -218,12 +203,18 @@ object OnboardingRestriction {
final case object UnrestrictedOpen extends OnboardingRestriction {
override def toProtoV30: v30.OnboardingRestriction =
v30.OnboardingRestriction.ONBOARDING_RESTRICTION_UNRESTRICTED_OPEN
override def isLocked: Boolean = false
override def isRestricted: Boolean = false
}
  /** In theory, anyone can join, except that the registration procedure is currently closed */
final case object UnrestrictedLocked extends OnboardingRestriction {
override def toProtoV30: v30.OnboardingRestriction =
v30.OnboardingRestriction.ONBOARDING_RESTRICTION_UNRESTRICTED_LOCKED
override def isLocked: Boolean = true
override def isRestricted: Boolean = false
}
/** Only participants on the allowlist can join
@ -233,12 +224,18 @@ object OnboardingRestriction {
final case object RestrictedOpen extends OnboardingRestriction {
override def toProtoV30: v30.OnboardingRestriction =
v30.OnboardingRestriction.ONBOARDING_RESTRICTION_RESTRICTED_OPEN
override def isLocked: Boolean = false
override def isRestricted: Boolean = true
}
  /** Only participants on the allowlist can join in theory, except that the registration procedure is currently closed */
final case object RestrictedLocked extends OnboardingRestriction {
override def toProtoV30: v30.OnboardingRestriction =
v30.OnboardingRestriction.ONBOARDING_RESTRICTION_RESTRICTED_LOCKED
override def isLocked: Boolean = true
override def isRestricted: Boolean = true
}
}
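
A standalone sketch (not part of the diff) of how the two new flags combine for the four onboarding restrictions, and how the derived isOpen/isUnrestricted helpers fall out of them. Names here are illustrative only.

// Minimal sketch: the four onboarding restrictions as combinations of two booleans.
object OnboardingRestrictionFlagsSketch {
  final case class Flags(isLocked: Boolean, isRestricted: Boolean) {
    def isOpen: Boolean = !isLocked
    def isUnrestricted: Boolean = !isRestricted
  }

  val unrestrictedOpen = Flags(isLocked = false, isRestricted = false)
  val unrestrictedLocked = Flags(isLocked = true, isRestricted = false)
  val restrictedOpen = Flags(isLocked = false, isRestricted = true)
  val restrictedLocked = Flags(isLocked = true, isRestricted = true)

  def main(args: Array[String]): Unit = {
    // A new participant can onboard without being allowlisted only when the
    // restriction is both open and unrestricted.
    def canOnboardWithoutAllowlist(f: Flags): Boolean = f.isOpen && f.isUnrestricted
    println(canOnboardWithoutAllowlist(unrestrictedOpen)) // true
    println(canOnboardWithoutAllowlist(restrictedOpen))   // false
  }
}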

View File

@ -0,0 +1,93 @@
// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package com.digitalasset.canton.protocol
import com.digitalasset.canton
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.digitalasset.canton.topology.DomainId
import com.digitalasset.canton.version.*
import com.google.protobuf.ByteString
/** Domain-wide dynamic sequencing parameters.
*
* @param payload The opaque payload of the domain-wide dynamic sequencing parameters;
* its content is sequencer-dependent and domain owners are responsible
* for ensuring that it can be correctly interpreted by the sequencers in use.
* If no payload is provided, sequencer-specific default values are used.
* If the payload cannot be correctly interpreted or the parameters cannot
* be set due to dynamic conditions, their value will not change.
*/
final case class DynamicSequencingParameters(payload: Option[ByteString])(
override val representativeProtocolVersion: RepresentativeProtocolVersion[
DynamicSequencingParameters.type
]
) extends HasProtocolVersionedWrapper[DynamicSequencingParameters]
with PrettyPrinting {
@transient override protected lazy val companionObj: DynamicSequencingParameters.type =
DynamicSequencingParameters
override def pretty: Pretty[DynamicSequencingParameters] =
prettyOfClass(
paramWithoutValue("payload", _.payload.isDefined)
)
def toProtoV30: v30.DynamicSequencingParameters =
v30.DynamicSequencingParameters(
payload.fold(ByteString.empty())(identity)
)
}
object DynamicSequencingParameters
extends HasProtocolVersionedCompanion[DynamicSequencingParameters] {
def default(
representativeProtocolVersion: RepresentativeProtocolVersion[
DynamicSequencingParameters.type
]
): DynamicSequencingParameters =
DynamicSequencingParameters(None)(representativeProtocolVersion)
override val supportedProtoVersions
: canton.protocol.DynamicSequencingParameters.SupportedProtoVersions =
SupportedProtoVersions(
ProtoVersion(30) -> VersionedProtoConverter(ProtocolVersion.v31)(
v30.DynamicSequencingParameters
)(
supportedProtoVersion(_)(fromProtoV30),
_.toProtoV30.toByteString,
)
)
override def name: String = "dynamic sequencing parameters"
def fromProtoV30(
sequencingDynamicParameters: v30.DynamicSequencingParameters
): ParsingResult[DynamicSequencingParameters] = {
val payload = sequencingDynamicParameters.payload
for {
rpv <- protocolVersionRepresentativeFor(ProtoVersion(30))
} yield DynamicSequencingParameters(Option.when(!payload.isEmpty)(payload))(rpv)
}
}
/** Dynamic sequencing parameters and their validity interval.
*
* @param validFrom Start point of the validity interval (exclusive)
* @param validUntil End point of the validity interval (inclusive)
*/
final case class DynamicSequencingParametersWithValidity(
parameters: DynamicSequencingParameters,
validFrom: CantonTimestamp,
validUntil: Option[CantonTimestamp],
domainId: DomainId,
) {
def map[T](f: DynamicSequencingParameters => T): DomainParameters.WithValidity[T] =
DomainParameters.WithValidity(validFrom, validUntil, f(parameters))
def isValidAt(ts: CantonTimestamp): Boolean =
validFrom < ts && validUntil.forall(ts <= _)
}
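
A small standalone sketch of the validity-interval semantics used by isValidAt above: validFrom is exclusive, validUntil is inclusive and open-ended when None. Plain Long timestamps stand in for CantonTimestamp.

object ValidityIntervalSketch {
  final case class WithValidity[T](validFrom: Long, validUntil: Option[Long], value: T) {
    def isValidAt(ts: Long): Boolean = validFrom < ts && validUntil.forall(ts <= _)
  }

  def main(args: Array[String]): Unit = {
    val params = WithValidity(validFrom = 10L, validUntil = Some(20L), value = "payload")
    println(params.isValidAt(10L)) // false: validFrom is exclusive
    println(params.isValidAt(20L)) // true: validUntil is inclusive
    println(params.isValidAt(21L)) // false: past the end of the interval
  }
}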

View File

@ -89,7 +89,6 @@ case class SerializableContract(
def toLf: LfNodeCreate = LfNodeCreate(
coid = contractId,
packageName = rawContractInstance.contractInstance.unversioned.packageName,
packageVersion = None,
templateId = rawContractInstance.contractInstance.unversioned.template,
arg = rawContractInstance.contractInstance.unversioned.arg,
signatories = metadata.signatories,

View File

@ -8,13 +8,15 @@ import cats.data.EitherT
import cats.syntax.either.*
import cats.syntax.traverse.*
import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.crypto.DecryptionError.{InvalidEncryptionKey, InvariantViolation}
import com.digitalasset.canton.crypto.SyncCryptoError.SyncCryptoDecryptionError
import com.digitalasset.canton.crypto.*
import com.digitalasset.canton.crypto.store.CryptoPrivateStoreError
import com.digitalasset.canton.crypto.store.CryptoPrivateStoreError.FailedToReadKey
import com.digitalasset.canton.crypto.store.{CryptoPrivateStoreError, CryptoPublicStore}
import com.digitalasset.canton.data.ViewType
import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.protocol.messages.EncryptedView.checkEncryptionKeyScheme
import com.digitalasset.canton.protocol.messages.EncryptedViewMessageError.{
SessionKeyCreationError,
SyncCryptoDecryptError,
@ -31,7 +33,7 @@ import com.digitalasset.canton.util.*
import com.digitalasset.canton.version.*
import com.google.protobuf.ByteString
import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.ExecutionContext
/** An encrypted [[com.digitalasset.canton.data.ViewTree]] together with its [[com.digitalasset.canton.data.ViewType]].
* The correspondence is encoded via a path-dependent type.
@ -98,10 +100,9 @@ object EncryptedView {
encryptionOps: EncryptionOps,
viewKey: SymmetricKey,
aViewType: VT,
version: ProtocolVersion,
)(aViewTree: aViewType.View): Either[EncryptionError, EncryptedView[VT]] =
encryptionOps
.encryptWith(CompressedView(aViewTree), viewKey, version)
.encryptWith(CompressedView(aViewTree), viewKey)
.map(apply(aViewType))
def decrypt[VT <: ViewType](
@ -124,17 +125,17 @@ object EncryptedView {
* and we want to avoid that this is applied to [[com.digitalasset.canton.serialization.HasCryptographicEvidence]]
* instances.
*/
final case class CompressedView[+V <: HasVersionedToByteString] private (value: V)
extends HasVersionedToByteString {
override def toByteString(version: ProtocolVersion): ByteString =
ByteStringUtil.compressGzip(value.toByteString(version))
final case class CompressedView[+V <: HasToByteString] private (value: V)
extends HasToByteString {
override def toByteString: ByteString =
ByteStringUtil.compressGzip(value.toByteString)
}
object CompressedView {
private[EncryptedView] def apply[V <: HasVersionedToByteString](value: V): CompressedView[V] =
private[EncryptedView] def apply[V <: HasToByteString](value: V): CompressedView[V] =
new CompressedView(value)
private[EncryptedView] def fromByteString[V <: HasVersionedToByteString](
private[EncryptedView] def fromByteString[V <: HasToByteString](
deserialize: ByteString => Either[DeserializationError, V]
)(bytes: ByteString): Either[DeserializationError, CompressedView[V]] =
// TODO(i10428) Make sure that this view does not explode into an arbitrarily large object
@ -144,6 +145,36 @@ object EncryptedView {
.map(CompressedView(_))
}
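
A plain-JDK sketch of what CompressedView does around serialization (illustrative only, no Canton or protobuf types): gzip the serialized bytes on the way out, gunzip before deserializing on the way in. The real code additionally wants a size guard against decompression bombs, per the TODO above.

import java.io.{ByteArrayInputStream, ByteArrayOutputStream}
import java.util.zip.{GZIPInputStream, GZIPOutputStream}

object CompressedViewSketch {
  def compressGzip(bytes: Array[Byte]): Array[Byte] = {
    val bos = new ByteArrayOutputStream()
    val gzip = new GZIPOutputStream(bos)
    gzip.write(bytes)
    gzip.close()
    bos.toByteArray
  }

  def decompressGzip(bytes: Array[Byte]): Array[Byte] = {
    val gzip = new GZIPInputStream(new ByteArrayInputStream(bytes))
    val out = gzip.readAllBytes() // the real code should bound the decompressed size
    gzip.close()
    out
  }

  def main(args: Array[String]): Unit = {
    val view = "serialized view bytes".getBytes("UTF-8")
    val roundTripped = decompressGzip(compressGzip(view))
    println(new String(roundTripped, "UTF-8")) // serialized view bytes
  }
}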
// TODO(#12757): after we decouple crypto key scheme from encryption scheme we don't need to check the key but rather the AsymmetricEncrypted(<message>)
def checkEncryptionKeyScheme(
cryptoPublicStore: CryptoPublicStore,
keyId: Fingerprint,
allowedEncryptionKeySchemes: NonEmpty[Set[EncryptionKeyScheme]],
)(implicit
executionContext: ExecutionContext,
traceContext: TraceContext,
): EitherT[FutureUnlessShutdown, InvalidEncryptionKey, Unit] =
for {
encryptionKey <- cryptoPublicStore
.findEncryptionKeyIdByFingerprint(keyId)
.leftMap(err => DecryptionError.InvalidEncryptionKey(err.show))
_ <- encryptionKey match {
case Some(encPubKey) =>
EitherT.cond[FutureUnlessShutdown](
allowedEncryptionKeySchemes.contains(encPubKey.scheme),
(),
DecryptionError.InvalidEncryptionKey(
s"The encryption key scheme ${encPubKey.scheme} of key $keyId is not part of the " +
s"required schemes: $allowedEncryptionKeySchemes"
),
)
case None =>
EitherT.leftT[FutureUnlessShutdown, Unit](
DecryptionError.InvalidEncryptionKey(s"Encryption key $keyId not found")
)
}
} yield ()
}
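
A simplified standalone sketch of the check added above: look the key up by its fingerprint and fail unless its scheme is one of the schemes required by the domain. Plain Either replaces EitherT over FutureUnlessShutdown, and the scheme names are illustrative.

object KeySchemeCheckSketch {
  final case class PublicKey(fingerprint: String, scheme: String)

  def checkEncryptionKeyScheme(
      findKey: String => Option[PublicKey],
      keyId: String,
      allowedSchemes: Set[String],
  ): Either[String, Unit] =
    findKey(keyId) match {
      case None => Left(s"Encryption key $keyId not found")
      case Some(key) if allowedSchemes.contains(key.scheme) => Right(())
      case Some(key) =>
        Left(
          s"The encryption key scheme ${key.scheme} of key $keyId is not part of the required schemes: $allowedSchemes"
        )
    }

  def main(args: Array[String]): Unit = {
    val store = Map("fp1" -> PublicKey("fp1", "ecies-p256"))
    println(checkEncryptionKeyScheme(store.get, "fp1", Set("ecies-p256"))) // Right(())
    println(checkEncryptionKeyScheme(store.get, "fp2", Set("ecies-p256"))) // Left(not found)
  }
}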
/** An encrypted view message.
@ -323,6 +354,7 @@ object EncryptedViewMessage extends HasProtocolVersionedCompanion[EncryptedViewM
}
def decryptRandomness[VT <: ViewType](
allowedEncryptionKeySchemes: NonEmpty[Set[EncryptionKeyScheme]],
snapshot: DomainSnapshotSyncCryptoApi,
sessionKeyStore: SessionKeyStore,
encrypted: EncryptedViewMessage[VT],
@ -337,13 +369,13 @@ object EncryptedViewMessage extends HasProtocolVersionedCompanion[EncryptedViewM
def decryptViewRandomness(
sessionKeyRandomness: SecureRandomness
): EitherT[Future, EncryptedViewMessageError, SecureRandomness] =
): EitherT[FutureUnlessShutdown, EncryptedViewMessageError, SecureRandomness] =
for {
// derive symmetric key from randomness
sessionKey <- pureCrypto
.createSymmetricKey(sessionKeyRandomness, encrypted.viewEncryptionScheme)
.leftMap[EncryptedViewMessageError](SessionKeyCreationError)
.toEitherT[Future]
.toEitherT[FutureUnlessShutdown]
randomness <- pureCrypto
.decryptWith(encrypted.randomness, sessionKey)(
SecureRandomness.fromByteString(randomnessLength)
@ -351,7 +383,7 @@ object EncryptedViewMessage extends HasProtocolVersionedCompanion[EncryptedViewM
.leftMap[EncryptedViewMessageError](
EncryptedViewMessageError.SymmetricDecryptError
)
.toEitherT[Future]
.toEitherT[FutureUnlessShutdown]
} yield randomness
encrypted.sessionKey
@ -375,9 +407,10 @@ object EncryptedViewMessage extends HasProtocolVersionedCompanion[EncryptedViewM
* correct rights to do so, but this participant does not have the corresponding private key in the store.
*/
encryptionKeys <- EitherT
.right(snapshot.ipsSnapshot.encryptionKeys(participantId))
.right(
FutureUnlessShutdown.outcomeF(snapshot.ipsSnapshot.encryptionKeys(participantId))
)
.map(_.map(_.id).toSet)
.mapK(FutureUnlessShutdown.outcomeK)
encryptedSessionKeyForParticipant <- encrypted.sessionKey
.find(e => encryptionKeys.contains(e.encryptedFor))
.toRight(
@ -399,6 +432,17 @@ object EncryptedViewMessage extends HasProtocolVersionedCompanion[EncryptedViewM
),
)
}
_ <- checkEncryptionKeyScheme(
snapshot.crypto.cryptoPublicStore,
encryptedSessionKeyForParticipant.encryptedFor,
allowedEncryptionKeySchemes,
)
.leftMap(err =>
EncryptedViewMessageError
.SyncCryptoDecryptError(
SyncCryptoDecryptionError(err)
)
)
// we get the randomness for the session key from the message or by searching the cache,
// which means that a previous view with the same recipients has been received before.
@ -418,7 +462,7 @@ object EncryptedViewMessage extends HasProtocolVersionedCompanion[EncryptedViewM
SyncCryptoDecryptionError(err)
)
)
viewRandomness <- decryptViewRandomness(skRandom).mapK(FutureUnlessShutdown.outcomeK)
viewRandomness <- decryptViewRandomness(skRandom)
} yield viewRandomness
}
}
@ -430,8 +474,8 @@ object EncryptedViewMessage extends HasProtocolVersionedCompanion[EncryptedViewM
private def eitherT[VT <: ViewType, B](value: Either[EncryptedViewMessageError, B])(implicit
ec: ExecutionContext
): EitherT[Future, EncryptedViewMessageError, B] =
EitherT.fromEither[Future](value)
): EitherT[FutureUnlessShutdown, EncryptedViewMessageError, B] =
EitherT.fromEither[FutureUnlessShutdown](value)
def computeRandomnessLength(pureCrypto: CryptoPureApi): Int =
pureCrypto.defaultHashAlgorithm.length.toInt
@ -444,14 +488,14 @@ object EncryptedViewMessage extends HasProtocolVersionedCompanion[EncryptedViewM
viewRandomness: SecureRandomness,
)(deserialize: ByteString => Either[DeserializationError, encrypted.encryptedView.viewType.View])(
implicit ec: ExecutionContext
): EitherT[Future, EncryptedViewMessageError, VT#View] = {
): EitherT[FutureUnlessShutdown, EncryptedViewMessageError, VT#View] = {
val pureCrypto = snapshot.pureCrypto
val viewKeyLength = encrypted.viewEncryptionScheme.keySizeInBytes
val randomnessLength = computeRandomnessLength(snapshot.pureCrypto)
for {
_ <- EitherT.cond[Future](
_ <- EitherT.cond[FutureUnlessShutdown](
viewRandomness.unwrap.size == randomnessLength,
(),
EncryptedViewMessageError.WrongRandomnessLength(
@ -491,6 +535,7 @@ object EncryptedViewMessage extends HasProtocolVersionedCompanion[EncryptedViewM
}
def decryptFor[VT <: ViewType](
staticDomainParameters: StaticDomainParameters,
snapshot: DomainSnapshotSyncCryptoApi,
sessionKeyStore: SessionKeyStore,
encrypted: EncryptedViewMessage[VT],
@ -502,17 +547,38 @@ object EncryptedViewMessage extends HasProtocolVersionedCompanion[EncryptedViewM
tc: TraceContext,
): EitherT[FutureUnlessShutdown, EncryptedViewMessageError, VT#View] = {
val decryptedRandomness =
decryptRandomness(snapshot, sessionKeyStore, encrypted, participantId)
// verify that the view symmetric encryption scheme is part of the required schemes
if (
!staticDomainParameters.requiredSymmetricKeySchemes
.contains(encrypted.viewEncryptionScheme)
) {
EitherT.leftT[FutureUnlessShutdown, VT#View](
EncryptedViewMessageError.SymmetricDecryptError(
InvariantViolation(
s"The view symmetric encryption scheme ${encrypted.viewEncryptionScheme} is not " +
s"part of the required schemes: ${staticDomainParameters.requiredSymmetricKeySchemes}"
)
)
)
} else {
val decryptedRandomness =
decryptRandomness(
staticDomainParameters.requiredEncryptionKeySchemes,
snapshot,
sessionKeyStore,
encrypted,
participantId,
)
for {
viewRandomness <- optViewRandomness.fold(
decryptedRandomness
)(r => EitherT.pure(r))
decrypted <- decryptWithRandomness(snapshot, encrypted, viewRandomness)(
deserialize
).mapK(FutureUnlessShutdown.outcomeK)
} yield decrypted
for {
viewRandomness <- optViewRandomness.fold(
decryptedRandomness
)(r => EitherT.pure(r))
decrypted <- decryptWithRandomness(snapshot, encrypted, viewRandomness)(
deserialize
)
} yield decrypted
}
}
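
A simplified, standalone view of the gate added to decryptFor above, using plain Either instead of EitherT over FutureUnlessShutdown (all names illustrative): reject a view whose symmetric encryption scheme is not among the domain's required schemes, otherwise obtain the view randomness (caller-supplied or decrypted) and decrypt the view.

object DecryptGateSketch {
  final case class EncryptedView(viewEncryptionScheme: String, bytes: Array[Byte])

  def decryptFor(
      requiredSymmetricKeySchemes: Set[String],
      encrypted: EncryptedView,
      providedRandomness: Option[Array[Byte]],
      decryptRandomness: EncryptedView => Either[String, Array[Byte]],
      decryptWithRandomness: (EncryptedView, Array[Byte]) => Either[String, String],
  ): Either[String, String] =
    if (!requiredSymmetricKeySchemes.contains(encrypted.viewEncryptionScheme))
      Left(
        s"The view symmetric encryption scheme ${encrypted.viewEncryptionScheme} is not " +
          s"part of the required schemes: $requiredSymmetricKeySchemes"
      )
    else
      for {
        // use the randomness supplied by the caller if present, otherwise decrypt it
        randomness <- providedRandomness.fold(decryptRandomness(encrypted))(Right(_))
        view <- decryptWithRandomness(encrypted, randomness)
      } yield view

  def main(args: Array[String]): Unit = {
    val view = EncryptedView("aes-128-gcm", Array[Byte](1, 2, 3))
    val result = decryptFor(
      requiredSymmetricKeySchemes = Set("aes-128-gcm"),
      encrypted = view,
      providedRandomness = None,
      decryptRandomness = _ => Right(Array[Byte](42)),
      decryptWithRandomness = (_, _) => Right("decrypted view"),
    )
    println(result) // Right(decrypted view)
  }
}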
implicit val encryptedViewMessageCast

View File

@ -10,7 +10,7 @@ import com.digitalasset.canton.crypto.HashOps
import com.digitalasset.canton.protocol.messages.ProtocolMessage.ProtocolMessageContentCast
import com.digitalasset.canton.protocol.v30
import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.digitalasset.canton.version.Transfer.TargetProtocolVersion
import com.digitalasset.canton.version.Transfer.{SourceProtocolVersion, TargetProtocolVersion}
import com.digitalasset.canton.version.*
import com.google.protobuf.ByteString
@ -68,7 +68,9 @@ object EnvelopeContent
case Content.EncryptedViewMessage(messageP) =>
EncryptedViewMessage.fromProto(messageP)
case Content.TransferOutMediatorMessage(messageP) =>
TransferOutMediatorMessage.fromProtoV30(context)(messageP)
TransferOutMediatorMessage.fromProtoV30(
(hashOps, SourceProtocolVersion(expectedProtocolVersion))
)(messageP)
case Content.TransferInMediatorMessage(messageP) =>
TransferInMediatorMessage.fromProtoV30(
(hashOps, TargetProtocolVersion(expectedProtocolVersion))

View File

@ -3,9 +3,13 @@
package com.digitalasset.canton.protocol.messages
import com.digitalasset.canton.config.RequireTypes.NonNegativeInt
import com.digitalasset.canton.crypto.{HashOps, Signature}
import com.digitalasset.canton.data.{FullInformeeTree, Informee, ViewPosition, ViewType}
import com.digitalasset.canton.data.{
FullInformeeTree,
ViewConfirmationParameters,
ViewPosition,
ViewType,
}
import com.digitalasset.canton.logging.pretty.Pretty
import com.digitalasset.canton.protocol.messages.ProtocolMessage.ProtocolMessageContentCast
import com.digitalasset.canton.protocol.{RootHash, v30}
@ -56,8 +60,8 @@ case class InformeeMessage(
override def mediator: MediatorGroupRecipient = fullInformeeTree.mediator
override def informeesAndThresholdByViewPosition
: Map[ViewPosition, (Set[Informee], NonNegativeInt)] =
override def informeesAndConfirmationParamsByViewPosition
: Map[ViewPosition, ViewConfirmationParameters] =
fullInformeeTree.informeesAndThresholdByViewPosition
// Implementing a `toProto<version>` method allows us to compose serializable classes.
@ -72,9 +76,6 @@ case class InformeeMessage(
override def toProtoSomeEnvelopeContentV30: v30.EnvelopeContent.SomeEnvelopeContent =
v30.EnvelopeContent.SomeEnvelopeContent.InformeeMessage(toProtoV30)
override def minimumThreshold(informees: Set[Informee]): NonNegativeInt =
fullInformeeTree.confirmationPolicy.minimumThreshold(informees)
override def rootHash: RootHash = fullInformeeTree.transactionId.toRootHash
override def viewType: ViewType = ViewType.TransactionViewType

View File

@ -4,9 +4,8 @@
package com.digitalasset.canton.protocol.messages
import com.digitalasset.canton.LfPartyId
import com.digitalasset.canton.config.RequireTypes.NonNegativeInt
import com.digitalasset.canton.crypto.Signature
import com.digitalasset.canton.data.{Informee, ViewPosition, ViewType}
import com.digitalasset.canton.data.{ViewConfirmationParameters, ViewPosition, ViewType}
import com.digitalasset.canton.protocol.RootHash
import com.digitalasset.canton.protocol.messages.ProtocolMessage.ProtocolMessageContentCast
import com.digitalasset.canton.sequencing.protocol.MediatorGroupRecipient
@ -19,21 +18,17 @@ trait MediatorConfirmationRequest extends UnsignedProtocolMessage {
def mediator: MediatorGroupRecipient
def informeesAndThresholdByViewPosition: Map[ViewPosition, (Set[Informee], NonNegativeInt)]
def informeesAndConfirmationParamsByViewPosition: Map[ViewPosition, ViewConfirmationParameters]
def allInformees: Set[LfPartyId] =
informeesAndThresholdByViewPosition
.flatMap { case (_, (informees, _)) =>
informeesAndConfirmationParamsByViewPosition.flatMap {
case (_, ViewConfirmationParameters(informees, _)) =>
informees
}
.map(_.party)
.toSet
}.toSet
/** Determines whether the mediator may disclose informees as part of its result message. */
def informeesArePublic: Boolean
def minimumThreshold(informees: Set[Informee]): NonNegativeInt
/** Returns the hash that all [[com.digitalasset.canton.protocol.messages.RootHashMessage]]s of the request batch should contain.
*/
def rootHash: RootHash

View File

@ -6,7 +6,12 @@ package com.digitalasset.canton.protocol.messages
import com.digitalasset.canton.ProtoDeserializationError.OtherError
import com.digitalasset.canton.config.RequireTypes.NonNegativeInt
import com.digitalasset.canton.crypto.{HashOps, Signature}
import com.digitalasset.canton.data.{Informee, TransferInViewTree, ViewPosition, ViewType}
import com.digitalasset.canton.data.{
TransferInViewTree,
ViewConfirmationParameters,
ViewPosition,
ViewType,
}
import com.digitalasset.canton.logging.pretty.Pretty
import com.digitalasset.canton.protocol.*
import com.digitalasset.canton.sequencing.protocol.MediatorGroupRecipient
@ -54,15 +59,18 @@ final case class TransferInMediatorMessage(
override def requestUuid: UUID = commonData.uuid
override def informeesAndThresholdByViewPosition
: Map[ViewPosition, (Set[Informee], NonNegativeInt)] = {
override def informeesAndConfirmationParamsByViewPosition
: Map[ViewPosition, ViewConfirmationParameters] = {
val confirmingParties = commonData.confirmingParties
val threshold = NonNegativeInt.tryCreate(confirmingParties.size)
Map(tree.viewPosition -> ((confirmingParties, threshold)))
Map(
tree.viewPosition -> ViewConfirmationParameters.createOnlyWithConfirmers(
confirmingParties,
threshold,
)
)
}
override def minimumThreshold(informees: Set[Informee]): NonNegativeInt = NonNegativeInt.one
override def toProtoSomeEnvelopeContentV30: v30.EnvelopeContent.SomeEnvelopeContent =
v30.EnvelopeContent.SomeEnvelopeContent.TransferInMediatorMessage(toProtoV30)
@ -105,7 +113,7 @@ object TransferInMediatorMessage
for {
tree <- ProtoConverter
.required("TransferInMediatorMessage.tree", treePO)
.flatMap(TransferInViewTree.fromProtoV30(context, _))
.flatMap(TransferInViewTree.fromProtoV30(context))
_ <- EitherUtil.condUnitE(
tree.commonData.isFullyUnblinded,
OtherError(s"Transfer-in common data is blinded in request ${tree.rootHash}"),

View File

@ -6,7 +6,12 @@ package com.digitalasset.canton.protocol.messages
import com.digitalasset.canton.ProtoDeserializationError.OtherError
import com.digitalasset.canton.config.RequireTypes.NonNegativeInt
import com.digitalasset.canton.crypto.{HashOps, Signature}
import com.digitalasset.canton.data.{Informee, TransferOutViewTree, ViewPosition, ViewType}
import com.digitalasset.canton.data.{
TransferOutViewTree,
ViewConfirmationParameters,
ViewPosition,
ViewType,
}
import com.digitalasset.canton.logging.pretty.Pretty
import com.digitalasset.canton.protocol.*
import com.digitalasset.canton.sequencing.protocol.MediatorGroupRecipient
@ -14,6 +19,7 @@ import com.digitalasset.canton.serialization.ProtoConverter
import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.digitalasset.canton.topology.{DomainId, ParticipantId}
import com.digitalasset.canton.util.EitherUtil
import com.digitalasset.canton.version.Transfer.SourceProtocolVersion
import com.digitalasset.canton.version.{
HasProtocolVersionedWithContextCompanion,
ProtoVersion,
@ -40,7 +46,7 @@ final case class TransferOutMediatorMessage(
override def submittingParticipant: ParticipantId = tree.submittingParticipant
val protocolVersion = commonData.protocolVersion
val protocolVersion: SourceProtocolVersion = commonData.sourceProtocolVersion
override val representativeProtocolVersion
: RepresentativeProtocolVersion[TransferOutMediatorMessage.type] =
@ -52,15 +58,18 @@ final case class TransferOutMediatorMessage(
override def requestUuid: UUID = commonData.uuid
override def informeesAndThresholdByViewPosition
: Map[ViewPosition, (Set[Informee], NonNegativeInt)] = {
override def informeesAndConfirmationParamsByViewPosition
: Map[ViewPosition, ViewConfirmationParameters] = {
val confirmingParties = commonData.confirmingParties
val threshold = NonNegativeInt.tryCreate(confirmingParties.size)
Map(tree.viewPosition -> ((confirmingParties, threshold)))
Map(
tree.viewPosition -> ViewConfirmationParameters.createOnlyWithConfirmers(
confirmingParties,
threshold,
)
)
}
override def minimumThreshold(informees: Set[Informee]): NonNegativeInt = NonNegativeInt.one
def toProtoV30: v30.TransferOutMediatorMessage =
v30.TransferOutMediatorMessage(
tree = Some(tree.toProtoV30),
@ -85,7 +94,7 @@ final case class TransferOutMediatorMessage(
object TransferOutMediatorMessage
extends HasProtocolVersionedWithContextCompanion[
TransferOutMediatorMessage,
(HashOps, ProtocolVersion),
(HashOps, SourceProtocolVersion),
] {
val supportedProtoVersions = SupportedProtoVersions(
@ -97,7 +106,7 @@ object TransferOutMediatorMessage
)
)
def fromProtoV30(context: (HashOps, ProtocolVersion))(
def fromProtoV30(context: (HashOps, SourceProtocolVersion))(
transferOutMediatorMessageP: v30.TransferOutMediatorMessage
): ParsingResult[TransferOutMediatorMessage] = {
val v30.TransferOutMediatorMessage(treePO, submittingParticipantSignaturePO) =

View File

@ -67,10 +67,10 @@ class AuthenticationTokenProvider(
def generateToken(
authenticationClient: SequencerAuthenticationServiceStub
): EitherT[Future, Status, AuthenticationTokenWithExpiry] = {
): EitherT[FutureUnlessShutdown, Status, AuthenticationTokenWithExpiry] = {
// this should be called by a grpc client interceptor
implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext
performUnlessClosingEitherT(functionFullName, shutdownStatus) {
performUnlessClosingEitherU(functionFullName) {
def generateTokenET: FutureUnlessShutdown[Either[Status, AuthenticationTokenWithExpiry]] =
(for {
challenge <- getChallenge(authenticationClient).mapK(FutureUnlessShutdown.outcomeK)

View File

@ -4,10 +4,15 @@
package com.digitalasset.canton.sequencing.authentication.grpc
import cats.data.EitherT
import cats.implicits.*
import com.digitalasset.canton.concurrent.FutureSupervisor
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.discard.Implicits.DiscardOps
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.lifecycle.{
FutureUnlessShutdown,
PromiseUnlessShutdown,
UnlessShutdown,
}
import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.sequencing.authentication.{
AuthenticationToken,
AuthenticationTokenManagerConfig,
@ -18,7 +23,7 @@ import com.digitalasset.canton.util.Thereafter.syntax.*
import io.grpc.Status
import java.util.concurrent.atomic.AtomicReference
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.concurrent.ExecutionContext
import scala.util.{Failure, Success}
final case class AuthenticationTokenWithExpiry(
@ -32,18 +37,23 @@ final case class AuthenticationTokenWithExpiry(
* `getToken` always returns an `EitherT[Future, ...]`, but if a token is already available it will be completed immediately with that token.
*/
class AuthenticationTokenManager(
obtainToken: TraceContext => EitherT[Future, Status, AuthenticationTokenWithExpiry],
obtainToken: TraceContext => EitherT[
FutureUnlessShutdown,
Status,
AuthenticationTokenWithExpiry,
],
isClosed: => Boolean,
config: AuthenticationTokenManagerConfig,
clock: Clock,
protected val loggerFactory: NamedLoggerFactory,
)(implicit executionContext: ExecutionContext)
)(implicit executionContext: ExecutionContext, traceContext: TraceContext)
extends NamedLogging {
sealed trait State
case object NoToken extends State
case class Refreshing(pending: EitherT[Future, Status, AuthenticationTokenWithExpiry])
extends State
case class Refreshing(
pending: EitherT[FutureUnlessShutdown, Status, AuthenticationTokenWithExpiry]
) extends State
case class HaveToken(token: AuthenticationToken) extends State
private val state = new AtomicReference[State](NoToken)
@ -53,7 +63,7 @@ class AuthenticationTokenManager(
* If there is no token it will cause a token refresh to start and be completed once obtained.
* If there is a refresh already in progress it will be completed with this refresh.
*/
def getToken: EitherT[Future, Status, AuthenticationToken] =
def getToken: EitherT[FutureUnlessShutdown, Status, AuthenticationToken] =
refreshToken(refreshWhenHaveToken = false)
/** Invalidate the current token if it matches the provided value.
@ -68,9 +78,13 @@ class AuthenticationTokenManager(
private def refreshToken(
refreshWhenHaveToken: Boolean
): EitherT[Future, Status, AuthenticationToken] = {
val refreshTokenPromise = Promise[Either[Status, AuthenticationTokenWithExpiry]]()
val refreshingState = Refreshing(EitherT(refreshTokenPromise.future))
): EitherT[FutureUnlessShutdown, Status, AuthenticationToken] = {
val refreshTokenPromise =
new PromiseUnlessShutdown[Either[Status, AuthenticationTokenWithExpiry]](
"refreshToken",
FutureSupervisor.Noop,
)(ecl = ErrorLoggingContext.fromTracedLogger(logger), ec = executionContext)
val refreshingState = Refreshing(EitherT(refreshTokenPromise.futureUS))
state.getAndUpdate {
case NoToken => refreshingState
@ -82,7 +96,7 @@ class AuthenticationTokenManager(
// we have a token, so share it
case HaveToken(token) =>
if (refreshWhenHaveToken) createRefreshTokenFuture(refreshTokenPromise)
else EitherT.rightT[Future, Status](token)
else EitherT.rightT[FutureUnlessShutdown, Status](token)
// there is no token yet, so start refreshing and return pending result
case NoToken =>
createRefreshTokenFuture(refreshTokenPromise)
@ -90,12 +104,12 @@ class AuthenticationTokenManager(
}
private def createRefreshTokenFuture(
promise: Promise[Either[Status, AuthenticationTokenWithExpiry]]
): EitherT[Future, Status, AuthenticationToken] = {
promise: PromiseUnlessShutdown[Either[Status, AuthenticationTokenWithExpiry]]
): EitherT[FutureUnlessShutdown, Status, AuthenticationToken] = {
implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext
logger.debug("Refreshing authentication token")
val currentRefresh = promise.future
val currentRefresh = promise.futureUS
def completeRefresh(result: State): Unit = {
state.updateAndGet {
case Refreshing(pending) if pending.value == currentRefresh => result
@ -123,13 +137,18 @@ class AuthenticationTokenManager(
case _ => logger.warn("Token refresh failed", exception)
}
completeRefresh(NoToken)
case Success(Left(error)) =>
case Success(UnlessShutdown.AbortedDueToShutdown) =>
logger.warn(s"Token refresh aborted due to shutdown.")
completeRefresh(NoToken)
case Success(UnlessShutdown.Outcome(Left(error))) =>
if (error.getCode == Status.Code.CANCELLED)
logger.debug("Token refresh cancelled due to shutdown")
else
logger.warn(s"Token refresh encountered error: $error")
completeRefresh(NoToken)
case Success(Right(AuthenticationTokenWithExpiry(newToken, expiresAt))) =>
case Success(
UnlessShutdown.Outcome(Right(AuthenticationTokenWithExpiry(newToken, expiresAt)))
) =>
logger.debug("Token refresh complete")
completeRefresh(HaveToken(newToken))
scheduleRefreshBefore(expiresAt)
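
A greatly simplified, synchronous sketch of the token manager's state machine (illustrative only, no futures or shutdown handling): a cached token is returned immediately, otherwise a refresh runs and its result is cached; a failed refresh drops back to the empty state so the next call retries.

object TokenManagerSketch {
  sealed trait State
  case object NoToken extends State
  final case class HaveToken(token: String) extends State

  final class TokenManager(obtainToken: () => Either[String, String]) {
    private var state: State = NoToken

    def getToken: Either[String, String] = state match {
      case HaveToken(token) => Right(token) // completed immediately with the cached token
      case NoToken =>
        obtainToken() match {
          case Right(token) => state = HaveToken(token); Right(token)
          case Left(err)    => state = NoToken; Left(err) // stay empty so the next call retries
        }
    }

    def invalidateToken(token: String): Unit = state match {
      case HaveToken(`token`) => state = NoToken
      case _                  => ()
    }
  }

  def main(args: Array[String]): Unit = {
    var calls = 0
    val manager = new TokenManager(() => { calls += 1; Right(s"token-$calls") })
    println(manager.getToken) // Right(token-1): refresh happened
    println(manager.getToken) // Right(token-1): served from the cache
    manager.invalidateToken("token-1")
    println(manager.getToken) // Right(token-2): refreshed again
  }
}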

View File

@ -6,6 +6,8 @@ package com.digitalasset.canton.sequencing.authentication.grpc
import cats.data.EitherT
import cats.implicits.*
import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.lifecycle.UnlessShutdown.AbortedDueToShutdown
import com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, UnlessShutdown}
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.networking.Endpoint
import com.digitalasset.canton.sequencing.authentication.{
@ -23,7 +25,7 @@ import io.grpc.internal.GrpcAttributes
import io.grpc.stub.AbstractStub
import java.util.concurrent.Executor
import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.ExecutionContext
import scala.util.control.NonFatal
/** Provides call credentials and an interceptor to generate a token for outgoing requests and add the token to the call
@ -84,9 +86,13 @@ private[grpc] class SequencerClientTokenAuthentication(
.withCause(ex)
)
}
.unwrap
.foreach {
case Left(errorStatus) => applier.fail(errorStatus)
case Right(token) => applier.apply(generateMetadata(token, maybeEndpoint))
case AbortedDueToShutdown =>
applier.fail(Status.ABORTED.withDescription("Aborted due to shutdown."))
case UnlessShutdown.Outcome(Left(errorStatus)) => applier.fail(errorStatus)
case UnlessShutdown.Outcome(Right(token)) =>
applier.apply(generateMetadata(token, maybeEndpoint))
}
}
@ -156,13 +162,19 @@ object SequencerClientTokenAuthentication {
domainId: DomainId,
authenticatedMember: AuthenticatedMember,
obtainTokenPerEndpoint: NonEmpty[
Map[Endpoint, TraceContext => EitherT[Future, Status, AuthenticationTokenWithExpiry]]
Map[
Endpoint,
TraceContext => EitherT[FutureUnlessShutdown, Status, AuthenticationTokenWithExpiry],
]
],
isClosed: => Boolean,
tokenManagerConfig: AuthenticationTokenManagerConfig,
clock: Clock,
loggerFactory: NamedLoggerFactory,
)(implicit executionContext: ExecutionContext): SequencerClientAuthentication = {
)(implicit
executionContext: ExecutionContext,
traceContext: TraceContext,
): SequencerClientAuthentication = {
val tokenManagerPerEndpoint = obtainTokenPerEndpoint.transform { case (_, obtainToken) =>
new AuthenticationTokenManager(
obtainToken,

View File

@ -33,7 +33,7 @@ import scala.jdk.DurationConverters.*
class PeriodicAcknowledgements(
isHealthy: => Boolean,
interval: FiniteDuration,
fetchLatestCleanTimestamp: TraceContext => Future[Option[CantonTimestamp]],
fetchLatestCleanTimestamp: TraceContext => FutureUnlessShutdown[Option[CantonTimestamp]],
acknowledge: Traced[CantonTimestamp] => EitherT[FutureUnlessShutdown, String, Boolean],
clock: Clock,
override protected val timeouts: ProcessingTimeout,
@ -63,7 +63,6 @@ class PeriodicAcknowledgements(
for {
latestClean <- EitherT
.right(fetchLatestCleanTimestamp(traceContext))
.mapK(FutureUnlessShutdown.outcomeK)
result <- latestClean.fold(EitherT.rightT[FutureUnlessShutdown, String](true))(
ackIfChanged
)
@ -105,8 +104,8 @@ class PeriodicAcknowledgements(
}
object PeriodicAcknowledgements {
type FetchCleanTimestamp = TraceContext => Future[Option[CantonTimestamp]]
val noAcknowledgements: FetchCleanTimestamp = _ => Future.successful(None)
type FetchCleanTimestamp = TraceContext => FutureUnlessShutdown[Option[CantonTimestamp]]
val noAcknowledgements: FetchCleanTimestamp = _ => FutureUnlessShutdown.pure(None)
def create(
interval: FiniteDuration,
@ -140,7 +139,9 @@ object PeriodicAcknowledgements {
)(implicit executionContext: ExecutionContext): FetchCleanTimestamp =
traceContext =>
for {
cursorO <- counterTrackerStore.preheadSequencerCounter(traceContext)
cursorO <- FutureUnlessShutdown.outcomeF(
counterTrackerStore.preheadSequencerCounter(traceContext)
)
timestampO = cursorO.map(_.timestamp)
} yield timestampO
}

View File

@ -585,7 +585,6 @@ abstract class SequencerClientImpl(
} else
sequencersTransportState.transport
.sendAsyncUnauthenticatedVersioned(request, timeout)
.mapK(FutureUnlessShutdown.outcomeK)
}
.leftSemiflatMap { err =>
// increment appropriate error metrics
@ -844,7 +843,6 @@ abstract class SequencerClientImpl(
)
result <- sequencersTransportState.transport
.acknowledgeSigned(signedRequest)
.mapK(FutureUnlessShutdown.outcomeK)
} yield result
}

View File

@ -294,7 +294,10 @@ object SequencerClientFactory {
private def grpcSequencerClientAuth(
connection: GrpcSequencerConnection,
member: Member,
)(implicit executionContext: ExecutionContextExecutor): GrpcSequencerClientAuth = {
)(implicit
executionContext: ExecutionContextExecutor,
traceContext: TraceContext,
): GrpcSequencerClientAuth = {
val channelPerEndpoint = connection.endpoints.map { endpoint =>
val subConnection = connection.copy(endpoints = NonEmpty.mk(Seq, endpoint))
endpoint -> createChannel(subConnection)
@ -316,6 +319,7 @@ object SequencerClientFactory {
executionContext: ExecutionContextExecutor,
executionSequencerFactory: ExecutionSequencerFactory,
materializer: Materializer,
traceContext: TraceContext,
): SequencerClientTransport & SequencerClientTransportPekko = {
val channel = createChannel(connection)
val auth = grpcSequencerClientAuth(connection, member)

View File

@ -29,13 +29,14 @@ trait SequencerClientTransportFactory {
traceContext: TraceContext,
): EitherT[Future, String, NonEmpty[
Map[SequencerAlias, SequencerClientTransport & SequencerClientTransportPekko]
]] =
]] = {
MonadUtil
.sequentialTraverse(sequencerConnections.connections)(conn =>
makeTransport(conn, member, requestSigner)
.map(transport => conn.sequencerAlias -> transport)
)
.map(transports => NonEmptyUtil.fromUnsafe(transports.toMap))
}
def makeTransport(
connection: SequencerConnection,

View File

@ -44,7 +44,7 @@ class GrpcSequencerClientAuth(
clock: Clock,
override protected val timeouts: ProcessingTimeout,
protected val loggerFactory: NamedLoggerFactory,
)(implicit executionContext: ExecutionContext)
)(implicit executionContext: ExecutionContext, traceContext: TraceContext)
extends FlagCloseable
with NamedLogging {

View File

@ -125,12 +125,12 @@ private[transports] abstract class GrpcSequencerClientTransportCommon(
request.content.messageId,
timeout,
SendAsyncUnauthenticatedVersionedResponse.fromSendAsyncVersionedResponseProto,
).mapK(FutureUnlessShutdown.outcomeK)
)
}
override def sendAsyncUnauthenticatedVersioned(request: SubmissionRequest, timeout: Duration)(
implicit traceContext: TraceContext
): EitherT[Future, SendAsyncClientResponseError, Unit] = sendInternal(
): EitherT[FutureUnlessShutdown, SendAsyncClientResponseError, Unit] = sendInternal(
stub =>
stub.sendAsyncUnauthenticatedVersioned(
v30.SendAsyncUnauthenticatedVersionedRequest(submissionRequest = request.toByteString)
@ -147,7 +147,9 @@ private[transports] abstract class GrpcSequencerClientTransportCommon(
messageId: MessageId,
timeout: Duration,
fromResponseProto: Resp => ParsingResult[SendAsyncUnauthenticatedVersionedResponse],
)(implicit traceContext: TraceContext): EitherT[Future, SendAsyncClientResponseError, Unit] = {
)(implicit
traceContext: TraceContext
): EitherT[FutureUnlessShutdown, SendAsyncClientResponseError, Unit] = {
// sends are at-most-once so we cannot retry when unavailable as we don't know if the request has been accepted
val sendAtMostOnce = retryPolicy(retryOnUnavailable = false)
val response =
@ -159,10 +161,12 @@ private[transports] abstract class GrpcSequencerClientTransportCommon(
logPolicy = noLoggingShutdownErrorsLogPolicy,
retryPolicy = sendAtMostOnce,
)
response.biflatMap(
fromGrpcError(_, messageId).toEitherT,
fromResponse(_, fromResponseProto).toEitherT,
)
response
.biflatMap(
fromGrpcError(_, messageId).toEitherT,
fromResponse(_, fromResponseProto).toEitherT,
)
.mapK(FutureUnlessShutdown.outcomeK)
}
private def fromResponse[Proto](
@ -245,7 +249,7 @@ private[transports] abstract class GrpcSequencerClientTransportCommon(
override def acknowledgeSigned(signedRequest: SignedContent[AcknowledgeRequest])(implicit
traceContext: TraceContext
): EitherT[Future, String, Boolean] = {
): EitherT[FutureUnlessShutdown, String, Boolean] = {
val request = signedRequest.content
val timestamp = request.timestamp
logger.debug(s"Acknowledging timestamp: $timestamp")
@ -267,6 +271,7 @@ private[transports] abstract class GrpcSequencerClientTransportCommon(
case x if x.status == io.grpc.Status.UNAVAILABLE => false
}
.leftMap(_.toString)
.mapK(FutureUnlessShutdown.outcomeK)
}
override def downloadTopologyStateForInit(request: TopologyStateForInitRequest)(implicit

View File

@ -37,7 +37,7 @@ trait SequencerClientTransportCommon extends FlagCloseable with SupportsHandshak
timeout: Duration,
)(implicit
traceContext: TraceContext
): EitherT[Future, SendAsyncClientResponseError, Unit]
): EitherT[FutureUnlessShutdown, SendAsyncClientResponseError, Unit]
/** Acknowledge that we have successfully processed all events up to and including the given timestamp.
* The client should then never subscribe for events from before this point.
@ -46,7 +46,7 @@ trait SequencerClientTransportCommon extends FlagCloseable with SupportsHandshak
*/
def acknowledgeSigned(request: SignedContent[AcknowledgeRequest])(implicit
traceContext: TraceContext
): EitherT[Future, String, Boolean]
): EitherT[FutureUnlessShutdown, String, Boolean]
def downloadTopologyStateForInit(request: TopologyStateForInitRequest)(implicit
traceContext: TraceContext

View File

@ -66,12 +66,12 @@ class ReplayingEventsSequencerClientTransport(
/** Does nothing */
override def sendAsyncUnauthenticatedVersioned(request: SubmissionRequest, timeout: Duration)(
implicit traceContext: TraceContext
): EitherT[Future, SendAsyncClientResponseError, Unit] = EitherT.rightT(())
): EitherT[FutureUnlessShutdown, SendAsyncClientResponseError, Unit] = EitherT.rightT(())
/** Does nothing */
override def acknowledgeSigned(request: SignedContent[AcknowledgeRequest])(implicit
traceContext: TraceContext
): EitherT[Future, String, Boolean] =
): EitherT[FutureUnlessShutdown, String, Boolean] =
EitherT.rightT(true)
/** Replays all events in `replayPath` to the handler. */

View File

@ -390,11 +390,11 @@ abstract class ReplayingSendsSequencerClientTransportCommon(
timeout: Duration,
)(implicit
traceContext: TraceContext
): EitherT[Future, SendAsyncClientResponseError, Unit] = EitherT.rightT(())
): EitherT[FutureUnlessShutdown, SendAsyncClientResponseError, Unit] = EitherT.rightT(())
override def acknowledgeSigned(request: SignedContent[AcknowledgeRequest])(implicit
traceContext: TraceContext
): EitherT[Future, String, Boolean] =
): EitherT[FutureUnlessShutdown, String, Boolean] =
EitherT.rightT(true)
override def handshake(request: HandshakeRequest)(implicit

View File

@ -4,6 +4,7 @@
package com.digitalasset.canton.sequencing.traffic
import cats.data.EitherT
import cats.implicits.catsSyntaxAlternativeSeparate
import cats.syntax.bifunctor.*
import cats.syntax.parallel.*
import com.daml.nonempty.NonEmpty
@ -63,6 +64,41 @@ class TrafficPurchasedSubmissionHandler(
val topology: DomainSnapshotSyncCryptoApi = cryptoApi.currentSnapshotApproximation
val snapshot = topology.ipsSnapshot
def send(
maxSequencingTimes: NonEmpty[Seq[CantonTimestamp]],
batch: Batch[OpenEnvelope[SignedProtocolMessage[SetTrafficPurchasedMessage]]],
aggregationRule: AggregationRule,
): EitherT[FutureUnlessShutdown, TrafficControlError, CantonTimestamp] = {
// We don't simply `parTraverse` over `maxSequencingTimes` because as long as at least one request was
// successfully sent, errors (such as max sequencing time already expired) should not result in a failure.
val fut = for {
resultsE <- maxSequencingTimes.forgetNE.parTraverse { maxSequencingTime =>
logger.debug(
s"Submitting traffic purchased entry request for $member with balance ${totalTrafficPurchased.value}, serial ${serial.value} and max sequencing time $maxSequencingTime"
)
sendRequest(sequencerClient, batch, aggregationRule, maxSequencingTime).value
}
(errors, successes) = resultsE.separate
} yield (NonEmpty.from(errors), NonEmpty.from(successes)) match {
case (None, None) =>
// This should never happen because `maxSequencingTimes` is non-empty
throw new IllegalStateException(
"No error or success for a non-empty list of max-sequencing-time"
)
case (Some(errorsNE), None) =>
// None of the requests was successfully sent -- return the first error
Left(errorsNE.head1)
case (_, Some(successesNE)) =>
// At least one of the requests was successfully sent -- return the latest max-sequencing-time
Right(successesNE.last1)
}
EitherT(fut)
}
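
A standalone sketch of the aggregation rule used by `send` above: submit one request per max sequencing time and fail only if none of them succeeded, otherwise keep the latest value that went through. Plain Either and List replace EitherT and NonEmpty; names are illustrative.

object SendAggregationSketch {
  def aggregate[E, A](results: List[Either[E, A]]): Either[E, A] = {
    val (errors, successes) = results.partitionMap(identity)
    (errors, successes) match {
      case (Nil, Nil) =>
        throw new IllegalStateException("No error or success for a non-empty list of results")
      case (firstError :: _, Nil) => Left(firstError) // nothing was sent: report the first error
      case (_, some)              => Right(some.last) // at least one send succeeded: keep the latest
    }
  }

  def main(args: Array[String]): Unit = {
    println(aggregate(List[Either[String, Long]](Left("expired"), Right(10L), Right(20L)))) // Right(20)
    println(aggregate(List[Either[String, Long]](Left("expired"), Left("rejected"))))       // Left(expired)
  }
}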
for {
trafficParams <- EitherT
.fromOptionF(
@ -123,13 +159,8 @@ class TrafficPurchasedSubmissionHandler(
),
)
maxSequencingTimes = computeMaxSequencingTimes(trafficParams)
_ <- maxSequencingTimes.forgetNE.parTraverse_ { maxSequencingTime =>
logger.debug(
s"Submitting traffic purchased entry request for $member with balance ${totalTrafficPurchased.value}, serial ${serial.value} and max sequencing time $maxSequencingTime"
)
sendRequest(sequencerClient, batch, aggregationRule, maxSequencingTime)
}
} yield maxSequencingTimes.last1
latestMaxSequencingTime <- send(maxSequencingTimes, batch, aggregationRule)
} yield latestMaxSequencingTime
}
private def sendRequest(
@ -140,7 +171,7 @@ class TrafficPurchasedSubmissionHandler(
)(implicit
ec: ExecutionContext,
traceContext: TraceContext,
): EitherT[FutureUnlessShutdown, TrafficControlError, Unit] = {
): EitherT[FutureUnlessShutdown, TrafficControlError, CantonTimestamp] = {
val callback = SendCallback.future
for {
_ <- sequencerClient
@ -175,7 +206,7 @@ class TrafficPurchasedSubmissionHandler(
)
}
).leftWiden[TrafficControlError]
} yield ()
} yield maxSequencingTime
}
private def computeMaxSequencingTimes(

View File

@ -14,7 +14,12 @@ import com.digitalasset.canton.ProtoDeserializationError.{
StringConversionError,
TimestampConversionError,
}
import com.digitalasset.canton.config.RequireTypes.{NonNegativeLong, PositiveInt, PositiveLong}
import com.digitalasset.canton.config.RequireTypes.{
NonNegativeInt,
NonNegativeLong,
PositiveInt,
PositiveLong,
}
import com.digitalasset.canton.protocol.{LfContractId, LfTemplateId}
import com.digitalasset.canton.util.OptionUtil
import com.digitalasset.canton.{
@ -111,6 +116,9 @@ object ProtoConverter {
def parsePositiveLong(l: Long): ParsingResult[PositiveLong] =
PositiveLong.create(l).leftMap(ProtoDeserializationError.InvariantViolation(_))
def parseNonNegativeInt(i: Int): ParsingResult[NonNegativeInt] =
NonNegativeInt.create(i).leftMap(ProtoDeserializationError.InvariantViolation(_))
def parseNonNegativeLong(l: Long): ParsingResult[NonNegativeLong] =
NonNegativeLong.create(l).leftMap(ProtoDeserializationError.InvariantViolation(_))

View File

@ -7,10 +7,14 @@ import cats.data.EitherT
import com.daml.lf.data.Ref.PackageId
import com.digitalasset.canton.concurrent.FutureSupervisor
import com.digitalasset.canton.config.{BatchingConfig, CachingConfigs, ProcessingTimeout}
import com.digitalasset.canton.crypto.SigningPublicKey
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.protocol.DynamicDomainParametersWithValidity
import com.digitalasset.canton.protocol.{
DynamicDomainParametersWithValidity,
DynamicSequencingParametersWithValidity,
}
import com.digitalasset.canton.time.{Clock, DomainTimeTracker}
import com.digitalasset.canton.topology.*
import com.digitalasset.canton.topology.client.PartyTopologySnapshotClient.PartyInfo
@ -284,11 +288,21 @@ private class ForwardingTopologySnapshotClient(
override def isMemberKnown(member: Member)(implicit traceContext: TraceContext): Future[Boolean] =
parent.isMemberKnown(member)
override def memberFirstKnownAt(member: Member)(implicit
traceContext: TraceContext
): Future[Option[CantonTimestamp]] =
parent.memberFirstKnownAt(member)
override def findDynamicDomainParameters()(implicit
traceContext: TraceContext
): Future[Either[String, DynamicDomainParametersWithValidity]] =
parent.findDynamicDomainParameters()
override def findDynamicSequencingParameters()(implicit
traceContext: TraceContext
): Future[Either[String, DynamicSequencingParametersWithValidity]] =
parent.findDynamicSequencingParameters()
/** List all the dynamic domain parameters (past and current) */
override def listDynamicDomainParametersChanges()(implicit
traceContext: TraceContext
@ -308,6 +322,11 @@ private class ForwardingTopologySnapshotClient(
parties: Set[LfPartyId]
)(implicit traceContext: TraceContext): Future[PartyTopologySnapshotClient.AuthorityOfResponse] =
parent.authorityOf(parties)
override def signingKeysUS(owner: Member)(implicit
traceContext: TraceContext
): FutureUnlessShutdown[Seq[SigningPublicKey]] =
parent.signingKeysUS(owner)
}
class CachingTopologySnapshot(
@ -390,6 +409,11 @@ class CachingTopologySnapshot(
private val domainParametersCache =
new AtomicReference[Option[Future[Either[String, DynamicDomainParametersWithValidity]]]](None)
private val sequencingDynamicParametersCache =
new AtomicReference[Option[Future[Either[String, DynamicSequencingParametersWithValidity]]]](
None
)
private val domainParametersChangesCache =
new AtomicReference[
Option[Future[Seq[DynamicDomainParametersWithValidity]]]
@ -480,6 +504,17 @@ class CachingTopologySnapshot(
override def isMemberKnown(member: Member)(implicit traceContext: TraceContext): Future[Boolean] =
memberCache.get(member)
override def memberFirstKnownAt(
member: Member
)(implicit traceContext: TraceContext): Future[Option[CantonTimestamp]] = {
isMemberKnown(member).flatMap {
// TODO(#18399): Consider caching this call as well,
// should only happen during first time member is registered at a sequencer
case true => parent.memberFirstKnownAt(member)
case false => Future.successful(None)
}
}
/** Returns the value if it is present in the cache. Otherwise, use the
* `getter` to fetch it and cache the result.
*/
@ -498,6 +533,11 @@ class CachingTopologySnapshot(
): Future[Either[String, DynamicDomainParametersWithValidity]] =
getAndCache(domainParametersCache, parent.findDynamicDomainParameters())
override def findDynamicSequencingParameters()(implicit
traceContext: TraceContext
): Future[Either[String, DynamicSequencingParametersWithValidity]] =
getAndCache(sequencingDynamicParametersCache, parent.findDynamicSequencingParameters())
/** List all the dynamic domain parameters (past and current) */
override def listDynamicDomainParametersChanges()(implicit
traceContext: TraceContext
@ -511,4 +551,9 @@ class CachingTopologySnapshot(
parties: Set[LfPartyId]
)(implicit traceContext: TraceContext): Future[PartyTopologySnapshotClient.AuthorityOfResponse] =
authorityOfCache.get(parties)
override def signingKeysUS(owner: Member)(implicit
traceContext: TraceContext
): FutureUnlessShutdown[Seq[SigningPublicKey]] =
FutureUnlessShutdown.outcomeF(signingKeys(owner))
}

View File

@ -19,6 +19,7 @@ import com.digitalasset.canton.logging.NamedLogging
import com.digitalasset.canton.protocol.{
DynamicDomainParameters,
DynamicDomainParametersWithValidity,
DynamicSequencingParametersWithValidity,
}
import com.digitalasset.canton.sequencing.TrafficControlParameters
import com.digitalasset.canton.sequencing.protocol.MediatorGroupRecipient
@ -345,6 +346,10 @@ trait KeyTopologySnapshotClient {
/** returns all signing keys */
def signingKeys(owner: Member)(implicit traceContext: TraceContext): Future[Seq[SigningPublicKey]]
def signingKeysUS(owner: Member)(implicit
traceContext: TraceContext
): FutureUnlessShutdown[Seq[SigningPublicKey]]
def signingKeys(members: Seq[Member])(implicit
traceContext: TraceContext
): Future[Map[Member, Seq[SigningPublicKey]]]
@ -510,6 +515,10 @@ trait DomainGovernanceSnapshotClient {
traceContext: TraceContext
): Future[Either[String, DynamicDomainParametersWithValidity]]
def findDynamicSequencingParameters()(implicit
traceContext: TraceContext
): Future[Either[String, DynamicSequencingParametersWithValidity]]
/** List all the dynamic domain parameters (past and current) */
def listDynamicDomainParametersChanges()(implicit
traceContext: TraceContext
@ -522,6 +531,10 @@ trait MembersTopologySnapshotClient {
def allMembers()(implicit traceContext: TraceContext): Future[Set[Member]]
def isMemberKnown(member: Member)(implicit traceContext: TraceContext): Future[Boolean]
def memberFirstKnownAt(member: Member)(implicit
traceContext: TraceContext
): Future[Option[CantonTimestamp]]
}
trait TopologySnapshot

View File

@ -7,11 +7,15 @@ import cats.data.EitherT
import cats.syntax.functorFilter.*
import com.daml.lf.data.Ref.PackageId
import com.digitalasset.canton.LfPartyId
import com.digitalasset.canton.crypto.KeyPurpose
import com.digitalasset.canton.crypto.{KeyPurpose, SigningPublicKey}
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.discard.Implicits.DiscardOps
import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.protocol.DynamicDomainParametersWithValidity
import com.digitalasset.canton.protocol.{
DynamicDomainParametersWithValidity,
DynamicSequencingParametersWithValidity,
}
import com.digitalasset.canton.topology.*
import com.digitalasset.canton.topology.client.PartyTopologySnapshotClient.{
AuthorityOfDelegation,
@ -149,6 +153,32 @@ class StoreBasedTopologySnapshot(
} yield domainParameters
}
override def findDynamicSequencingParameters()(implicit
traceContext: TraceContext
): Future[Either[String, DynamicSequencingParametersWithValidity]] =
findTransactions(
asOfInclusive = false,
types = Seq(TopologyMapping.Code.SequencingDynamicParametersState),
filterUid = None,
filterNamespace = None,
).map { transactions =>
for {
storedTx <- collectLatestTransaction(
TopologyMapping.Code.SequencingDynamicParametersState,
transactions
.collectOfMapping[DynamicSequencingParametersState]
.result,
).toRight(s"Unable to fetch sequencing parameters at $timestamp")
mapping = storedTx.mapping
} yield DynamicSequencingParametersWithValidity(
mapping.parameters,
storedTx.validFrom.value,
storedTx.validUntil.map(_.value),
mapping.domain,
)
}
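
An illustrative sketch of the lookup pattern in findDynamicSequencingParameters above: take the latest matching stored transaction, or report that no parameters are set at the given timestamp. The types here are simplified stand-ins for the Canton store types.

object LatestMappingLookupSketch {
  final case class StoredTx[M](validFrom: Long, validUntil: Option[Long], mapping: M)

  // The store returns transactions in validity order; the latest one (if any) wins.
  def findLatest[M](transactions: Seq[StoredTx[M]], timestamp: Long): Either[String, StoredTx[M]] =
    transactions.lastOption
      .toRight(s"Unable to fetch sequencing parameters at $timestamp")

  def main(args: Array[String]): Unit = {
    val txs = Seq(StoredTx(1L, Some(5L), "defaults"), StoredTx(5L, None, "tuned"))
    println(findLatest(txs, 10L))                         // Right(StoredTx(5,None,tuned))
    println(findLatest(Seq.empty[StoredTx[String]], 10L)) // Left(Unable to fetch ...)
  }
}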
/** List all the dynamic domain parameters (past and current) */
override def listDynamicDomainParametersChanges()(implicit
traceContext: TraceContext
@ -655,6 +685,25 @@ class StoreBasedTopologySnapshot(
}
}
override def memberFirstKnownAt(
member: Member
)(implicit traceContext: TraceContext): Future[Option[CantonTimestamp]] = {
member match {
case participantId: ParticipantId =>
store.findFirstTrustCertificateForParticipant(participantId).map(_.map(_.validFrom.value))
case mediatorId: MediatorId =>
store.findFirstMediatorStateForMediator(mediatorId).map(_.map(_.validFrom.value))
case sequencerId: SequencerId =>
store.findFirstSequencerStateForSequencer(sequencerId).map(_.map(_.validFrom.value))
case _ =>
Future.failed(
new IllegalArgumentException(
s"Checking whether member is known for an unexpected member type: $member"
)
)
}
}
private def collectLatestMapping[T <: TopologyMapping](
typ: TopologyMapping.Code,
transactions: Seq[StoredTopologyTransaction[TopologyChangeOp.Replace, T]],
@ -686,4 +735,7 @@ class StoreBasedTopologySnapshot(
transactions.lastOption
}
override def signingKeysUS(owner: Member)(implicit
traceContext: TraceContext
): FutureUnlessShutdown[Seq[SigningPublicKey]] = FutureUnlessShutdown.outcomeF(signingKeys(owner))
}

View File

@ -81,6 +81,16 @@ object TopologyTransactionRejection {
}
}
final case class NoCorrespondingActiveTxToRevoke(mapping: TopologyMapping)
extends TopologyTransactionRejection {
override def asString: String =
s"There is no active topology transaction matching the mapping of the revocation request: $mapping"
override def pretty: Pretty[NoCorrespondingActiveTxToRevoke.this.type] =
prettyOfString(_ => asString)
override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError =
TopologyManagerError.NoCorrespondingActiveTxToRevoke.Mapping(mapping)
}
final case class InvalidTopologyMapping(err: String) extends TopologyTransactionRejection {
override def asString: String = s"Invalid mapping: $err"
override def pretty: Pretty[InvalidTopologyMapping] = prettyOfString(_ => asString)

View File

@ -15,7 +15,7 @@ import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.protocol.v30.Enums
import com.digitalasset.canton.protocol.v30.TopologyMapping.Mapping
import com.digitalasset.canton.protocol.{DynamicDomainParameters, v30}
import com.digitalasset.canton.protocol.{DynamicDomainParameters, DynamicSequencingParameters, v30}
import com.digitalasset.canton.serialization.ProtoConverter
import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.digitalasset.canton.topology.*
@ -27,6 +27,7 @@ import com.digitalasset.canton.topology.transaction.TopologyMapping.{
RequiredAuth,
}
import com.digitalasset.canton.util.OptionUtil
import com.digitalasset.canton.version.ProtoVersion
import com.digitalasset.canton.{LfPackageId, ProtoDeserializationError}
import com.google.common.annotations.VisibleForTesting
import slick.jdbc.SetParameter
@ -116,6 +117,8 @@ object TopologyMapping {
object PurgeTopologyTransaction extends Code(15, "ptt")
object TrafficControlState extends Code(16, "tcs")
object SequencingDynamicParametersState extends Code(17, "sep")
lazy val all: Seq[Code] = Seq(
NamespaceDelegation,
IdentifierDelegation,
@ -316,6 +319,8 @@ object TopologyMapping {
case Mapping.PartyToParticipant(value) => PartyToParticipant.fromProtoV30(value)
case Mapping.AuthorityOf(value) => AuthorityOf.fromProtoV30(value)
case Mapping.DomainParametersState(value) => DomainParametersState.fromProtoV30(value)
case Mapping.SequencingDynamicParametersState(value) =>
DynamicSequencingParametersState.fromProtoV30(value)
case Mapping.MediatorDomainState(value) => MediatorDomainState.fromProtoV30(value)
case Mapping.SequencerDomainState(value) => SequencerDomainState.fromProtoV30(value)
case Mapping.PurgeTopologyTxs(value) => PurgeTopologyTransaction.fromProtoV30(value)
@ -1270,6 +1275,64 @@ object DomainParametersState {
}
}
/** Dynamic sequencing parameter settings for the domain
*
* Each domain has a set of sequencing parameters that can be changed at runtime.
* These changes are authorized by the owner of the domain and distributed
* to all nodes accordingly.
*/
final case class DynamicSequencingParametersState(
domain: DomainId,
parameters: DynamicSequencingParameters,
) extends TopologyMapping {
def toProtoV30: v30.TopologyMapping =
v30.TopologyMapping(
v30.TopologyMapping.Mapping.SequencingDynamicParametersState(
v30.DynamicSequencingParametersState(
domain = domain.toProtoPrimitive,
sequencingParameters = Some(parameters.toProtoV30),
)
)
)
def code: TopologyMapping.Code = Code.SequencingDynamicParametersState
override def namespace: Namespace = domain.namespace
override def maybeUid: Option[UniqueIdentifier] = Some(domain.uid)
override def restrictedToDomain: Option[DomainId] = Some(domain)
override def requiredAuth(
previous: Option[TopologyTransaction[TopologyChangeOp, TopologyMapping]]
): RequiredAuth = RequiredUids(Set(domain.uid))
override def uniqueKey: MappingHash = DomainParametersState.uniqueKey(domain)
}
object DynamicSequencingParametersState {
def uniqueKey(domainId: DomainId): MappingHash =
TopologyMapping.buildUniqueKey(code)(_.add(domainId.toProtoPrimitive))
def code: TopologyMapping.Code = Code.SequencingDynamicParametersState
def fromProtoV30(
value: v30.DynamicSequencingParametersState
): ParsingResult[DynamicSequencingParametersState] = {
val v30.DynamicSequencingParametersState(domainIdP, sequencingParametersP) = value
for {
domainId <- DomainId.fromProtoPrimitive(domainIdP, "domain")
representativeProtocolVersion <- DynamicSequencingParameters.protocolVersionRepresentativeFor(
ProtoVersion(30)
)
parameters <- sequencingParametersP
.map(DynamicSequencingParameters.fromProtoV30)
.getOrElse(Right(DynamicSequencingParameters.default(representativeProtocolVersion)))
} yield DynamicSequencingParametersState(domainId, parameters)
}
}
/** Mediator definition for a domain
*
* Each domain needs at least one mediator (group), but can have multiple.

View File

@ -56,12 +56,23 @@ class ValidatingTopologyMappingChecks(
toValidate: GenericSignedTopologyTransaction,
inStore: Option[GenericSignedTopologyTransaction],
)(implicit traceContext: TraceContext): EitherT[Future, TopologyTransactionRejection, Unit] = {
val checkFirstIsNotRemove = EitherTUtil
.condUnitET(
!(toValidate.operation == TopologyChangeOp.Remove && inStore.isEmpty),
TopologyTransactionRejection.NoCorrespondingActiveTxToRevoke(toValidate.mapping),
)
val checkOpt = (toValidate.mapping.code, inStore.map(_.mapping.code)) match {
lazy val checkOpt = (toValidate.mapping.code, inStore.map(_.mapping.code)) match {
case (Code.DomainTrustCertificate, None | Some(Code.DomainTrustCertificate)) =>
toValidate
.selectMapping[DomainTrustCertificate]
.map(checkDomainTrustCertificate(effective, inStore.isEmpty, _))
val checkReplace = toValidate
.select[TopologyChangeOp.Replace, DomainTrustCertificate]
.map(checkDomainTrustCertificateReplace(effective, _))
val checkRemove = toValidate
.select[TopologyChangeOp.Remove, DomainTrustCertificate]
.map(checkDomainTrustCertificateRemove(effective, _))
checkReplace.orElse(checkRemove)
case (Code.PartyToParticipant, None | Some(Code.PartyToParticipant)) =>
toValidate
@ -100,6 +111,17 @@ class ValidatingTopologyMappingChecks(
)
)
case (Code.SequencerDomainState, None | Some(Code.SequencerDomainState)) =>
toValidate
.select[TopologyChangeOp.Replace, SequencerDomainState]
.map(
checkSequencerDomainStateReplace(
effective,
_,
inStore.flatMap(_.select[TopologyChangeOp.Replace, SequencerDomainState]),
)
)
case (Code.AuthorityOf, None | Some(Code.AuthorityOf)) =>
toValidate
.select[TopologyChangeOp.Replace, AuthorityOf]
@ -107,7 +129,9 @@ class ValidatingTopologyMappingChecks(
case otherwise => None
}
checkOpt.getOrElse(EitherTUtil.unit)
checkFirstIsNotRemove
.flatMap(_ => checkOpt.getOrElse(EitherTUtil.unit))
}
private def loadFromStore(
@ -155,32 +179,32 @@ class ValidatingTopologyMappingChecks(
}
private def checkDomainTrustCertificate(
private def checkDomainTrustCertificateRemove(
effective: EffectiveTime,
isFirst: Boolean,
toValidate: SignedTopologyTransaction[TopologyChangeOp, DomainTrustCertificate],
)(implicit traceContext: TraceContext): EitherT[Future, TopologyTransactionRejection, Unit] =
if (toValidate.operation == TopologyChangeOp.Remove && isFirst) {
EitherT.leftT(TopologyTransactionRejection.Other("Cannot have a remove as the first DTC"))
} else if (toValidate.operation == TopologyChangeOp.Remove) {
)(implicit traceContext: TraceContext): EitherT[Future, TopologyTransactionRejection, Unit] = {
/* Checks that the DTC is not being removed if the participant still hosts a party.
* This check is potentially quite expensive: we have to fetch all party to participant mappings, because
* we cannot index by the hosting participants.
*/
ensureParticipantDoesNotHostParties(effective, toValidate.mapping.participantId)
}
/* Checks that the DTC is not being removed if the participant still hosts a party.
* This check is potentially quite expensive: we have to fetch all party to participant mappings, because
* we cannot index by the hosting participants.
*/
ensureParticipantDoesNotHostParties(effective, toValidate.mapping.participantId)
} else if (isFirst) {
private def checkDomainTrustCertificateReplace(
effective: EffectiveTime,
toValidate: SignedTopologyTransaction[TopologyChangeOp, DomainTrustCertificate],
)(implicit traceContext: TraceContext): EitherT[Future, TopologyTransactionRejection, Unit] = {
// Checks if the participant is allowed to submit its domain trust certificate
val participantId = toValidate.mapping.participantId
// Checks if the participant is allowed to submit its domain trust certificate
val participantId = toValidate.mapping.participantId
for {
domainParamCandidates <- loadFromStore(effective, DomainParametersState.code, None)
restrictions = domainParamCandidates.result.view
def loadOnboardingRestriction()
: EitherT[Future, TopologyTransactionRejection, OnboardingRestriction] = {
loadFromStore(effective, DomainParametersState.code).map { domainParamCandidates =>
val restrictions = domainParamCandidates.result.view
.flatMap(_.selectMapping[DomainParametersState])
.collect { case tx =>
tx.mapping.parameters.onboardingRestriction
}
.toList match {
.map(_.mapping.parameters.onboardingRestriction)
.toList
restrictions match {
case Nil =>
logger.error(
"Can not determine the onboarding restriction. Assuming the domain is locked."
@ -193,62 +217,86 @@ class ValidatingTopologyMappingChecks(
)
param
}
_ <- (restrictions match {
case OnboardingRestriction.RestrictedLocked | OnboardingRestriction.UnrestrictedLocked =>
logger.info(s"Rejecting onboarding of new participant ${toValidate.mapping}")
EitherT.leftT(
TopologyTransactionRejection
.OnboardingRestrictionInPlace(
participantId,
restrictions,
None,
): TopologyTransactionRejection
)
case OnboardingRestriction.UnrestrictedOpen =>
EitherT.rightT(())
case OnboardingRestriction.RestrictedOpen =>
loadFromStore(
effective,
ParticipantDomainPermission.code,
filterUid = Some(Seq(toValidate.mapping.participantId.uid)),
).subflatMap { storedPermissions =>
val isAllowlisted = storedPermissions.result.view
.flatMap(_.selectMapping[ParticipantDomainPermission])
.collectFirst {
case x if x.mapping.domainId == toValidate.mapping.domainId =>
x.mapping.loginAfter
}
isAllowlisted match {
case Some(Some(loginAfter)) if loginAfter > effective.value =>
// This should not happen except under race conditions, as sequencers should not let participants log in
logger.warn(
s"Rejecting onboarding of ${toValidate.mapping.participantId} as the participant still has a login ban until ${loginAfter}"
)
Left(
TopologyTransactionRejection
.OnboardingRestrictionInPlace(participantId, restrictions, Some(loginAfter))
)
case Some(_) =>
logger.info(
s"Accepting onboarding of ${toValidate.mapping.participantId} as it is allow listed"
)
Right(())
case None =>
logger.info(
s"Rejecting onboarding of ${toValidate.mapping.participantId} as it is not allow listed as of ${effective.value}"
)
Left(
TopologyTransactionRejection
.OnboardingRestrictionInPlace(participantId, restrictions, None)
)
}
}
}): EitherT[Future, TopologyTransactionRejection, Unit]
} yield ()
} else {
EitherTUtil.unit
}
}
def checkDomainIsNotLocked(restriction: OnboardingRestriction) = {
EitherTUtil.condUnitET(
restriction.isOpen, {
logger.info(
s"Domain is locked at $effective. Rejecting onboarding of new participant ${toValidate.mapping}"
)
TopologyTransactionRejection
.OnboardingRestrictionInPlace(
participantId,
restriction,
None,
)
},
)
}
def checkParticipantIsNotRestricted(
restrictions: OnboardingRestriction
): EitherT[Future, TopologyTransactionRejection, Unit] = {
// Use the boolean flags to check the restrictions instead of comparing against UnrestrictedOpen:
// this stays correct if additional restriction variants are added later, since there is no
// exhaustiveness check without a full pattern match.
if (restrictions.isUnrestricted && restrictions.isOpen) {
// No further checks needed: any participant can join the domain
EitherTUtil.unit
} else if (restrictions.isRestricted && restrictions.isOpen) {
// Only participants with explicit permission may join the domain
loadFromStore(
effective,
ParticipantDomainPermission.code,
filterUid = Some(Seq(toValidate.mapping.participantId.uid)),
).subflatMap { storedPermissions =>
val isAllowlisted = storedPermissions.result.view
.flatMap(_.selectMapping[ParticipantDomainPermission])
.collectFirst {
case x if x.mapping.domainId == toValidate.mapping.domainId =>
x.mapping.loginAfter
}
isAllowlisted match {
case Some(Some(loginAfter)) if loginAfter > effective.value =>
// This should not happen except under race conditions, as sequencers should not let participants log in
logger.warn(
s"Rejecting onboarding of ${toValidate.mapping.participantId} as the participant still has a login ban until ${loginAfter}"
)
Left(
TopologyTransactionRejection
.OnboardingRestrictionInPlace(participantId, restrictions, Some(loginAfter))
)
case Some(_) =>
logger.info(
s"Accepting onboarding of ${toValidate.mapping.participantId} as it is allow listed"
)
Right(())
case None =>
logger.info(
s"Rejecting onboarding of ${toValidate.mapping.participantId} as it is not allow listed as of ${effective.value}"
)
Left(
TopologyTransactionRejection
.OnboardingRestrictionInPlace(participantId, restrictions, None)
)
}
}
} else {
EitherT.leftT(
TopologyTransactionRejection
.OnboardingRestrictionInPlace(participantId, restrictions, None)
)
}
}
for {
restriction <- loadOnboardingRestriction()
_ <- checkDomainIsNotLocked(restriction)
_ <- checkParticipantIsNotRestricted(restriction)
} yield ()
}
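A hedged sketch of the flag-based dispatch that the comment above argues for; `describeRestriction` is purely illustrative and not part of the commit:

  def describeRestriction(restriction: OnboardingRestriction): String =
    if (!restriction.isOpen) "locked: reject every onboarding request"
    else if (restriction.isRestricted) "open but restricted: an explicit ParticipantDomainPermission is required"
    else "open and unrestricted: any participant may onboard"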
private val requiredKeyPurposes = Set(KeyPurpose.Encryption, KeyPurpose.Signing)
/** Checks the following:
@ -366,53 +414,57 @@ class ValidatingTopologyMappingChecks(
}
}
private def checkMissingNsdAndOtkMappings(
effectiveTime: EffectiveTime,
members: Set[Member],
)(implicit traceContext: TraceContext): EitherT[Future, TopologyTransactionRejection, Unit] = {
val otks = loadFromStore(
effectiveTime,
OwnerToKeyMapping.code,
filterUid = Some(members.toSeq.map(_.uid)),
)
val nsds = loadFromStore(
effectiveTime,
NamespaceDelegation.code,
filterNamespace = Some(members.toSeq.map(_.namespace)),
)
for {
otks <- otks
nsds <- nsds
membersWithOTK = otks.result.flatMap(
_.selectMapping[OwnerToKeyMapping].map(_.mapping.member)
)
missingOTK = members -- membersWithOTK
rootCertificates = nsds.result
.flatMap(_.selectMapping[NamespaceDelegation].filter(_.mapping.isRootDelegation))
.map(_.mapping.namespace)
.toSet
missingNSD = members.filter(med => !rootCertificates.contains(med.namespace))
otk = missingOTK.map(_ -> Seq(OwnerToKeyMapping.code)).toMap
nsd = missingNSD.map(_ -> Seq(NamespaceDelegation.code)).toMap
missingMappings = otk.combine(nsd)
_ <- EitherTUtil.condUnitET[Future][TopologyTransactionRejection](
missingOTK.isEmpty && missingNSD.isEmpty,
TopologyTransactionRejection.MissingMappings(
missingMappings.view.mapValues(_.sortBy(_.dbInt)).toMap
),
)
} yield {}
}
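A self-contained illustration of the bookkeeping performed above, with plain strings standing in for Member and Namespace (an assumption of the example):

  import cats.syntax.semigroup.*
  val members      = Set("med1", "med2", "seq1")
  val withOtk      = Set("med1", "seq1")               // members that already have an OwnerToKeyMapping
  val withRootCert = Set("med1", "med2")               // members whose namespace has a root certificate
  val missingOtk   = members -- withOtk                              // Set("med2")
  val missingNsd   = members.filterNot(withRootCert.contains)        // Set("seq1")
  // `|+|` concatenates the per-member lists, mirroring `.combine` in the check above.
  val missing = missingOtk.map(_ -> Seq("OTK")).toMap |+| missingNsd.map(_ -> Seq("NSD")).toMap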
private def checkMediatorDomainStateReplace(
effectiveTime: EffectiveTime,
toValidate: SignedTopologyTransaction[TopologyChangeOp.Replace, MediatorDomainState],
inStore: Option[SignedTopologyTransaction[TopologyChangeOp.Replace, MediatorDomainState]],
)(implicit traceContext: TraceContext): EitherT[Future, TopologyTransactionRejection, Unit] = {
def checkMissingMappings(): EitherT[Future, TopologyTransactionRejection, Unit] = {
val newMediators = (toValidate.mapping.allMediatorsInGroup.toSet -- inStore.toList.flatMap(
_.mapping.allMediatorsInGroup
)).map(identity[Member])
val otks = loadFromStore(
effectiveTime,
OwnerToKeyMapping.code,
filterUid = Some(newMediators.toSeq.map(_.uid)),
)
val nsds = loadFromStore(
effectiveTime,
NamespaceDelegation.code,
filterNamespace = Some(newMediators.toSeq.map(_.namespace)),
)
for {
otks <- otks
nsds <- nsds
mediatorsWithOTK = otks.result.flatMap(
_.selectMapping[OwnerToKeyMapping].map(_.mapping.member)
)
missingOTK = newMediators -- mediatorsWithOTK
rootCertificates = nsds.result
.flatMap(_.selectMapping[NamespaceDelegation].filter(_.mapping.isRootDelegation))
.map(_.mapping.namespace)
.toSet
missingNSD = newMediators.filter(med => !rootCertificates.contains(med.namespace))
otk = missingOTK.map(_ -> Seq(OwnerToKeyMapping.code)).toMap
nsd = missingNSD.map(_ -> Seq(NamespaceDelegation.code)).toMap
missingMappings = otk.combine(nsd)
_ <- EitherTUtil.condUnitET[Future][TopologyTransactionRejection](
missingOTK.isEmpty && missingNSD.isEmpty,
TopologyTransactionRejection.MissingMappings(
missingMappings.view.mapValues(_.sortBy(_.dbInt)).toMap
),
)
} yield {}
}
val newMediators = (toValidate.mapping.allMediatorsInGroup.toSet -- inStore.toList.flatMap(
_.mapping.allMediatorsInGroup
)).map(identity[Member])
val thresholdCheck = EitherTUtil.condUnitET(
toValidate.mapping.threshold.value <= toValidate.mapping.active.size,
@ -421,7 +473,26 @@ class ValidatingTopologyMappingChecks(
toValidate.mapping.active.size,
),
)
thresholdCheck.flatMap(_ => checkMissingMappings())
thresholdCheck.flatMap(_ => checkMissingNsdAndOtkMappings(effectiveTime, newMediators))
}
private def checkSequencerDomainStateReplace(
effectiveTime: EffectiveTime,
toValidate: SignedTopologyTransaction[TopologyChangeOp.Replace, SequencerDomainState],
inStore: Option[SignedTopologyTransaction[TopologyChangeOp.Replace, SequencerDomainState]],
)(implicit traceContext: TraceContext): EitherT[Future, TopologyTransactionRejection, Unit] = {
val newSequencers = (toValidate.mapping.allSequencers.toSet -- inStore.toList.flatMap(
_.mapping.allSequencers
)).map(identity[Member])
val thresholdCheck = EitherTUtil.condUnitET(
toValidate.mapping.threshold.value <= toValidate.mapping.active.size,
TopologyTransactionRejection.ThresholdTooHigh(
toValidate.mapping.threshold.value,
toValidate.mapping.active.size,
),
)
thresholdCheck.flatMap(_ => checkMissingNsdAndOtkMappings(effectiveTime, newSequencers))
}
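A small numeric illustration of the shared threshold rule applied by both the mediator and the sequencer checks (the values are made up):

  val active    = Seq("sequencer1", "sequencer2", "sequencer3")
  val threshold = 2
  val accepted  = threshold <= active.size   // true; a threshold of 4 would be rejected as ThresholdTooHigh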
private def checkAuthorityOf(

View File

@ -4,6 +4,8 @@
package com.digitalasset.canton.tracing
import com.digitalasset.canton.concurrent.DirectExecutionContext
import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
import com.digitalasset.canton.lifecycle.FutureUnlessShutdownImpl.AbortedDueToShutdownException
import com.digitalasset.canton.logging.TracedLogger
import com.github.blemale.scaffeine.{AsyncLoadingCache, Scaffeine}
@ -37,6 +39,34 @@ object TracedScaffeine {
)(tracedLogger)
}
def buildTracedAsyncFutureUS[K1, V1](
cache: Scaffeine[Any, Any],
loader: TraceContext => K1 => FutureUnlessShutdown[V1],
allLoader: Option[TraceContext => Iterable[K1] => FutureUnlessShutdown[Map[K1, V1]]] = None,
)(
tracedLogger: TracedLogger
)(implicit ec: ExecutionContext): TracedAsyncLoadingCache[K1, V1] = {
new TracedAsyncLoadingCache[K1, V1](
cache.buildAsyncFuture[TracedKey[K1], V1](
loader = tracedKey =>
loader(tracedKey.traceContext)(tracedKey.key)
.failOnShutdownToAbortException("TracedAsyncLoadingCache-loader"),
allLoader = allLoader.map { tracedFunction => (tracedKeys: Iterable[TracedKey[K1]]) =>
{
val traceContext = tracedKeys.headOption
.map(_.traceContext)
.getOrElse(TraceContext.empty)
val keys = tracedKeys.map(_.key)
tracedFunction(traceContext)(keys)
.map(_.map { case (key, value) => TracedKey(key)(traceContext) -> value })
}.failOnShutdownToAbortException("TracedAsyncLoadingCache-allLoader")
},
)
)(tracedLogger)
}
}
class TracedAsyncLoadingCache[K, V](
@ -49,6 +79,14 @@ class TracedAsyncLoadingCache[K, V](
def get(key: K)(implicit traceContext: TraceContext): Future[V] =
underlying.get(TracedKey(key)(traceContext))
def getUS(key: K)(implicit traceContext: TraceContext): FutureUnlessShutdown[V] = {
try
FutureUnlessShutdown.outcomeF(underlying.get(TracedKey(key)(traceContext)))
catch {
case _: AbortedDueToShutdownException => FutureUnlessShutdown.abortedDueToShutdown
}
}
/** @see com.github.blemale.scaffeine.AsyncLoadingCache.getAll
*/
def getAll(keys: Iterable[K])(implicit traceContext: TraceContext): Future[Map[K, V]] =
@ -56,5 +94,18 @@ class TracedAsyncLoadingCache[K, V](
.getAll(keys.map(TracedKey(_)(traceContext)))
.map(_.map { case (tracedKey, value) => tracedKey.key -> value })(ec)
def getAllUS(
keys: Iterable[K]
)(implicit traceContext: TraceContext): FutureUnlessShutdown[Map[K, V]] =
try
FutureUnlessShutdown.outcomeF(
underlying
.getAll(keys.map(TracedKey(_)(traceContext)))
.map(_.map { case (tracedKey, value) => tracedKey.key -> value })(ec)
)
catch {
case _: AbortedDueToShutdownException => FutureUnlessShutdown.abortedDueToShutdown
}
override def toString = s"TracedAsyncLoadingCache($underlying)"
}
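A hedged usage sketch of the new shutdown-aware variant; `logger`, the implicit ExecutionContext and `lookup` are assumptions of the example, not part of the commit:

  def lookup(id: String)(implicit tc: TraceContext): FutureUnlessShutdown[Int] =
    FutureUnlessShutdown.pure(id.length)

  val cache: TracedAsyncLoadingCache[String, Int] =
    TracedScaffeine.buildTracedAsyncFutureUS[String, Int](
      cache = Scaffeine().maximumSize(1000),
      loader = tc => id => lookup(id)(tc),
    )(logger)

  // getUS surfaces a shutdown of the loader as FutureUnlessShutdown.abortedDueToShutdown.
  def cachedLength(id: String)(implicit tc: TraceContext): FutureUnlessShutdown[Int] =
    cache.getUS(id)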

View File

@ -4,6 +4,7 @@
package com.digitalasset.canton.util
import cats.syntax.either.*
import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
import scala.concurrent.Future
@ -39,6 +40,11 @@ object EitherUtil {
case Left(value) => Future.failed(f(value))
case Right(value) => Future.successful(value)
}
def toFutureUS(f: L => Throwable): FutureUnlessShutdown[R] = either match {
case Left(value) => FutureUnlessShutdown.failed(f(value))
case Right(value) => FutureUnlessShutdown.pure(value)
}
}
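A short hedged example of the new combinator (assumes the RichEither syntax above is in scope):

  val parsed: Either[String, Int] = Right(42)
  val lifted: FutureUnlessShutdown[Int] = parsed.toFutureUS(msg => new IllegalArgumentException(msg))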
implicit class RichEitherIterable[L, R](val eithers: Iterable[Either[L, R]]) extends AnyVal {

View File

@ -5,6 +5,7 @@ package com.digitalasset.canton.util
import cats.MonadThrow
import cats.data.EitherT
import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
import scala.concurrent.{ExecutionContext, Future}
import scala.util.Try
@ -80,6 +81,12 @@ object ResourceUtil {
withResourceM(r)(f)
}
def withResourceFutureUS[T <: AutoCloseable, V](r: => T)(f: T => FutureUnlessShutdown[V])(implicit
ec: ExecutionContext
): FutureUnlessShutdown[V] = {
withResourceM(r)(f)
}
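A hedged usage sketch (assumes an implicit ExecutionContext); the resource is closed whether the body succeeds, fails, or is aborted due to shutdown, and the input stream is just an example resource:

  val firstByte: FutureUnlessShutdown[Int] =
    ResourceUtil.withResourceFutureUS(new java.io.ByteArrayInputStream(Array[Byte](1, 2, 3))) { in =>
      FutureUnlessShutdown.pure(in.read())
    }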
def closeAndAddSuppressed(e: Option[Throwable], resource: AutoCloseable): Unit =
e.fold(resource.close()) { exception =>
try {

View File

@ -14,6 +14,7 @@ import com.digitalasset.canton.serialization.ProtoConverter
import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.digitalasset.canton.store.db.DbDeserializationException
import com.digitalasset.canton.util.BinaryFileUtil
import com.digitalasset.canton.version.Transfer.{SourceProtocolVersion, TargetProtocolVersion}
import com.digitalasset.canton.{ProtoDeserializationError, checked}
import com.google.common.annotations.VisibleForTesting
import com.google.protobuf.{ByteString, InvalidProtocolBufferException}
@ -92,7 +93,8 @@ object RepresentativeProtocolVersion {
* instead.
*/
trait HasProtocolVersionedWrapper[ValueClass <: HasRepresentativeProtocolVersion]
extends HasRepresentativeProtocolVersion {
extends HasRepresentativeProtocolVersion
with HasToByteString {
self: ValueClass =>
@transient
@ -1169,6 +1171,38 @@ trait HasProtocolVersionedWithContextAndValidationCompanion[
super.fromByteString(expectedProtocolVersion)((context, expectedProtocolVersion))(bytes)
}
/** Similar to [[HasProtocolVersionedWithContextAndValidationCompanion]] but the deserialization
* context contains a [[com.digitalasset.canton.version.Transfer.TargetProtocolVersion]] for validation.
*/
trait HasProtocolVersionedWithContextAndValidationWithTargetProtocolVersionCompanion[
ValueClass <: HasRepresentativeProtocolVersion,
RawContext,
] extends HasProtocolVersionedWithContextCompanion[
ValueClass,
(RawContext, TargetProtocolVersion),
] {
def fromByteString(context: RawContext, expectedProtocolVersion: TargetProtocolVersion)(
bytes: OriginalByteString
): ParsingResult[ValueClass] =
super.fromByteString(expectedProtocolVersion.v)((context, expectedProtocolVersion))(bytes)
}
/** Similar to [[HasProtocolVersionedWithContextAndValidationCompanion]] but the deserialization
* context contains a [[com.digitalasset.canton.version.Transfer.SourceProtocolVersion]] for validation.
*/
trait HasProtocolVersionedWithContextAndValidationWithSourceProtocolVersionCompanion[
ValueClass <: HasRepresentativeProtocolVersion,
RawContext,
] extends HasProtocolVersionedWithContextCompanion[
ValueClass,
(RawContext, SourceProtocolVersion),
] {
def fromByteString(context: RawContext, expectedProtocolVersion: SourceProtocolVersion)(
bytes: OriginalByteString
): ParsingResult[ValueClass] =
super.fromByteString(expectedProtocolVersion.v)((context, expectedProtocolVersion))(bytes)
}
/** Similar to [[HasProtocolVersionedWithContextAndValidationCompanion]] but the deserialization
* context contains '''only''' the protocol version for validation.
*/

View File

@ -0,0 +1,23 @@
// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package com.digitalasset.canton.version
import com.google.protobuf.ByteString
/** Trait for classes that can be serialized to a [[com.google.protobuf.ByteString]].
*
* Typically, these classes also implement the [[HasProtocolVersionedWrapper]] trait.
* Such classes embed logic together with a representative protocol version that determines the ProtoBuf
* serialization and deserialization.
* Hence, [[HasToByteString.toByteString]] does not take any arguments.
* In contrast, [[HasVersionedToByteString]] is tailored towards a different
* ProtoBuf serialization/deserialization scheme.
*
* See "README.md" for our guidelines on the (de-)serialization tooling.
*/
trait HasToByteString {
def toByteString: ByteString
}
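A hypothetical implementer, to contrast with the version-parameterised variant; the case class is an assumption of the example:

  final case class Tag(value: String) extends HasToByteString {
    // Serialization is fully determined by the class itself, so toByteString takes no arguments.
    override def toByteString: ByteString = ByteString.copyFromUtf8(value)
  }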

View File

@ -244,8 +244,7 @@ trait HasVersionedMessageCompanionDbHelpers[ValueClass <: HasVersionedWrapper[Va
/** Traits for the companion objects of classes that implement [[HasVersionedWrapper]].
* They provide default methods.
* Unlike [[HasVersionedMessageCompanion]], these traits allow passing additional
* context to the conversion methods (see, e.g., [[com.digitalasset.canton.data.TransferInViewTree.fromProtoVersioned]]
* which takes a `HashOps` parameter).
* context to the conversion methods.
*/
trait HasVersionedMessageWithContextCompanion[ValueClass, Ctx]
extends HasVersionedMessageCompanionCommon[ValueClass] {

View File

@ -1,4 +1,4 @@
sdk-version: 3.1.0-snapshot.20240517.13076.0.v9472bf3c
sdk-version: 3.1.0-snapshot.20240527.13089.0.vb44823df
build-options:
- --target=2.1
name: CantonExamples

View File

@ -1,7 +1,15 @@
-- Copyright (c) 2022 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
-- SPDX-License-Identifier: Apache-2.0
create table par_daml_packages (package_id varchar(300) not null primary key, data binary large object not null, source_description varchar not null default 'default');
create table par_daml_packages (
package_id varchar(300) not null primary key,
data binary large object not null,
source_description varchar(300) not null default 'default',
-- UTC timestamp stored in microseconds relative to EPOCH
uploaded_at bigint not null,
-- The size of the archive payload (i.e., the serialized DAML-LF package), in bytes
package_size bigint not null
);
create table par_dars (
hash_hex varchar(300) not null primary key,

View File

@ -1 +1 @@
619f2cd20019be6cf3558f60bb94242798a055267bb6b4218e554579c7081326
88829cbe8175b0ad292eff02a0807782bcd3b579d279aada9ac4cb2efa84e476

View File

@ -20,45 +20,6 @@ CREATE TABLE lapi_parameters (
participant_all_divulged_contracts_pruned_up_to_inclusive VARCHAR(4000)
);
---------------------------------------------------------------------------------------------------
-- List of packages
--
-- A table for tracking DAML-LF packages.
---------------------------------------------------------------------------------------------------
CREATE TABLE lapi_packages (
package_id VARCHAR(4000) PRIMARY KEY NOT NULL,
upload_id VARCHAR(1000) NOT NULL,
source_description VARCHAR(1000),
package_size BIGINT NOT NULL,
known_since BIGINT NOT NULL,
ledger_offset VARCHAR(4000) NOT NULL,
package BINARY LARGE OBJECT NOT NULL
);
CREATE INDEX lapi_packages_ledger_offset_idx ON lapi_packages (ledger_offset);
---------------------------------------------------------------------------------------------------
-- Package entries
--
-- A table for tracking DAML-LF package submissions
-- It includes id to track the package submission and status
---------------------------------------------------------------------------------------------------
CREATE TABLE lapi_package_entries (
ledger_offset VARCHAR(4000) PRIMARY KEY NOT NULL,
recorded_at BIGINT NOT NULL,
submission_id VARCHAR(1000),
typ VARCHAR(1000) NOT NULL,
rejection_reason VARCHAR(1000),
CONSTRAINT check_package_entry_type
CHECK (
(typ = 'accept' AND rejection_reason IS NULL) OR
(typ = 'reject' AND rejection_reason IS NOT NULL)
)
);
CREATE INDEX lapi_package_entries_idx ON lapi_package_entries (submission_id);
---------------------------------------------------------------------------------------------------
-- Party entries
--
@ -422,6 +383,7 @@ CREATE TABLE lapi_pe_create_id_filter_stakeholder (
);
CREATE INDEX lapi_pe_create_id_filter_stakeholder_pts_idx ON lapi_pe_create_id_filter_stakeholder(party_id, template_id, event_sequential_id);
CREATE INDEX lapi_pe_create_id_filter_stakeholder_pt_idx ON lapi_pe_create_id_filter_stakeholder(party_id, event_sequential_id);
CREATE INDEX lapi_pe_create_id_filter_stakeholder_ts_idx ON lapi_pe_create_id_filter_stakeholder(template_id, event_sequential_id);
CREATE INDEX lapi_pe_create_id_filter_stakeholder_s_idx ON lapi_pe_create_id_filter_stakeholder(event_sequential_id);
CREATE TABLE lapi_pe_create_id_filter_non_stakeholder_informee (
@ -438,6 +400,7 @@ CREATE TABLE lapi_pe_consuming_id_filter_stakeholder (
);
CREATE INDEX lapi_pe_consuming_id_filter_stakeholder_pts_idx ON lapi_pe_consuming_id_filter_stakeholder(party_id, template_id, event_sequential_id);
CREATE INDEX lapi_pe_consuming_id_filter_stakeholder_ps_idx ON lapi_pe_consuming_id_filter_stakeholder(party_id, event_sequential_id);
CREATE INDEX lapi_pe_consuming_id_filter_stakeholder_ts_idx ON lapi_pe_consuming_id_filter_stakeholder(template_id, event_sequential_id);
CREATE INDEX lapi_pe_consuming_id_filter_stakeholder_s_idx ON lapi_pe_consuming_id_filter_stakeholder(event_sequential_id);
CREATE TABLE lapi_pe_unassign_id_filter_stakeholder (
@ -447,6 +410,7 @@ CREATE TABLE lapi_pe_unassign_id_filter_stakeholder (
);
CREATE INDEX lapi_pe_unassign_id_filter_stakeholder_pts_idx ON lapi_pe_unassign_id_filter_stakeholder(party_id, template_id, event_sequential_id);
CREATE INDEX lapi_pe_unassign_id_filter_stakeholder_ps_idx ON lapi_pe_unassign_id_filter_stakeholder(party_id, event_sequential_id);
CREATE INDEX lapi_pe_unassign_id_filter_stakeholder_ts_idx ON lapi_pe_unassign_id_filter_stakeholder(template_id, event_sequential_id);
CREATE INDEX lapi_pe_unassign_id_filter_stakeholder_s_idx ON lapi_pe_unassign_id_filter_stakeholder(event_sequential_id);
CREATE TABLE lapi_pe_assign_id_filter_stakeholder (
@ -456,6 +420,7 @@ CREATE TABLE lapi_pe_assign_id_filter_stakeholder (
);
CREATE INDEX lapi_pe_assign_id_filter_stakeholder_pts_idx ON lapi_pe_assign_id_filter_stakeholder(party_id, template_id, event_sequential_id);
CREATE INDEX lapi_pe_assign_id_filter_stakeholder_ps_idx ON lapi_pe_assign_id_filter_stakeholder(party_id, event_sequential_id);
CREATE INDEX lapi_pe_assign_id_filter_stakeholder_ts_idx ON lapi_pe_assign_id_filter_stakeholder(template_id, event_sequential_id);
CREATE INDEX lapi_pe_assign_id_filter_stakeholder_s_idx ON lapi_pe_assign_id_filter_stakeholder(event_sequential_id);
CREATE TABLE lapi_pe_consuming_id_filter_non_stakeholder_informee (

View File

@ -4,7 +4,11 @@
create table par_daml_packages (
package_id varchar(300) collate "C" not null primary key,
data bytea not null,
source_description varchar not null default 'default'
source_description varchar(300) collate "C" not null default 'default',
-- UTC timestamp is stored in microseconds relative to EPOCH
uploaded_at bigint not null,
-- The size of the archive payload (i.e., the serialized DAML-LF package), in bytes
package_size bigint not null
);
create table par_dars (

View File

@ -1 +1 @@
a563a3d5f29d51ff9d1223167576744b921f623717f4304e2303f4243c1248e7
de4d35c9af31b23cb0653b3982a78662b3c39d44acceb7bb2b997c300d16bd41

View File

@ -112,7 +112,9 @@ create or replace view debug.par_daml_packages as
select
package_id,
data,
source_description
source_description,
uploaded_at,
package_size
from par_daml_packages;
create or replace view debug.par_dars as

View File

@ -1 +1 @@
1226f18e579862b6384becbf7d382e43fb321dbfbfe8bad57698caed8602a5e6
a49a69f94ca6afec9e413faa61b396d4fbdabda1e12f4ff94842cdfd0428ad8b

View File

@ -40,65 +40,6 @@ CREATE TABLE lapi_transaction_metering (
CREATE INDEX lapi_transaction_metering_ledger_offset_idx ON lapi_transaction_metering USING btree (ledger_offset);
---------------------------------------------------------------------------------------------------
-- Package entries
--
-- A table for tracking DAML-LF package submissions.
-- It includes id to track the package submission and status.
---------------------------------------------------------------------------------------------------
CREATE TABLE lapi_package_entries (
ledger_offset varchar(4000) collate "C" primary key not null,
recorded_at bigint not null, --with timezone
-- SubmissionId for package to be uploaded
submission_id varchar(1000) collate "C",
-- The type of entry, one of 'accept' or 'reject'
typ varchar(1000) collate "C" not null,
-- If the type is 'reject', then the rejection reason is set.
-- Rejection reason is a human-readable description why the change was rejected.
rejection_reason varchar(1000) collate "C",
constraint check_package_entry_type
check (
(typ = 'accept' and rejection_reason is null) or
(typ = 'reject' and rejection_reason is not null)
)
);
-- Index for retrieving the package entry by submission id
CREATE INDEX lapi_package_entries_idx ON lapi_package_entries USING btree (submission_id);
---------------------------------------------------------------------------------------------------
-- List of packages
--
-- A table for tracking DAML-LF packages.
---------------------------------------------------------------------------------------------------
CREATE TABLE lapi_packages (
-- The unique identifier of the package (the hash of its content)
package_id varchar(4000) collate "C" primary key not null,
-- Packages are uploaded as DAR files (i.e., in groups)
-- This field can be used to find out which packages were uploaded together
upload_id varchar(1000) collate "C" not null,
-- A human readable description of the package source
source_description varchar(1000) collate "C",
-- The size of the archive payload (i.e., the serialized DAML-LF package), in bytes
package_size bigint not null,
-- The time when the package was added
known_since bigint not null,
-- The ledger end at the time when the package was added
ledger_offset varchar(4000) collate "C" not null,
-- The DAML-LF archive, serialized using the protobuf message `daml_lf.Archive`.
-- See also `daml-lf/archive/da/daml_lf.proto`.
package bytea not null
);
---------------------------------------------------------------------------------------------------
-- Indices to speed up indexer initialization
--
-- At startup, the indexer deletes all entries with an offset beyond the stored ledger end.
-- Such entries can be written when the indexer crashes right before updating the ledger end.
-- This migration adds missing indices to speed up the deletion of such entries.
---------------------------------------------------------------------------------------------------
CREATE INDEX lapi_packages_ledger_offset_idx ON lapi_packages USING btree (ledger_offset);
---------------------------------------------------------------------------------------------------
-- Parameters
--
@ -599,6 +540,7 @@ CREATE TABLE lapi_pe_assign_id_filter_stakeholder (
);
CREATE INDEX lapi_pe_assign_id_filter_stakeholder_pts_idx ON lapi_pe_assign_id_filter_stakeholder(party_id, template_id, event_sequential_id);
CREATE INDEX lapi_pe_assign_id_filter_stakeholder_ps_idx ON lapi_pe_assign_id_filter_stakeholder(party_id, event_sequential_id);
CREATE INDEX lapi_pe_assign_id_filter_stakeholder_ts_idx ON lapi_pe_assign_id_filter_stakeholder(template_id, event_sequential_id);
CREATE INDEX lapi_pe_assign_id_filter_stakeholder_s_idx ON lapi_pe_assign_id_filter_stakeholder(event_sequential_id);
@ -616,9 +558,10 @@ CREATE TABLE lapi_pe_consuming_id_filter_stakeholder (
party_id integer not null
);
CREATE INDEX lapi_pe_consuming_id_filter_stakeholder_ps_idx ON lapi_pe_consuming_id_filter_stakeholder USING btree (party_id, event_sequential_id);
CREATE INDEX lapi_pe_consuming_id_filter_stakeholder_pts_idx ON lapi_pe_consuming_id_filter_stakeholder USING btree (party_id, template_id, event_sequential_id);
CREATE INDEX lapi_pe_consuming_id_filter_stakeholder_s_idx ON lapi_pe_consuming_id_filter_stakeholder USING btree (event_sequential_id);
CREATE INDEX lapi_pe_consuming_id_filter_stakeholder_ts_idx ON lapi_pe_consuming_id_filter_stakeholder USING btree (template_id, event_sequential_id);
CREATE INDEX lapi_pe_consuming_id_filter_stakeholder_s_idx ON lapi_pe_consuming_id_filter_stakeholder USING btree (event_sequential_id);
CREATE TABLE lapi_pe_create_id_filter_non_stakeholder_informee (
event_sequential_id bigint not null,
@ -635,9 +578,10 @@ CREATE TABLE lapi_pe_create_id_filter_stakeholder (
party_id integer not null
);
CREATE INDEX lapi_pe_create_id_filter_stakeholder_pt_idx ON lapi_pe_create_id_filter_stakeholder USING btree (party_id, event_sequential_id);
CREATE INDEX lapi_pe_create_id_filter_stakeholder_pts_idx ON lapi_pe_create_id_filter_stakeholder USING btree (party_id, template_id, event_sequential_id);
CREATE INDEX lapi_pe_create_id_filter_stakeholder_s_idx ON lapi_pe_create_id_filter_stakeholder USING btree (event_sequential_id);
CREATE INDEX lapi_pe_create_id_filter_stakeholder_ts_idx ON lapi_pe_create_id_filter_stakeholder USING btree (template_id, event_sequential_id);
CREATE INDEX lapi_pe_create_id_filter_stakeholder_s_idx ON lapi_pe_create_id_filter_stakeholder USING btree (event_sequential_id);
CREATE TABLE lapi_pe_non_consuming_id_filter_informee (
event_sequential_id bigint not null,
@ -655,6 +599,7 @@ CREATE TABLE lapi_pe_unassign_id_filter_stakeholder (
CREATE INDEX lapi_pe_unassign_id_filter_stakeholder_ps_idx ON lapi_pe_unassign_id_filter_stakeholder USING btree (party_id, event_sequential_id);
CREATE INDEX lapi_pe_unassign_id_filter_stakeholder_pts_idx ON lapi_pe_unassign_id_filter_stakeholder USING btree (party_id, template_id, event_sequential_id);
CREATE INDEX lapi_pe_unassign_id_filter_stakeholder_ts_idx ON lapi_pe_unassign_id_filter_stakeholder USING btree (template_id, event_sequential_id);
CREATE INDEX lapi_pe_unassign_id_filter_stakeholder_s_idx ON lapi_pe_unassign_id_filter_stakeholder USING btree (event_sequential_id);
CREATE TABLE lapi_string_interning (

View File

@ -1 +1 @@
71228af9c50adab853f11dfe4e02ee7699b1372e4e0b8a576c912efb8e4a2154
de7ca577f4308cba1c5eb84daf89929f8d26d5298ebc955fd308f2046f9014dd

View File

@ -121,26 +121,6 @@ create or replace view debug.lapi_transaction_metering as
ledger_offset
from lapi_transaction_metering;
create or replace view debug.lapi_package_entries as
select
ledger_offset,
debug.canton_timestamp(recorded_at) as recorded_at,
submission_id,
typ,
rejection_reason
from lapi_package_entries;
create or replace view debug.lapi_packages as
select
package_id,
upload_id,
source_description,
package_size,
debug.canton_timestamp(known_since) as known_since,
ledger_offset,
package
from lapi_packages;
create or replace view debug.lapi_parameters as
select
ledger_end,

View File

@ -13,7 +13,6 @@ import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.protocol.messages.*
import com.digitalasset.canton.sequencing.client.*
import com.digitalasset.canton.sequencing.protocol.*
import com.digitalasset.canton.sequencing.{ApplicationHandler, EnvelopeHandler, NoEnvelopeBox}
import com.digitalasset.canton.time.Clock
import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction
import com.digitalasset.canton.topology.{DomainId, Member}
@ -29,10 +28,6 @@ trait RegisterTopologyTransactionHandle extends FlagCloseable {
def submit(transactions: Seq[GenericSignedTopologyTransaction])(implicit
traceContext: TraceContext
): FutureUnlessShutdown[Seq[TopologyTransactionsBroadcast.State]]
// we don't need to register a specific message handler, because we use SequencerClientSend's SendTracker
val processor: EnvelopeHandler =
ApplicationHandler.success[NoEnvelopeBox, DefaultOpenEnvelope]()
}
class SequencerBasedRegisterTopologyTransactionHandle(

View File

@ -15,8 +15,8 @@ import com.digitalasset.canton.crypto.store.{CryptoPrivateStoreError, CryptoPubl
import com.digitalasset.canton.crypto.{v30 as cryptoproto, *}
import com.digitalasset.canton.error.BaseCantonError
import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.networking.grpc.CantonGrpcUtil.GrpcErrors
import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.networking.grpc.CantonGrpcUtil.GrpcErrors.AbortedDueToShutdown
import com.digitalasset.canton.networking.grpc.{CantonGrpcUtil, StaticGrpcServices}
import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.digitalasset.canton.serialization.{
@ -37,7 +37,7 @@ class GrpcVaultService(
crypto: Crypto,
enablePreviewFeatures: Boolean,
override protected val loggerFactory: NamedLoggerFactory,
)(implicit val ec: ExecutionContext)
)(implicit val ec: ExecutionContext, err: ErrorLoggingContext)
extends v30.VaultServiceGrpc.VaultService
with NamedLogging {
@ -64,7 +64,6 @@ class GrpcVaultService(
CryptoPublicStoreError.ErrorCode
.WrapStr(s"Failed to retrieve public keys: $err")
}
.mapK(FutureUnlessShutdown.outcomeK)
publicKeys <-
keys.toList.parFilterA(pk =>
crypto.cryptoPrivateStore
@ -101,7 +100,7 @@ class GrpcVaultService(
implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext
for {
publicKey <-
Future(
FutureUnlessShutdown.pure(
ProtoConverter
.parse(
cryptoproto.PublicKey.parseFrom,
@ -110,7 +109,7 @@ class GrpcVaultService(
)
.valueOr(err => throw ProtoDeserializationFailure.WrapNoLogging(err).asGrpcError)
)
name <- Future(
name <- FutureUnlessShutdown.pure(
KeyName
.fromProtoPrimitive(request.name)
.valueOr(err => throw ProtoDeserializationFailure.WrapNoLogging(err).asGrpcError)
@ -119,7 +118,7 @@ class GrpcVaultService(
.storePublicKey(publicKey, name.emptyStringAsNone)
.valueOr(err => throw CryptoPublicStoreError.ErrorCode.Wrap(err).asGrpcError)
} yield v30.ImportPublicKeyResponse(fingerprint = publicKey.fingerprint.unwrap)
}
}.failOnShutdownTo(AbortedDueToShutdown.Error().asGrpcError)
override def listPublicKeys(
request: v30.ListPublicKeysRequest
@ -134,7 +133,7 @@ class GrpcVaultService(
.WrapStr(s"Failed to retrieve public keys: $err")
.asGrpcError
)
}
}.failOnShutdownTo(AbortedDueToShutdown.Error().asGrpcError)
override def generateSigningKey(
request: v30.GenerateSigningKeyRequest
@ -225,15 +224,15 @@ class GrpcVaultService(
for {
// TODO(i13613): Remove feature flag check in favor of exportable keys check
_ <-
if (enablePreviewFeatures) Future.unit
if (enablePreviewFeatures) FutureUnlessShutdown.unit
else
Future.failed(
FutureUnlessShutdown.failed(
Status.FAILED_PRECONDITION
.withDescription("Remote export of private keys only allowed when preview is enabled")
.asRuntimeException()
)
cryptoPrivateStore <-
Future(
FutureUnlessShutdown.pure(
crypto.cryptoPrivateStore.toExtended.getOrElse(
throw Status.FAILED_PRECONDITION
.withDescription(
@ -243,7 +242,7 @@ class GrpcVaultService(
)
)
fingerprint <-
Future(
FutureUnlessShutdown.pure(
Fingerprint
.fromProtoPrimitive(request.fingerprint)
.valueOr(err =>
@ -253,7 +252,7 @@ class GrpcVaultService(
)
)
protocolVersion <-
Future(
FutureUnlessShutdown.pure(
ProtocolVersion
.fromProtoPrimitive(request.protocolVersion)
.valueOr(err =>
@ -263,7 +262,7 @@ class GrpcVaultService(
)
)
privateKey <-
EitherTUtil.toFuture(
EitherTUtil.toFutureUnlessShutdown(
cryptoPrivateStore
.exportPrivateKey(fingerprint)
.leftMap(_.toString)
@ -273,9 +272,8 @@ class GrpcVaultService(
.withDescription(s"Error retrieving private key [$fingerprint] $err")
.asRuntimeException()
)
.onShutdown(Left(GrpcErrors.AbortedDueToShutdown.Error().asGrpcError))
)
publicKey <- EitherTUtil.toFuture(
publicKey <- EitherTUtil.toFutureUnlessShutdown(
crypto.cryptoPublicStore
.publicKey(fingerprint)
.leftMap(_.toString)
@ -288,11 +286,11 @@ class GrpcVaultService(
)
keyPair <- (publicKey, privateKey) match {
case (pub: SigningPublicKey, pkey: SigningPrivateKey) =>
Future.successful(new SigningKeyPair(pub, pkey))
FutureUnlessShutdown.pure(new SigningKeyPair(pub, pkey))
case (pub: EncryptionPublicKey, pkey: EncryptionPrivateKey) =>
Future.successful(new EncryptionKeyPair(pub, pkey))
FutureUnlessShutdown.pure(new EncryptionKeyPair(pub, pkey))
case _ =>
Future.failed(
FutureUnlessShutdown.failed(
Status.INVALID_ARGUMENT
.withDescription(
"public and private keys must have same purpose"
@ -318,13 +316,13 @@ class GrpcVaultService(
Right(v30.ExportKeyPairResponse(keyPair = keyPair.toByteString(protocolVersion)))
}
result <- resultE.toFuture { err =>
result <- FutureUnlessShutdown.outcomeF(resultE.toFuture { err =>
Status.FAILED_PRECONDITION
.withDescription(s"Failed to encrypt exported keypair with password: $err")
.asRuntimeException()
}
})
} yield result
}
}.failOnShutdownTo(AbortedDueToShutdown.Error().asGrpcError)
override def importKeyPair(
request: v30.ImportKeyPairRequest
@ -351,9 +349,9 @@ class GrpcVaultService(
def loadKeyPair(
validatedName: Option[KeyName],
keyPair: CryptoKeyPair[PublicKey, PrivateKey],
)(implicit traceContext: TraceContext): Future[Unit] =
)(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] =
for {
cryptoPrivateStore <- Future(
cryptoPrivateStore <- FutureUnlessShutdown.pure(
crypto.cryptoPrivateStore.toExtended.getOrElse(
throw Status.FAILED_PRECONDITION
.withDescription(
@ -371,21 +369,19 @@ class GrpcVaultService(
existing <- crypto.cryptoPublicStore.publicKey(keyPair.publicKey.fingerprint)
_ <-
if (existing.contains(keyPair.publicKey))
EitherT.rightT[Future, CryptoPublicStoreError](())
else EitherT.leftT[Future, Unit](error: CryptoPublicStoreError)
EitherT.rightT[FutureUnlessShutdown, CryptoPublicStoreError](())
else EitherT.leftT[FutureUnlessShutdown, Unit](error: CryptoPublicStoreError)
} yield ()
}
.valueOr(err => throw CryptoPublicStoreError.ErrorCode.Wrap(err).asGrpcError)
_ = logger.info(s"Uploading key ${validatedName}")
_ <- CantonGrpcUtil.mapErrNewEUS(
cryptoPrivateStore
.storePrivateKey(keyPair.privateKey, validatedName)
.leftMap(err => CryptoPrivateStoreError.ErrorCode.Wrap(err))
)
_ <- cryptoPrivateStore
.storePrivateKey(keyPair.privateKey, validatedName)
.valueOr(err => throw CryptoPrivateStoreError.ErrorCode.Wrap(err).asGrpcError)
} yield ()
for {
validatedName <- Future(
validatedName <- FutureUnlessShutdown.pure(
OptionUtil
.emptyStringAsNone(request.name)
.traverse(KeyName.create)
@ -393,33 +389,34 @@ class GrpcVaultService(
)
// Decrypt the keypair if a password is provided
keyPair <- OptionUtil.emptyStringAsNone(request.password) match {
case Some(password) =>
val resultE = for {
encrypted <- PasswordBasedEncrypted
.fromTrustedByteString(request.keyPair)
.leftMap(err => ProtoDeserializationFailure.WrapNoLogging(err).asGrpcError)
keyPair <-
OptionUtil.emptyStringAsNone(request.password) match {
case Some(password) =>
val resultE = for {
encrypted <- PasswordBasedEncrypted
.fromTrustedByteString(request.keyPair)
.leftMap(err => ProtoDeserializationFailure.WrapNoLogging(err).asGrpcError)
keyPair <- crypto.pureCrypto
.decryptWithPassword(encrypted, password)(parseKeyPair)
.leftMap(err =>
Status.FAILED_PRECONDITION
.withDescription(s"Failed to decrypt encrypted keypair with password: $err")
.asRuntimeException()
)
} yield keyPair
keyPair <- crypto.pureCrypto
.decryptWithPassword(encrypted, password)(parseKeyPair)
.leftMap(err =>
Status.FAILED_PRECONDITION
.withDescription(s"Failed to decrypt encrypted keypair with password: $err")
.asRuntimeException()
)
} yield keyPair
resultE.toFuture(identity)
resultE.toFutureUS(identity)
case None =>
parseKeyPair(request.keyPair).toFuture { err =>
ProtoDeserializationFailure.WrapNoLoggingStr(err.message).asGrpcError
}
}
case None =>
parseKeyPair(request.keyPair).toFutureUS { err =>
ProtoDeserializationFailure.WrapNoLoggingStr(err.message).asGrpcError
}
}
_ <- loadKeyPair(validatedName, keyPair)
} yield v30.ImportKeyPairResponse()
}
}.failOnShutdownTo(AbortedDueToShutdown.Error().asGrpcError)
override def deleteKeyPair(
request: v30.DeleteKeyPairRequest
@ -453,7 +450,7 @@ object GrpcVaultService {
enablePreviewFeatures: Boolean,
timeouts: ProcessingTimeout,
loggerFactory: NamedLoggerFactory,
)(implicit ec: ExecutionContext): GrpcVaultService
)(implicit ec: ExecutionContext, err: ErrorLoggingContext): GrpcVaultService
}
class CommunityGrpcVaultServiceFactory extends GrpcVaultServiceFactory {
@ -462,7 +459,7 @@ object GrpcVaultService {
enablePreviewFeatures: Boolean,
timeouts: ProcessingTimeout,
loggerFactory: NamedLoggerFactory,
)(implicit ec: ExecutionContext): GrpcVaultService =
)(implicit ec: ExecutionContext, err: ErrorLoggingContext): GrpcVaultService =
new GrpcVaultService(crypto, enablePreviewFeatures, loggerFactory)
}
}
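The recurring pattern in this file, sketched in isolation: endpoint bodies are written against FutureUnlessShutdown, and only the gRPC boundary maps shutdown to a status error (the names below are illustrative, not part of the commit):

  def endpointBody()(implicit ec: ExecutionContext): FutureUnlessShutdown[String] =
    FutureUnlessShutdown.pure("ok")

  def grpcEndpoint()(implicit ec: ExecutionContext, elc: ErrorLoggingContext): Future[String] =
    // AbortedDueToShutdown.Error() needs the ErrorLoggingContext that the service now takes implicitly.
    endpointBody().failOnShutdownTo(AbortedDueToShutdown.Error().asGrpcError)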

View File

@ -4,10 +4,10 @@
package com.digitalasset.canton.crypto.provider.jce
import cats.data.EitherT
import cats.instances.future.*
import cats.syntax.either.*
import com.digitalasset.canton.crypto.*
import com.digitalasset.canton.crypto.store.CryptoPrivateStoreExtended
import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
import com.digitalasset.canton.tracing.TraceContext
import com.google.crypto.tink.subtle.EllipticCurves.CurveType
import com.google.crypto.tink.subtle.{Ed25519Sign, EllipticCurves}
@ -15,7 +15,7 @@ import com.google.protobuf.ByteString
import java.security.spec.{ECGenParameterSpec, RSAKeyGenParameterSpec}
import java.security.{GeneralSecurityException, KeyPair as JKeyPair, KeyPairGenerator}
import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.ExecutionContext
class JcePrivateCrypto(
pureCrypto: JcePureCrypto,
@ -68,7 +68,7 @@ class JcePrivateCrypto(
override protected[crypto] def generateEncryptionKeypair(scheme: EncryptionKeyScheme)(implicit
traceContext: TraceContext
): EitherT[Future, EncryptionKeyGenerationError, EncryptionKeyPair] = {
): EitherT[FutureUnlessShutdown, EncryptionKeyGenerationError, EncryptionKeyPair] = {
def convertJavaKeyPair(
javaKeyPair: JKeyPair
@ -116,13 +116,13 @@ class JcePrivateCrypto(
override protected[crypto] def generateSigningKeypair(scheme: SigningKeyScheme)(implicit
traceContext: TraceContext
): EitherT[Future, SigningKeyGenerationError, SigningKeyPair] = scheme match {
): EitherT[FutureUnlessShutdown, SigningKeyGenerationError, SigningKeyPair] = scheme match {
case SigningKeyScheme.Ed25519 =>
for {
rawKeyPair <- Either
.catchOnly[GeneralSecurityException](Ed25519Sign.KeyPair.newKeyPair())
.leftMap[SigningKeyGenerationError](SigningKeyGenerationError.GeneralError)
.toEitherT
.toEitherT[FutureUnlessShutdown]
publicKey = ByteString.copyFrom(rawKeyPair.getPublicKey)
privateKey = ByteString.copyFrom(rawKeyPair.getPrivateKey)
keyPair = SigningKeyPair

View File

@ -17,7 +17,7 @@ import com.digitalasset.canton.serialization.{
}
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.{ErrorUtil, ShowUtil}
import com.digitalasset.canton.version.{HasVersionedToByteString, ProtocolVersion}
import com.digitalasset.canton.version.{HasToByteString, HasVersionedToByteString, ProtocolVersion}
import com.google.crypto.tink.hybrid.subtle.AeadOrDaead
import com.google.crypto.tink.subtle.EllipticCurves.EcdsaEncoding
import com.google.crypto.tink.subtle.Enums.HashType
@ -472,10 +472,9 @@ class JcePureCrypto(
publicKey.fingerprint,
)
override def encryptWith[M <: HasVersionedToByteString](
message: M,
private def encryptWith[M](
bytes: ByteString,
publicKey: EncryptionPublicKey,
version: ProtocolVersion,
): Either[EncryptionError, AsymmetricEncrypted[M]] =
publicKey.scheme match {
case EncryptionKeyScheme.EciesP256HkdfHmacSha256Aes128Gcm =>
@ -504,11 +503,7 @@ class JcePureCrypto(
.leftMap(err => EncryptionError.InvalidEncryptionKey(err.toString))
ciphertext <- Either
.catchOnly[GeneralSecurityException](
encrypter
.encrypt(
message.toByteString(version).toByteArray,
Array[Byte](),
)
encrypter.encrypt(bytes.toByteArray, Array[Byte]())
)
.leftMap(err => EncryptionError.FailedToEncrypt(err.toString))
encrypted = new AsymmetricEncrypted[M](
@ -517,19 +512,24 @@ class JcePureCrypto(
)
} yield encrypted
case EncryptionKeyScheme.EciesP256HmacSha256Aes128Cbc =>
encryptWithEciesP256HmacSha256Aes128Cbc(
message.toByteString(version),
publicKey,
JceSecureRandom.random.get(),
)
encryptWithEciesP256HmacSha256Aes128Cbc(bytes, publicKey, JceSecureRandom.random.get())
case EncryptionKeyScheme.Rsa2048OaepSha256 =>
encryptWithRSA2048OaepSha256(
message.toByteString(version),
publicKey,
JceSecureRandom.random.get(),
)
encryptWithRSA2048OaepSha256(bytes, publicKey, JceSecureRandom.random.get())
}
override def encryptWith[M <: HasVersionedToByteString](
message: M,
publicKey: EncryptionPublicKey,
version: ProtocolVersion,
): Either[EncryptionError, AsymmetricEncrypted[M]] =
encryptWith(message.toByteString(version), publicKey)
override def encryptWith[M <: HasToByteString](
message: M,
publicKey: EncryptionPublicKey,
): Either[EncryptionError, AsymmetricEncrypted[M]] =
encryptWith(message.toByteString, publicKey)
override def encryptDeterministicWith[M <: HasVersionedToByteString](
message: M,
publicKey: EncryptionPublicKey,
@ -697,10 +697,9 @@ class JcePureCrypto(
}
}
override def encryptWith[M <: HasVersionedToByteString](
message: M,
private def encryptWith[M](
bytes: ByteString,
symmetricKey: SymmetricKey,
version: ProtocolVersion,
): Either[EncryptionError, Encrypted[M]] =
symmetricKey.scheme match {
case SymmetricKeyScheme.Aes128Gcm =>
@ -710,14 +709,24 @@ class JcePureCrypto(
Set(CryptoKeyFormat.Raw),
EncryptionError.InvalidSymmetricKey,
)
encryptedBytes <- encryptAes128Gcm(
message.toByteString(version),
symmetricKey.key,
)
encryptedBytes <- encryptAes128Gcm(bytes, symmetricKey.key)
encrypted = new Encrypted[M](encryptedBytes)
} yield encrypted
}
override def encryptWith[M <: HasVersionedToByteString](
message: M,
symmetricKey: SymmetricKey,
version: ProtocolVersion,
): Either[EncryptionError, Encrypted[M]] =
encryptWith(message.toByteString(version), symmetricKey)
override def encryptWith[M <: HasToByteString](
message: M,
symmetricKey: SymmetricKey,
): Either[EncryptionError, Encrypted[M]] =
encryptWith(message.toByteString, symmetricKey)
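A hedged caller-side sketch of the new overload (Payload, pureCrypto and symmetricKey are assumptions of the example): with a HasToByteString payload, no protocol version has to be threaded through the call.

  final case class Payload(value: String) extends HasToByteString {
    override def toByteString: ByteString = ByteString.copyFromUtf8(value)
  }
  val encrypted: Either[EncryptionError, Encrypted[Payload]] =
    pureCrypto.encryptWith(Payload("secret"), symmetricKey)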
override def decryptWith[M](encrypted: Encrypted[M], symmetricKey: SymmetricKey)(
deserialize: ByteString => Either[DeserializationError, M]
): Either[DecryptionError, M] =

View File

@ -5,7 +5,6 @@ package com.digitalasset.canton.data
import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.LfPartyId
import com.digitalasset.canton.config.RequireTypes.NonNegativeInt
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.protocol.*
@ -28,8 +27,7 @@ object TransactionViewDecomposition {
* This is the case for all root nodes of the underlying transaction or if the parent node has fewer informee participants.
*
* @param rootNode the node constituting the view
* @param informees the informees of rootNode
* @param threshold the threshold of rootNode
* @param viewConfirmationParameters contains both the informees of rootNode and quorums
* @param rootSeed the seed of the rootNode
* @param tailNodes all core nodes except `rootNode` and all child views, sorted in pre-order traversal order
*
@ -37,8 +35,7 @@ object TransactionViewDecomposition {
*/
final case class NewView(
rootNode: LfActionNode,
informees: Set[Informee],
threshold: NonNegativeInt,
viewConfirmationParameters: ViewConfirmationParameters,
rootSeed: Option[LfHash],
override val nodeId: LfNodeId,
tailNodes: Seq[TransactionViewDecomposition],
@ -47,9 +44,9 @@ object TransactionViewDecomposition {
childViews.foreach { sv =>
require(
(sv.informees, sv.threshold) != ((informees, threshold)),
s"Children must have different informees or thresholds than parent. " +
s"Found threshold $threshold and informees $informees",
sv.viewConfirmationParameters != viewConfirmationParameters,
s"Children must have different informees or quorums than parent. " +
s"Found informees ${viewConfirmationParameters.informees} and quorums ${viewConfirmationParameters.quorums}",
)
}
@ -61,23 +58,26 @@ object TransactionViewDecomposition {
def childViews: Seq[NewView] = tailNodes.collect { case v: NewView => v }
/** This view with the submittingAdminParty (if defined) added as extra confirming party.
/** This view with the submittingAdminParty (if defined) added as an extra confirming party.
* This needs to be called on root views to guarantee proper authorization.
* It adds an extra quorum with the submitting party.
*/
def withSubmittingAdminParty(
submittingAdminPartyO: Option[LfPartyId],
confirmationPolicy: ConfirmationPolicy,
): NewView = {
val (newInformees, newThreshold) =
confirmationPolicy.withSubmittingAdminParty(submittingAdminPartyO)(informees, threshold)
copy(informees = newInformees, threshold = newThreshold)
val newViewConfirmationParameters =
confirmationPolicy.withSubmittingAdminParty(submittingAdminPartyO)(
viewConfirmationParameters
)
copy(
viewConfirmationParameters = newViewConfirmationParameters
)
}
override def pretty: Pretty[NewView] = prettyOfClass(
param("root node template", _.rootNode.templateId),
param("informees", _.informees),
param("threshold", _.threshold),
param("view confirmation parameters", _.viewConfirmationParameters),
param("node ID", _.nodeId),
param("rollback context", _.rbContext),
param("tail nodes", _.tailNodes),
@ -106,9 +106,9 @@ object TransactionViewDecomposition {
views match {
case head +: rest =>
head match {
case (newView: TransactionViewDecomposition.NewView) =>
case newView: TransactionViewDecomposition.NewView =>
countNestedViews(newView.tailNodes ++ rest, count + 1)
case (_: TransactionViewDecomposition.SameView) =>
case _: TransactionViewDecomposition.SameView =>
countNestedViews(rest, count)
}
case _ => // the Scala compiler cannot verify exhaustiveness when matching on Seq(), so a catch-all is required

View File

@ -4,183 +4,241 @@
package com.digitalasset.canton.data
import cats.data.Chain
import cats.syntax.functor.*
import cats.syntax.functorFilter.*
import com.daml.lf.transaction.NodeId
import com.digitalasset.canton.LfPartyId
import com.digitalasset.canton.config.RequireTypes.NonNegativeInt
import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt}
import com.digitalasset.canton.data.TransactionViewDecomposition.{NewView, SameView}
import com.digitalasset.canton.protocol.WellFormedTransaction.WithoutSuffixes
import com.digitalasset.canton.protocol.*
import com.digitalasset.canton.topology.ParticipantId
import com.digitalasset.canton.topology.client.TopologySnapshot
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.LfTransactionUtil
import scala.concurrent.{ExecutionContext, Future}
trait TransactionViewDecompositionFactory {
case object TransactionViewDecompositionFactory {
/** Converts `transaction: Transaction` into the corresponding `ViewDecomposition`s.
/** Keeps track of the state of the transaction view tree.
*
* @param views chains `NewView` and `SameView` as they get created to construct a transaction view tree
* @param informees aggregates the informees' party IDs until a NewView is created
* @param quorums aggregates the quorums (originating from the different action nodes) until a
* NewView is created
*/
final private case class BuildState[V](
views: Chain[V] = Chain.empty,
informees: Set[LfPartyId] = Set.empty,
quorums: Chain[Quorum] = Chain.empty,
rollbackContext: RollbackContext = RollbackContext.empty,
) {
def withViews(
views: Chain[V],
informees: Set[LfPartyId],
quorums: Chain[Quorum],
rollbackContext: RollbackContext,
): BuildState[V] =
BuildState[V](
this.views ++ views,
this.informees ++ informees,
this.quorums ++ quorums,
rollbackContext,
)
def withNewView(view: V, rollbackContext: RollbackContext): BuildState[V] = {
BuildState[V](views :+ view, Set.empty, Chain.empty, rollbackContext)
}
def childState: BuildState[TransactionViewDecomposition] =
BuildState(Chain.empty, Set.empty, Chain.empty, rollbackContext)
def enterRollback(): BuildState[V] = copy(rollbackContext = rollbackContext.enterRollback)
def exitRollback(): BuildState[V] = copy(rollbackContext = rollbackContext.exitRollback)
}
final private case class ActionNodeInfo(
informees: Map[LfPartyId, Set[ParticipantId]],
quorum: Quorum,
children: Seq[LfNodeId],
seed: Option[LfHash],
) {
lazy val participants: Set[ParticipantId] = informees.values.flatten.toSet
}
final private case class Builder(
nodesM: Map[LfNodeId, LfNode],
actionNodeInfoM: Map[LfNodeId, ActionNodeInfo],
) {
private def node(nodeId: LfNodeId): LfNode = nodesM.getOrElse(
nodeId,
throw new IllegalStateException(s"Did not find $nodeId in node map"),
)
private def build(nodeId: LfNodeId, state: BuildState[NewView]): BuildState[NewView] =
node(nodeId) match {
case actionNode: LfActionNode =>
val info = actionNodeInfoM(nodeId)
buildNewView[NewView](nodeId, actionNode, info, state)
case rollbackNode: LfNodeRollback =>
builds(rollbackNode.children.toSeq, state.enterRollback()).exitRollback()
}
def builds(nodeIds: Seq[LfNodeId], state: BuildState[NewView]): BuildState[NewView] =
nodeIds.foldLeft(state)((s, nid) => build(nid, s))
private def buildNewView[V >: NewView](
nodeId: LfNodeId,
actionNode: LfActionNode,
info: ActionNodeInfo,
state: BuildState[V],
): BuildState[V] = {
val childState = info.children.foldLeft(state.childState) { (bs, nId) =>
buildChildView(nId, info.participants, bs)
}
val newView = NewView(
LfTransactionUtil.lightWeight(actionNode),
/* We can use tryCreate here because at this point we only have one quorum
* that is generated directly from the informees of the action node.
* Only later in the process (after the tree is built) do we aggregate all the children
* unique quorums together into a list held by the `parent` view.
*/
ViewConfirmationParameters
.tryCreate(
info.informees.keySet ++ childState.informees,
(info.quorum +: childState.quorums.toList).distinct,
),
info.seed,
nodeId,
childState.views.toList,
state.rollbackContext,
)
state.withNewView(newView, childState.rollbackContext)
}
private def buildChildView(
nodeId: LfNodeId,
currentParticipants: Set[ParticipantId],
state: BuildState[TransactionViewDecomposition],
): BuildState[TransactionViewDecomposition] = {
/* The recipients of a transaction node are all participants that
* host a witness of the node. So we should look at the participant recipients of
* a node to decide when a new view is needed. In particular, a change in the informees triggers a new view only if
* a new informee participant enters the game.
*/
def needNewView(
node: ActionNodeInfo,
currentParticipants: Set[ParticipantId],
): Boolean = !node.participants.subsetOf(currentParticipants)
node(nodeId) match {
case actionNode: LfActionNode =>
val info = actionNodeInfoM(nodeId)
if (!needNewView(info, currentParticipants)) {
val sameView = SameView(
LfTransactionUtil.lightWeight(actionNode),
nodeId,
state.rollbackContext,
)
val childState = info.children.foldLeft(state.childState) { (bs, nId) =>
buildChildView(nId, currentParticipants, bs)
}
state
.withViews(
sameView +: childState.views,
info.informees.keySet ++ childState.informees,
info.quorum +: childState.quorums,
childState.rollbackContext,
)
} else
buildNewView(nodeId, actionNode, info, state)
case rollbackNode: LfNodeRollback =>
rollbackNode.children
.foldLeft(state.enterRollback()) { (bs, nId) =>
buildChildView(nId, currentParticipants, bs)
}
.exitRollback()
}
}
}
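The view-boundary rule described in the comment inside buildChildView reduces to a subset check on informee participants; a minimal sketch (participant ids modelled as strings, purely for illustration):

// A child node needs its own view exactly when it introduces an informee
// participant that is not already among the recipients of the enclosing view.
def needNewView(nodeParticipants: Set[String], currentParticipants: Set[String]): Boolean =
  !nodeParticipants.subsetOf(currentParticipants)

// needNewView(Set("p1"), Set("p1", "p2")) == false // covered, becomes a SameView
// needNewView(Set("p3"), Set("p1", "p2")) == true  // new recipient, becomes a NewView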
def fromTransaction(
confirmationPolicy: ConfirmationPolicy,
topologySnapshot: TopologySnapshot,
transaction: WellFormedTransaction[WithoutSuffixes],
viewRbContext: RollbackContext,
submittingAdminPartyO: Option[LfPartyId],
)(implicit ec: ExecutionContext, tc: TraceContext): Future[Seq[NewView]]
}
)(implicit ec: ExecutionContext, tc: TraceContext): Future[Seq[NewView]] = {
object TransactionViewDecompositionFactory {
val tx: LfVersionedTransaction = transaction.unwrap
private type ConformationPolicy = (Set[Informee], NonNegativeInt)
def apply(): TransactionViewDecompositionFactory = V2
private[data] object V2 extends TransactionViewDecompositionFactory {
private final case class ActionNodeInfo(
informees: Set[Informee],
threshold: NonNegativeInt,
children: Seq[LfNodeId],
seed: Option[LfHash],
) {
def confirmationPolicy: (Set[Informee], NonNegativeInt) = (informees, threshold)
}
private final case class BuildState[V](
views: Chain[V] = Chain.empty,
rollbackContext: RollbackContext = RollbackContext.empty,
) {
def withViews(
views: Chain[V],
rollbackContext: RollbackContext,
): BuildState[V] =
BuildState[V](this.views ++ views, rollbackContext)
def withNewView(view: V, rollbackContext: RollbackContext): BuildState[V] = {
BuildState[V](views :+ view, rollbackContext)
}
def childState: BuildState[TransactionViewDecomposition] =
BuildState(Chain.empty, rollbackContext)
def enterRollback(): BuildState[V] = copy(rollbackContext = rollbackContext.enterRollback)
def exitRollback(): BuildState[V] = copy(rollbackContext = rollbackContext.exitRollback)
}
private final case class Builder(
nodesM: Map[LfNodeId, LfNode],
actionNodeInfoM: Map[LfNodeId, ActionNodeInfo],
) {
private def node(nodeId: LfNodeId): LfNode = nodesM.getOrElse(
nodeId,
throw new IllegalStateException(s"Did not find $nodeId in node map"),
)
private def actionNodeInfo(nodeId: LfNodeId): ActionNodeInfo =
actionNodeInfoM.getOrElse(
nodeId,
throw new IllegalStateException(s"Did not find $nodeId in policy map"),
)
private def build(nodeId: LfNodeId, state: BuildState[NewView]): BuildState[NewView] = {
node(nodeId) match {
case actionNode: LfActionNode =>
buildNewView[NewView](nodeId, actionNode, actionNodeInfo(nodeId), state)
case rollbackNode: LfNodeRollback =>
rollbackNode.children
.foldLeft(state.enterRollback()) { (bs, nId) =>
build(nId, bs)
}
.exitRollback()
}
}
def builds(nodeIds: Seq[LfNodeId], state: BuildState[NewView]): BuildState[NewView] = {
nodeIds.foldLeft(state)((s, nid) => build(nid, s))
}
private def buildNewView[V >: NewView](
nodeId: LfNodeId,
actionNode: LfActionNode,
info: ActionNodeInfo,
state: BuildState[V],
): BuildState[V] = {
val childState = info.children.foldLeft(state.childState) { (bs, nId) =>
buildChildView(nId, info.confirmationPolicy, bs)
}
val newView = NewView(
LfTransactionUtil.lightWeight(actionNode),
info.informees,
info.threshold,
info.seed,
nodeId,
childState.views.toList,
state.rollbackContext,
)
state.withNewView(newView, childState.rollbackContext)
}
private def buildChildView(
nodeId: LfNodeId,
parentConfirmationPolicy: ConformationPolicy,
state: BuildState[TransactionViewDecomposition],
): BuildState[TransactionViewDecomposition] = {
node(nodeId) match {
case actionNode: LfActionNode =>
val info = actionNodeInfoM(nodeId)
if (parentConfirmationPolicy == info.confirmationPolicy) {
val sameView =
SameView(LfTransactionUtil.lightWeight(actionNode), nodeId, state.rollbackContext)
val childState = info.children.foldLeft(state.childState) { (bs, nId) =>
buildChildView(nId, parentConfirmationPolicy, bs)
}
state.withViews(sameView +: childState.views, childState.rollbackContext)
} else {
buildNewView(nodeId, actionNode, info, state)
}
case rollbackNode: LfNodeRollback =>
rollbackNode.children
.foldLeft(state.enterRollback()) { (bs, nId) =>
buildChildView(nId, parentConfirmationPolicy, bs)
}
.exitRollback()
}
}
}
override def fromTransaction(
confirmationPolicy: ConfirmationPolicy,
topologySnapshot: TopologySnapshot,
transaction: WellFormedTransaction[WithoutSuffixes],
viewRbContext: RollbackContext,
submittingAdminPartyO: Option[LfPartyId],
)(implicit ec: ExecutionContext, tc: TraceContext): Future[Seq[NewView]] = {
val tx: LfVersionedTransaction = transaction.unwrap
val policyMapF = tx.nodes.collect({ case (nodeId, node: LfActionNode) =>
val itF = confirmationPolicy.informeesAndThreshold(node, topologySnapshot)
val policyMapF: Iterable[Future[(NodeId, ActionNodeInfo)]] =
tx.nodes.collect({ case (nodeId, node: LfActionNode) =>
val childNodeIds = node match {
case e: LfNodeExercises => e.children.toSeq
case _ => Seq.empty
}
itF.map({ case (i, t) =>
nodeId -> ActionNodeInfo(i, t, childNodeIds, transaction.seedFor(nodeId))
})
createActionNodeInfo(
confirmationPolicy,
topologySnapshot,
node,
nodeId,
childNodeIds,
transaction,
)
})
Future.sequence(policyMapF).map(_.toMap).map { policyMap =>
Builder(tx.nodes, policyMap)
.builds(tx.roots.toSeq, BuildState[NewView](rollbackContext = viewRbContext))
.views
.map(_.withSubmittingAdminParty(submittingAdminPartyO, confirmationPolicy))
.toList
}
Future.sequence(policyMapF).map(_.toMap).map { policyMap =>
Builder(tx.nodes, policyMap)
.builds(tx.roots.toSeq, BuildState[NewView](rollbackContext = viewRbContext))
.views
.map(
_.withSubmittingAdminParty(submittingAdminPartyO, confirmationPolicy)
)
.toList
}
}
private def createActionNodeInfo(
confirmationPolicy: ConfirmationPolicy,
topologySnapshot: TopologySnapshot,
node: LfActionNode,
nodeId: LfNodeId,
childNodeIds: Seq[LfNodeId],
transaction: WellFormedTransaction[WithoutSuffixes],
)(implicit ec: ExecutionContext, tc: TraceContext): Future[(LfNodeId, ActionNodeInfo)] = {
def createQuorum(
informeesMap: Map[LfPartyId, (Set[ParticipantId], NonNegativeInt)],
threshold: NonNegativeInt,
): Quorum = {
Quorum(
informeesMap.mapFilter { case (_, weight) =>
Option.when(weight.unwrap > 0)(
PositiveInt.tryCreate(weight.unwrap)
)
},
threshold,
)
}
val itF = confirmationPolicy.informeesParticipantsAndThreshold(node, topologySnapshot)
itF.map({ case (i, t) =>
nodeId -> ActionNodeInfo(
i.fmap { case (participants, _) => participants },
createQuorum(i, t),
childNodeIds,
transaction.seedFor(nodeId),
)
})
}
}
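A hedged sketch of the weight filtering performed by createQuorum above, written against plain types rather than Canton's Quorum and PositiveInt (only informees with a strictly positive confirmation weight are kept; the threshold is passed through):

// MiniQuorum is a stand-in for Quorum: party -> positive weight, plus a threshold.
final case class MiniQuorum(weights: Map[String, Int], threshold: Int)

def createQuorum(
    informees: Map[String, (Set[String], Int)], // party -> (hosting participants, weight)
    threshold: Int,
): MiniQuorum =
  MiniQuorum(
    informees.collect { case (party, (_, weight)) if weight > 0 => party -> weight },
    threshold,
  )

// createQuorum(Map("alice" -> (Set("p1"), 1), "bob" -> (Set("p2"), 0)), 1)
//   == MiniQuorum(Map("alice" -> 1), 1)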

View File

@ -698,7 +698,6 @@ abstract class CantonNodeBootstrapImpl[
namespaceKeyO <- crypto.cryptoPublicStore
.signingKey(nodeId.fingerprint)
.leftMap(_.toString)
.mapK(FutureUnlessShutdown.outcomeK)
namespaceKey <- EitherT.fromEither[FutureUnlessShutdown](
namespaceKeyO.toRight(
s"Performing auto-init but can't find key ${nodeId.fingerprint} from previous step"
@ -819,7 +818,11 @@ object CantonNodeBootstrapImpl {
private def getKeyByFingerprint[P <: PublicKey](
typ: String,
findPubKeyIdByFingerprint: Fingerprint => EitherT[Future, CryptoPublicStoreError, Option[P]],
findPubKeyIdByFingerprint: Fingerprint => EitherT[
FutureUnlessShutdown,
CryptoPublicStoreError,
Option[P],
],
existPrivateKeyByFp: Fingerprint => EitherT[
FutureUnlessShutdown,
CryptoPrivateStoreError,
@ -831,7 +834,6 @@ object CantonNodeBootstrapImpl {
.leftMap(err =>
s"Failure while looking for $typ fingerprint $fingerprint in public store: $err"
)
.mapK(FutureUnlessShutdown.outcomeK)
pubKey <- keyIdO.fold(
EitherT.leftT[FutureUnlessShutdown, P](
s"$typ key with fingerprint $fingerprint does not exist"
@ -853,7 +855,9 @@ object CantonNodeBootstrapImpl {
private def getOrCreateKey[P <: PublicKey](
typ: String,
findPubKeyIdByName: KeyName => EitherT[Future, CryptoPublicStoreError, Option[P]],
findPubKeyIdByName: KeyName => EitherT[FutureUnlessShutdown, CryptoPublicStoreError, Option[
P
]],
generateKey: Option[KeyName] => EitherT[FutureUnlessShutdown, String, P],
existPrivateKeyByFp: Fingerprint => EitherT[
FutureUnlessShutdown,
@ -865,7 +869,6 @@ object CantonNodeBootstrapImpl {
keyName <- EitherT.fromEither[FutureUnlessShutdown](KeyName.create(name))
keyIdO <- findPubKeyIdByName(keyName)
.leftMap(err => s"Failure while looking for $typ key $name in public store: $err")
.mapK(FutureUnlessShutdown.outcomeK)
pubKey <- keyIdO.fold(
generateKey(Some(keyName))
.leftMap(err => s"Failure while generating $typ key for $name: $err")
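For context on the removed .mapK(FutureUnlessShutdown.outcomeK) calls: they lifted an EitherT over Future into one over FutureUnlessShutdown, and they become unnecessary once the public store itself returns FutureUnlessShutdown. A reduced sketch with a hypothetical stand-in effect (MiniFUS is not Canton's type; shutdown is modelled as an absent result):

import cats.data.EitherT
import cats.~>
import scala.concurrent.{ExecutionContext, Future}

// Stand-in for FutureUnlessShutdown: a Future whose result may be missing
// because the node is shutting down.
final case class MiniFUS[A](unwrap: Future[Option[A]])

// Analogue of FutureUnlessShutdown.outcomeK: a plain Future is an outcome
// that can never signal shutdown.
def outcomeK(implicit ec: ExecutionContext): Future ~> MiniFUS =
  new (Future ~> MiniFUS) {
    def apply[A](fa: Future[A]): MiniFUS[A] = MiniFUS(fa.map(Some(_)))
  }

// Before: the store returned EitherT over Future, so call sites lifted it.
def lifted(lookup: EitherT[Future, String, Int])(implicit ec: ExecutionContext): EitherT[MiniFUS, String, Int] =
  lookup.mapK(outcomeK)
// After: the store returns the shutdown-aware effect directly and no lift is needed.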

View File

@ -220,6 +220,34 @@ object SequencerHealthStatus extends PrettyUtil with ShowUtil {
)
}
/** Admin status of the sequencer node.
* @param acceptsAdminChanges implementation-specific flag indicating whether the sequencer node accepts administration commands
*/
final case class SequencerAdminStatus(acceptsAdminChanges: Boolean)
extends ToComponentHealthState
with PrettyPrinting {
def toProtoV30: v30.SequencerAdminStatus = v30.SequencerAdminStatus(acceptsAdminChanges)
override def toComponentHealthState: ComponentHealthState =
ComponentHealthState.Ok(Option.when(acceptsAdminChanges)("sequencer accepts admin commands"))
override def pretty: Pretty[SequencerAdminStatus] =
SequencerAdminStatus.prettySequencerHealthStatus
}
object SequencerAdminStatus extends PrettyUtil with ShowUtil {
def fromProto(
statusP: v30.SequencerAdminStatus
): ParsingResult[SequencerAdminStatus] =
Right(SequencerAdminStatus(statusP.acceptsAdminChanges))
implicit val implicitPrettyString: Pretty[String] = PrettyInstances.prettyString
implicit val prettySequencerHealthStatus: Pretty[SequencerAdminStatus] =
prettyOfClass[SequencerAdminStatus](
param("admin", _.acceptsAdminChanges)
)
}
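A brief usage sketch for the new status type, relying only on the API visible in this diff (imports omitted):

// Construct the status, round-trip it through its protobuf representation,
// and note how it surfaces in the health state.
val adminStatus = SequencerAdminStatus(acceptsAdminChanges = true)
val roundTripped = SequencerAdminStatus.fromProto(adminStatus.toProtoV30)
// roundTripped == Right(SequencerAdminStatus(true))
// adminStatus.toComponentHealthState is Ok with the description
// "sequencer accepts admin commands"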
/** Topology manager queue status
*
* Status around topology management queues
@ -250,74 +278,6 @@ object TopologyQueueStatus {
}
}
final case class DomainStatus(
uid: UniqueIdentifier,
uptime: Duration,
ports: Map[String, Port],
connectedParticipants: Seq[ParticipantId],
sequencer: SequencerHealthStatus,
topologyQueue: TopologyQueueStatus,
components: Seq[ComponentStatus],
) extends NodeStatus.Status {
val id: DomainId = DomainId(uid)
// A domain node is not replicated and always active
override def active: Boolean = true
override def pretty: Pretty[DomainStatus] =
prettyOfString(_ =>
Seq(
s"Domain id: ${uid.toProtoPrimitive}",
show"Uptime: $uptime",
s"Ports: ${portsString(ports)}",
s"Connected Participants: ${multiline(connectedParticipants.map(_.toString))}",
show"Sequencer: $sequencer",
s"Components: ${multiline(components.map(_.toString))}",
).mkString(System.lineSeparator())
)
def toProtoV30: v30.StatusResponse.Status = {
val participants = connectedParticipants.map(_.toProtoPrimitive)
SimpleStatus(uid, uptime, ports, active, topologyQueue, components).toProtoV30
.copy(
extra = v30.DomainStatusInfo(participants, Some(sequencer.toProtoV30)).toByteString
)
}
}
object DomainStatus {
def fromProtoV30(proto: v30.StatusResponse.Status): ParsingResult[DomainStatus] =
for {
status <- SimpleStatus.fromProtoV30(proto)
domainStatus <- ProtoConverter
.parse[DomainStatus, v30.DomainStatusInfo](
v30.DomainStatusInfo.parseFrom,
domainStatusInfoP => {
for {
participants <- domainStatusInfoP.connectedParticipants.traverse(pId =>
ParticipantId.fromProtoPrimitive(pId, s"DomainStatus.connectedParticipants")
)
sequencer <- ProtoConverter.parseRequired(
SequencerHealthStatus.fromProto,
"sequencer",
domainStatusInfoP.sequencer,
)
} yield DomainStatus(
status.uid,
status.uptime,
status.ports,
participants,
sequencer,
status.topologyQueue,
status.components,
)
},
proto.extra,
)
} yield domainStatus
}
final case class ParticipantStatus(
uid: UniqueIdentifier,
uptime: Duration,
@ -399,6 +359,7 @@ final case class SequencerNodeStatus(
connectedParticipants: Seq[ParticipantId],
sequencer: SequencerHealthStatus,
topologyQueue: TopologyQueueStatus,
admin: SequencerAdminStatus,
components: Seq[ComponentStatus],
) extends NodeStatus.Status {
override def active: Boolean = sequencer.isActive
@ -406,7 +367,12 @@ final case class SequencerNodeStatus(
val participants = connectedParticipants.map(_.toProtoPrimitive)
SimpleStatus(uid, uptime, ports, active, topologyQueue, components).toProtoV30.copy(
extra = v30
.SequencerNodeStatus(participants, sequencer.toProtoV30.some, domainId.toProtoPrimitive)
.SequencerNodeStatus(
participants,
sequencer.toProtoV30.some,
domainId.toProtoPrimitive,
admin.toProtoV30.some,
)
.toByteString
)
}
@ -420,6 +386,7 @@ final case class SequencerNodeStatus(
s"Ports: ${portsString(ports)}",
s"Connected Participants: ${multiline(connectedParticipants.map(_.toString))}",
show"Sequencer: $sequencer",
s"Accepts admin changes: ${admin.acceptsAdminChanges}",
s"details-extra: ${sequencer.details}",
s"Components: ${multiline(components.map(_.toString))}",
).mkString(System.lineSeparator())
@ -448,6 +415,11 @@ object SequencerNodeStatus {
sequencerNodeStatusP.domainId,
s"SequencerNodeStatus.domainId",
)
admin <- ProtoConverter.parseRequired(
SequencerAdminStatus.fromProto,
"admin",
sequencerNodeStatusP.admin,
)
} yield SequencerNodeStatus(
status.uid,
domainId,
@ -456,6 +428,7 @@ object SequencerNodeStatus {
participants,
sequencer,
status.topologyQueue,
admin,
status.components,
),
sequencerP.extra,

View File

@ -6,25 +6,38 @@ package com.digitalasset.canton.protocol
import com.daml.lf.data.Ref.PackageId
import com.digitalasset.canton.LfPackageId
import com.digitalasset.canton.config.CantonRequireTypes.String256M
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.tracing.TraceContext
import slick.jdbc.GetResult
import slick.jdbc.GetResult.GetInt
import scala.concurrent.Future
/** @param packageId the unique identifier for the package
* @param sourceDescription an informal, human-readable description of what the package contains
* @param uploadedAt the package upload timestamp
* @param packageSize the LF archive protobuf-serialized size in bytes
*/
final case class PackageDescription(packageId: LfPackageId, sourceDescription: String256M)
final case class PackageDescription(
packageId: LfPackageId,
sourceDescription: String256M,
uploadedAt: CantonTimestamp,
packageSize: Int,
)
object PackageDescription {
import com.digitalasset.canton.resource.DbStorage.Implicits.*
implicit val getResult: GetResult[PackageDescription] =
GetResult.createGetTuple2(GetResult[LfPackageId], GetResult[String256M]).andThen {
case (packageId, sourceDescription) =>
PackageDescription(packageId, sourceDescription)
}
GetResult
.createGetTuple4(
GetResult[LfPackageId],
GetResult[String256M],
GetResult[CantonTimestamp],
GetResult[Int],
)
.andThen(Function.tupled(PackageDescription.apply))
}
trait PackageInfoService {
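The GetResult above combines createGetTuple4 with Function.tupled to adapt the four-argument PackageDescription.apply to the tuple a row decoder produces; a minimal standalone illustration of that adaptation (Row is a hypothetical stand-in, no Slick involved):

// Function.tupled turns a 4-ary constructor into a function over a Tuple4,
// which matches the shape produced by a tuple-based row decoder.
final case class Row(packageId: String, sourceDescription: String, uploadedAt: Long, packageSize: Int)

val fromTuple: ((String, String, Long, Int)) => Row = Function.tupled(Row.apply _)

// fromTuple(("pkg-1", "test package", 1716900000L, 2048))
//   == Row("pkg-1", "test package", 1716900000L, 2048)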

View File

@ -113,7 +113,8 @@ trait StoreBasedDomainOutboxDispatchHelper extends DomainOutboxDispatchHelper {
.toRight("")
.leftFlatMap { _ =>
// We did not find a topology transaction with the correct version, so we try to convert and resign
SignedTopologyTransaction.asVersion(tx, protocolVersion)(crypto)
SignedTopologyTransaction
.asVersion(tx, protocolVersion)(crypto)
}
}
}
@ -136,7 +137,8 @@ trait QueueBasedDomainOutboxDispatchHelper extends DomainOutboxDispatchHelper {
// Transaction already in the correct version, nothing to do here
EitherT.rightT[FutureUnlessShutdown, String](tx)
} else {
SignedTopologyTransaction.asVersion(tx, protocolVersion)(crypto)
SignedTopologyTransaction
.asVersion(tx, protocolVersion)(crypto)
}
}
}
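Both helpers above follow the same shape: use the transaction if a correctly versioned representation already exists, otherwise fall back to converting and re-signing. A reduced sketch of that fallback, with strings standing in for transactions and error messages:

import cats.Id
import cats.data.EitherT

// If a correctly versioned transaction is already at hand, use it; otherwise
// run the (possibly failing) conversion, mirroring the leftFlatMap above.
def selectOrConvert(
    alreadyCorrect: Option[String],
    convert: => Either[String, String],
): EitherT[Id, String, String] =
  EitherT.fromOption[Id](alreadyCorrect, "").leftFlatMap(_ => EitherT.fromEither[Id](convert))

// selectOrConvert(Some("tx@pv31"), Left("unused")).value == Right("tx@pv31")
// selectOrConvert(None, Right("converted tx")).value     == Right("converted tx")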

View File

@ -98,6 +98,8 @@ class GrpcTopologyManagerWriteService(
IdentifierDelegation.fromProtoV30(mapping)
case Mapping.DomainParametersState(mapping) =>
DomainParametersState.fromProtoV30(mapping)
case Mapping.SequencingDynamicParametersState(mapping) =>
DynamicSequencingParametersState.fromProtoV30(mapping)
case Mapping.MediatorDomainState(mapping) =>
MediatorDomainState.fromProtoV30(mapping)
case Mapping.SequencerDomainState(mapping) =>

View File

@ -36,6 +36,9 @@ trait ComparesLfTransactions {
TxTree(en.copy(children = ImmArray.empty), en.children.toSeq.map(go)*)
case rn: LfNodeRollback =>
TxTree(rn.copy(children = ImmArray.empty), rn.children.toSeq.map(go)*)
// do not delete the package version once the protocol version handles it
case cn: LfNodeCreate =>
TxTree(cn.copy(packageVersion = None))
case leafNode: LfLeafOnlyActionNode => TxTree(leafNode)
}
tx.roots.toSeq.map(go)

View File

@ -3,15 +3,14 @@
package com.digitalasset.canton.crypto
import com.digitalasset.canton.BaseTest
import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
import com.digitalasset.canton.util.HexString
import com.digitalasset.canton.{BaseTest, HasExecutionContext}
import com.google.protobuf.ByteString
import org.scalatest.wordspec.AsyncWordSpec
import scala.concurrent.Future
trait HkdfTest {
this: AsyncWordSpec with BaseTest =>
this: AsyncWordSpec & BaseTest & HasExecutionContext =>
private case class TestCase(
ikm: ByteString,
@ -98,7 +97,7 @@ trait HkdfTest {
),
)
def hkdfProvider(providerF: => Future[HkdfOps with RandomOps]): Unit = {
def hkdfProvider(providerF: => FutureUnlessShutdown[HkdfOps with RandomOps]): Unit = {
"derive keys using HMAC" should {
"pass golden tests from RFC 5869 for extract-and-expand" in {
val algo = HmacAlgorithm.HmacSha256
@ -117,7 +116,7 @@ trait HkdfTest {
expanded.unwrap shouldBe testCase.okm
}
}
}
}.failOnShutdown
}
}
}
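The test changes above all follow one pattern: the crypto provider now lives in FutureUnlessShutdown, and the assertion block is turned back into a plain Future for ScalaTest via .failOnShutdown. A reduced model of that shape (TestFUS and its failOnShutdown are hypothetical stand-ins, not Canton's implementation):

import scala.concurrent.{ExecutionContext, Future}

// Reduced model: the effect either yields an outcome or signals shutdown (None),
// and failOnShutdown converts "shutdown" into a failed Future that ScalaTest's
// AsyncWordSpec can report.
final case class TestFUS[A](unwrap: Future[Option[A]]) {
  def map[B](f: A => B)(implicit ec: ExecutionContext): TestFUS[B] =
    TestFUS(unwrap.map(_.map(f)))
  def failOnShutdown(implicit ec: ExecutionContext): Future[A] =
    unwrap.flatMap {
      case Some(a) => Future.successful(a)
      case None    => Future.failed(new IllegalStateException("aborted due to shutdown"))
    }
}

// Test body shape, mirroring the diffs above:
//   newCrypto.map { crypto => /* assertions */ }.failOnShutdown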

View File

@ -3,20 +3,19 @@
package com.digitalasset.canton.crypto
import com.digitalasset.canton.BaseTest
import com.digitalasset.canton.crypto.CryptoTestHelper.TestMessage
import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
import com.digitalasset.canton.{BaseTest, HasExecutionContext}
import com.google.protobuf.ByteString
import org.scalatest.wordspec.AsyncWordSpec
import scala.concurrent.Future
trait PasswordBasedEncryptionTest {
this: AsyncWordSpec with BaseTest =>
this: AsyncWordSpec & BaseTest & HasExecutionContext =>
def pbeProvider(
supportedPbkdfSchemes: Set[PbkdfScheme],
supportedSymmetricKeySchemes: Set[SymmetricKeyScheme],
newCrypto: => Future[PasswordBasedEncryptionOps with EncryptionOps],
newCrypto: => FutureUnlessShutdown[PasswordBasedEncryptionOps & EncryptionOps],
): Unit = {
s"encrypt with passwords" should {
@ -32,7 +31,7 @@ trait PasswordBasedEncryptionTest {
pbkey.salt.unwrap.size() shouldEqual pbkdfScheme.defaultSaltLengthInBytes
pbkey.key.key.size() shouldEqual symmetricKeyScheme.keySizeInBytes
}
}
}.failOnShutdown
s"generate the same symmetric key in $symmetricKeyScheme for the same password when given the same salt using $pbkdfScheme" in {
newCrypto.map { crypto =>
@ -52,7 +51,7 @@ trait PasswordBasedEncryptionTest {
pbkey1.salt.unwrap shouldEqual pbkey2.salt.unwrap
pbkey1.key shouldEqual pbkey2.key
}
}
}.failOnShutdown
s"encrypt and decrypt using a password with $symmetricKeyScheme and $pbkdfScheme" in {
newCrypto.map { crypto =>
@ -74,7 +73,7 @@ trait PasswordBasedEncryptionTest {
decrypted shouldEqual message
}
}
}.failOnShutdown
s"encrypt with one password and fail to decrypt with another password using $symmetricKeyScheme and $pbkdfScheme" in {
newCrypto.map { crypto =>
@ -94,7 +93,7 @@ trait PasswordBasedEncryptionTest {
decryptedE.left.value shouldBe a[PasswordBasedEncryptionError.DecryptError]
}
}
}.failOnShutdown
}
}
}

Some files were not shown because too many files have changed in this diff.