update canton to c5b68d04 (#19458)

* update canton to c5b68d04

* fix build

* more fix

---------

Co-authored-by: Marcin Ziolek <marcin.ziolek@digitalasset.com>
This commit is contained in:
Remy 2024-06-27 10:18:18 +02:00 committed by GitHub
parent 98a027c2df
commit 64128328d9
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
536 changed files with 4760 additions and 5380 deletions

View File

@ -44,10 +44,11 @@ case object BuildInfo {{
val scalaVersion: String = "{scala_version}"
val sbtVersion: String = "bazel"
val damlLibrariesVersion: String = "{sdk_version}"
val protocolVersions = Seq()
val stableProtocolVersions = List()
val betaProtocolVersions = List()
override val toString: String = {{
"version: %s, scalaVersion: %s, sbtVersion: %s, damlLibrariesVersion: %s, protocolVersions: %s".format(
version, scalaVersion, sbtVersion, damlLibrariesVersion, protocolVersions
"version: %s, scalaVersion: %s, sbtVersion: %s, damlLibrariesVersion: %s, stableProtocolVersions: %s, betaProtocolVersions: %s".format(
version, scalaVersion, sbtVersion, damlLibrariesVersion, stableProtocolVersions, betaProtocolVersions
)
}}
}}

View File

@ -18,8 +18,6 @@ service InspectionService {
// Lookup the domain where a contract is currently active.
// Supports querying many contracts at once.
rpc LookupContractDomain(LookupContractDomain.Request) returns (LookupContractDomain.Response);
// Lookup the domain that the transaction was committed over. Can fail with NOT_FOUND if no domain was found.
rpc LookupTransactionDomain(LookupTransactionDomain.Request) returns (LookupTransactionDomain.Response);
// Look up the ledger offset corresponding to the timestamp, specifically the largest offset such that no later
// offset corresponds to a later timestamp than the specified one.
rpc LookupOffsetByTime(LookupOffsetByTime.Request) returns (LookupOffsetByTime.Response);

View File

@ -31,8 +31,10 @@ message TrafficState {
int64 extra_traffic_consumed = 2;
// Amount of base traffic remaining
int64 base_traffic_remainder = 3;
// Cost deducted at `timestamp`, only present when traffic was consumed at `timestamp`, otherwise is set to 0
uint64 last_consumed_cost = 4;
// Timestamp at which the state is valid
int64 timestamp = 4;
int64 timestamp = 5;
// Optional serial of the balance update that updated the extra traffic limit
google.protobuf.UInt32Value serial = 5;
google.protobuf.UInt32Value serial = 6;
}

View File

@ -10,6 +10,9 @@ object ProtocolVersionAnnotation {
/** Marker for stable protocol versions */
sealed trait Stable extends Status
/** Marker for beta protocol versions */
sealed trait Beta extends Status
}
/** Marker trait for Protobuf messages generated by scalapb

View File

@ -4,6 +4,14 @@
package com.digitalasset.canton.admin.api.client.commands
import cats.syntax.either.*
import cats.syntax.traverse.*
import com.daml.ledger.api.v2.admin.command_inspection_service.CommandInspectionServiceGrpc.CommandInspectionServiceStub
import com.daml.ledger.api.v2.admin.command_inspection_service.{
CommandInspectionServiceGrpc,
CommandState,
GetCommandStatusRequest,
GetCommandStatusResponse,
}
import com.daml.ledger.api.v2.admin.identity_provider_config_service.IdentityProviderConfigServiceGrpc.IdentityProviderConfigServiceStub
import com.daml.ledger.api.v2.admin.identity_provider_config_service.*
import com.daml.ledger.api.v2.admin.metering_report_service.MeteringReportServiceGrpc.MeteringReportServiceStub
@ -135,6 +143,7 @@ import com.digitalasset.canton.ledger.client.services.admin.IdentityProviderConf
import com.digitalasset.canton.logging.ErrorLoggingContext
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.networking.grpc.ForwardingStreamObserver
import com.digitalasset.canton.platform.apiserver.execution.CommandStatus
import com.digitalasset.canton.protocol.LfContractId
import com.digitalasset.canton.serialization.ProtoConverter
import com.digitalasset.canton.topology.{DomainId, PartyId}
@ -352,6 +361,34 @@ object LedgerApiCommands {
}
}
// Admin-client commands for the Ledger API CommandInspectionService: look up
// the status of recently submitted commands that the participant still keeps
// in memory.
object CommandInspectionService {
// Common base: binds the gRPC stub type and its construction for all
// command-inspection admin commands.
abstract class BaseCommand[Req, Resp, Res] extends GrpcAdminCommand[Req, Resp, Res] {
override type Svc = CommandInspectionServiceStub
override def createService(channel: ManagedChannel): CommandInspectionServiceStub =
CommandInspectionServiceGrpc.stub(channel)
}
// Fetch the status of commands whose command id starts with `commandIdPrefix`,
// filtered by `state`, returning at most `limit` entries.
final case class GetCommandStatus(commandIdPrefix: String, state: CommandState, limit: Int)
extends BaseCommand[GetCommandStatusRequest, GetCommandStatusResponse, Seq[CommandStatus]] {
override def createRequest(): Either[String, GetCommandStatusRequest] = Right(
GetCommandStatusRequest(commandIdPrefix = commandIdPrefix, state = state, limit = limit)
)
override def submitRequest(
service: CommandInspectionServiceStub,
request: GetCommandStatusRequest,
): Future[GetCommandStatusResponse] = service.getCommandStatus(request)
// Convert each proto status to the internal CommandStatus; a failed
// conversion surfaces as its error message on the Left.
override def handleResponse(
response: GetCommandStatusResponse
): Either[String, Seq[CommandStatus]] = {
response.commandStatus.traverse(CommandStatus.fromProto).leftMap(_.message)
}
}
}
object ParticipantPruningService {
abstract class BaseCommand[Req, Resp, Res] extends GrpcAdminCommand[Req, Resp, Res] {
override type Svc = ParticipantPruningServiceStub

View File

@ -12,12 +12,12 @@ import com.daml.ledger.api.v2.state_service.{
IncompleteUnassigned,
}
import com.daml.ledger.api.v2.value.{Record, RecordField, Value}
import com.digitalasset.daml.lf.data.Time
import com.digitalasset.canton.admin.api.client.data.TemplateId
import com.digitalasset.canton.crypto.Salt
import com.digitalasset.canton.protocol.LfContractId
import com.digitalasset.canton.topology.DomainId
import com.digitalasset.canton.{LfPackageName, LfPackageVersion}
import com.digitalasset.daml.lf.data.Time
import com.google.protobuf.timestamp.Timestamp
/** Wrapper class to make scalapb LedgerApi classes more convenient to access

View File

@ -57,7 +57,7 @@ import com.digitalasset.canton.topology.{DomainId, ParticipantId, PartyId}
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.BinaryFileUtil
import com.digitalasset.canton.version.ProtocolVersion
import com.digitalasset.canton.{DomainAlias, LedgerTransactionId, SequencerCounter, config}
import com.digitalasset.canton.{DomainAlias, SequencerCounter, config}
import com.google.protobuf.ByteString
import com.google.protobuf.empty.Empty
import com.google.protobuf.timestamp.Timestamp
@ -1057,29 +1057,6 @@ object ParticipantAdminCommands {
}
final case class LookupTransactionDomain(transactionId: LedgerTransactionId)
extends Base[
v30.LookupTransactionDomain.Request,
v30.LookupTransactionDomain.Response,
DomainId,
] {
override def createRequest() = Right(v30.LookupTransactionDomain.Request(transactionId))
override def submitRequest(
service: InspectionServiceStub,
request: v30.LookupTransactionDomain.Request,
): Future[v30.LookupTransactionDomain.Response] =
service.lookupTransactionDomain(request)
override def handleResponse(
response: v30.LookupTransactionDomain.Response
): Either[String, DomainId] =
DomainId.fromString(response.domainId)
override def timeoutType: TimeoutType = DefaultUnboundedTimeout
}
final case class LookupOffsetByTime(ts: Timestamp)
extends Base[v30.LookupOffsetByTime.Request, v30.LookupOffsetByTime.Response, String] {
override def createRequest() = Right(v30.LookupOffsetByTime.Request(Some(ts)))

View File

@ -70,6 +70,7 @@ import com.digitalasset.canton.participant.ParticipantNodeParameters
import com.digitalasset.canton.participant.admin.AdminWorkflowConfig
import com.digitalasset.canton.participant.config.ParticipantInitConfig.ParticipantLedgerApiInitConfig
import com.digitalasset.canton.participant.config.*
import com.digitalasset.canton.participant.sync.CommandProgressTrackerConfig
import com.digitalasset.canton.platform.apiserver.SeedService.Seeding
import com.digitalasset.canton.platform.apiserver.configuration.{
EngineLoggingConfig,
@ -238,6 +239,7 @@ final case class RetentionPeriodDefaults(
* @param startupParallelism Start up to N nodes in parallel (default is num-threads)
* @param nonStandardConfig don't fail config validation on non-standard configuration settings
* @param devVersionSupport If true, allow domain nodes to use unstable protocol versions and participant nodes to connect to such domains
* @param betaVersionSupport If true, allow domain nodes to use beta protocol versions and participant nodes to connect to such domains
* @param timeouts Sets the timeouts used for processing and console
* @param portsFile A ports file name, where the ports of all participants will be written to after startup
* @param exitOnFatalFailures If true the node will exit/stop the process in case of fatal failures
@ -251,6 +253,7 @@ final case class CantonParameters(
nonStandardConfig: Boolean = true,
// TODO(i15561): Revert back to `false` once there is a stable Daml 3 protocol version
devVersionSupport: Boolean = true,
betaVersionSupport: Boolean = false,
portsFile: Option[String] = None,
timeouts: TimeoutSettings = TimeoutSettings(),
retentionPeriodDefaults: RetentionPeriodDefaults = RetentionPeriodDefaults(),
@ -378,6 +381,7 @@ trait CantonConfig {
protocolConfig = ParticipantProtocolConfig(
minimumProtocolVersion = participantParameters.minimumProtocolVersion.map(_.unwrap),
devVersionSupport = participantParameters.devVersionSupport,
betaVersionSupport = participantParameters.betaVersionSupport,
dontWarnOnDeprecatedPV = participantParameters.dontWarnOnDeprecatedPV,
),
ledgerApiServerParameters = participantParameters.ledgerApiServer,
@ -388,6 +392,7 @@ trait CantonConfig {
disableUpgradeValidation = participantParameters.disableUpgradeValidation,
allowForUnauthenticatedContractIds =
participantParameters.allowForUnauthenticatedContractIds,
commandProgressTracking = participantParameters.commandProgressTracker,
)
}
@ -519,6 +524,7 @@ private[canton] object CantonNodeParameterConverter {
def protocol(parent: CantonConfig, config: ProtocolConfig): CantonNodeParameters.Protocol =
CantonNodeParameters.Protocol.Impl(
devVersionSupport = parent.parameters.devVersionSupport || config.devVersionSupport,
betaVersionSupport = parent.parameters.betaVersionSupport || config.betaVersionSupport,
dontWarnOnDeprecatedPV = config.dontWarnOnDeprecatedPV,
)
@ -971,9 +977,12 @@ object CantonConfig {
deriveReader[EngineLoggingConfig]
lazy implicit val cantonEngineConfigReader: ConfigReader[CantonEngineConfig] =
deriveReader[CantonEngineConfig]
lazy implicit val participantNodeParameterConfigReader
: ConfigReader[ParticipantNodeParameterConfig] =
@nowarn("cat=unused") lazy implicit val participantNodeParameterConfigReader
: ConfigReader[ParticipantNodeParameterConfig] = {
implicit val commandProgressTrackerConfigReader: ConfigReader[CommandProgressTrackerConfig] =
deriveReader[CommandProgressTrackerConfig]
deriveReader[ParticipantNodeParameterConfig]
}
lazy implicit val timeTrackerConfigReader: ConfigReader[DomainTimeTrackerConfig] =
deriveReader[DomainTimeTrackerConfig]
lazy implicit val timeRequestConfigReader: ConfigReader[TimeProofRequestConfig] =
@ -1385,9 +1394,12 @@ object CantonConfig {
deriveWriter[EngineLoggingConfig]
lazy implicit val cantonEngineConfigWriter: ConfigWriter[CantonEngineConfig] =
deriveWriter[CantonEngineConfig]
lazy implicit val participantNodeParameterConfigWriter
: ConfigWriter[ParticipantNodeParameterConfig] =
@nowarn("cat=unused") lazy implicit val participantNodeParameterConfigWriter
: ConfigWriter[ParticipantNodeParameterConfig] = {
implicit val commandProgressTrackerConfigWriter: ConfigWriter[CommandProgressTrackerConfig] =
deriveWriter[CommandProgressTrackerConfig]
deriveWriter[ParticipantNodeParameterConfig]
}
lazy implicit val timeTrackerConfigWriter: ConfigWriter[DomainTimeTrackerConfig] =
deriveWriter[DomainTimeTrackerConfig]
lazy implicit val timeRequestConfigWriter: ConfigWriter[TimeProofRequestConfig] =

View File

@ -9,7 +9,6 @@ import cats.syntax.functor.*
import cats.syntax.functorFilter.*
import com.daml.nonempty.NonEmpty
import com.daml.nonempty.catsinstances.*
import com.digitalasset.canton.discard.Implicits.DiscardOps
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.version.HandshakeErrors.DeprecatedProtocolVersion
@ -53,19 +52,17 @@ object CommunityConfigValidations
type Validation = CantonCommunityConfig => Validated[NonEmpty[Seq[String]], Unit]
override protected val validations: List[Validation] =
List[Validation](noDuplicateStorage, atLeastOneNode) ++ genericValidations[
CantonCommunityConfig
]
List[Validation](noDuplicateStorage, atLeastOneNode) ++
genericValidations[CantonCommunityConfig]
/** Validations applied to all community and enterprise Canton configurations. */
private[config] def genericValidations[C <: CantonConfig]
: List[C => Validated[NonEmpty[Seq[String]], Unit]] = {
: List[C => Validated[NonEmpty[Seq[String]], Unit]] =
List(
developmentProtocolSafetyCheck,
warnIfUnsafeMinProtocolVersion,
adminTokenSafetyCheckParticipants,
)
}
/** Group node configs by db access to find matching db storage configs.
* Overcomplicated types used are to work around that at this point nodes could have conflicting names so we can't just
@ -207,19 +204,22 @@ object CommunityConfigValidations
devVersionSupport = nodeConfig.parameters.devVersionSupport,
)
}
}
private def warnIfUnsafeMinProtocolVersion(
config: CantonConfig
): Validated[NonEmpty[Seq[String]], Unit] = {
config.participants.toSeq.foreach { case (name, config) =>
val errors = config.participants.toSeq.mapFilter { case (name, config) =>
val minimum = config.parameters.minimumProtocolVersion.map(_.unwrap)
val isMinimumDeprecatedVersion = minimum.getOrElse(ProtocolVersion.minimum).isDeprecated
if (isMinimumDeprecatedVersion && !config.parameters.dontWarnOnDeprecatedPV)
DeprecatedProtocolVersion.WarnParticipant(name, minimum).discard
Option.when(isMinimumDeprecatedVersion && !config.parameters.dontWarnOnDeprecatedPV)(
DeprecatedProtocolVersion.WarnParticipant(name, minimum).cause
)
}
Validated.valid(())
NonEmpty.from(errors).map(Validated.invalid).getOrElse(Validated.valid(()))
}
private def adminTokenSafetyCheckParticipants(

View File

@ -20,7 +20,6 @@ import com.daml.ledger.api.v2.value.{
RecordField,
Value,
}
import com.digitalasset.daml.lf.value.Value.ContractId
import com.daml.nonempty.NonEmpty
import com.daml.nonempty.NonEmptyReturningOps.*
import com.digitalasset.canton.admin.api.client.commands.LedgerApiTypeWrappers.ContractData
@ -65,6 +64,7 @@ import com.digitalasset.canton.tracing.{NoTracing, TraceContext}
import com.digitalasset.canton.util.{BinaryFileUtil, EitherUtil}
import com.digitalasset.canton.version.ProtocolVersion
import com.digitalasset.canton.{DomainAlias, SequencerAlias}
import com.digitalasset.daml.lf.value.Value.ContractId
import com.google.protobuf.ByteString
import com.typesafe.scalalogging.LazyLogging
import io.circe.Encoder

View File

@ -401,9 +401,6 @@ class ExternalLedgerApiClient(
override val loggerFactory: NamedLoggerFactory =
consoleEnvironment.environment.loggerFactory.append("client", name)
override protected def domainOfTransaction(transactionId: String): DomainId =
throw new NotImplementedError("domain_of is not implemented for external ledger api clients")
override protected[console] def ledgerApiCommand[Result](
command: GrpcAdminCommand[?, ?, Result]
): ConsoleCommandResult[Result] =
@ -413,6 +410,7 @@ class ExternalLedgerApiClient(
// No-op for external ledger api clients: returns `tx` unchanged without
// awaiting it on any participant (the timeout and domain id are ignored).
override protected def optionallyAwait[Tx](
tx: Tx,
txId: String,
txDomainId: String,
optTimeout: Option[NonNegativeDuration],
): Tx = tx

View File

@ -8,6 +8,7 @@ import cats.syntax.functorFilter.*
import cats.syntax.traverse.*
import com.daml.jwt.JwtDecoder
import com.daml.jwt.domain.Jwt
import com.daml.ledger.api.v2.admin.command_inspection_service.CommandState
import com.daml.ledger.api.v2.admin.package_management_service.PackageDetails
import com.daml.ledger.api.v2.admin.party_management_service.PartyDetails as ProtoPartyDetails
import com.daml.ledger.api.v2.checkpoint.Checkpoint
@ -39,9 +40,9 @@ import com.daml.ledger.javaapi.data.{
TransactionTree,
}
import com.daml.ledger.javaapi as javab
import com.digitalasset.daml.lf.data.Ref
import com.daml.metrics.api.MetricsContext
import com.daml.scalautil.Statement.discard
import com.digitalasset.canton.admin.api.client.commands.LedgerApiCommands
import com.digitalasset.canton.admin.api.client.commands.LedgerApiCommands.CompletionWrapper
import com.digitalasset.canton.admin.api.client.commands.LedgerApiCommands.UpdateService.*
import com.digitalasset.canton.admin.api.client.commands.LedgerApiTypeWrappers.{
@ -49,10 +50,6 @@ import com.digitalasset.canton.admin.api.client.commands.LedgerApiTypeWrappers.{
WrappedIncompleteAssigned,
WrappedIncompleteUnassigned,
}
import com.digitalasset.canton.admin.api.client.commands.{
LedgerApiCommands,
ParticipantAdminCommands,
}
import com.digitalasset.canton.admin.api.client.data.*
import com.digitalasset.canton.config.ConsoleCommandTimeout
import com.digitalasset.canton.config.RequireTypes.PositiveInt
@ -83,11 +80,13 @@ import com.digitalasset.canton.ledger.client.services.admin.IdentityProviderConf
import com.digitalasset.canton.logging.NamedLogging
import com.digitalasset.canton.networking.grpc.{GrpcError, RecordingStreamObserver}
import com.digitalasset.canton.participant.ledger.api.client.JavaDecodeUtil
import com.digitalasset.canton.platform.apiserver.execution.CommandStatus
import com.digitalasset.canton.protocol.LfContractId
import com.digitalasset.canton.topology.{DomainId, ParticipantId, PartyId}
import com.digitalasset.canton.tracing.NoTracing
import com.digitalasset.canton.util.ResourceUtil
import com.digitalasset.canton.{LedgerTransactionId, LfPackageId, LfPartyId, config}
import com.digitalasset.canton.{LfPackageId, LfPartyId, config}
import com.digitalasset.daml.lf.data.Ref
import com.google.protobuf.field_mask.FieldMask
import io.grpc.StatusRuntimeException
import io.grpc.stub.StreamObserver
@ -118,10 +117,10 @@ trait BaseLedgerApiAdministration extends NoTracing {
}
.getOrElse(LedgerApiCommands.defaultApplicationId)
protected def domainOfTransaction(transactionId: String): DomainId
protected def optionallyAwait[Tx](
tx: Tx,
txId: String,
txDomainId: String,
optTimeout: Option[config.NonNegativeDuration],
): Tx
private def timeouts: ConsoleCommandTimeout = consoleEnvironment.commandTimeouts
@ -402,13 +401,6 @@ trait BaseLedgerApiAdministration extends NoTracing {
)
)
})
@Help.Summary("Get the domain that a transaction was committed over.")
@Help.Description(
"""Get the domain that a transaction was committed over. Throws an error if the transaction is not (yet) known
|to the participant or if the transaction has been pruned via `pruning.prune`."""
)
def domain_of(transactionId: String): DomainId = domainOfTransaction(transactionId)
}
@Help.Summary("Submit commands", FeatureFlag.Testing)
@ -462,7 +454,7 @@ trait BaseLedgerApiAdministration extends NoTracing {
)
)
}
optionallyAwait(tx, tx.updateId, optTimeout)
optionallyAwait(tx, tx.updateId, tx.domainId, optTimeout)
}
@Help.Summary(
@ -512,7 +504,7 @@ trait BaseLedgerApiAdministration extends NoTracing {
)
)
}
optionallyAwait(tx, tx.updateId, optTimeout)
optionallyAwait(tx, tx.updateId, tx.domainId, optTimeout)
}
@Help.Summary("Submit command asynchronously", FeatureFlag.Testing)
@ -554,6 +546,36 @@ trait BaseLedgerApiAdministration extends NoTracing {
}
}
@Help.Summary("Investigate successful and failed commands", FeatureFlag.Testing)
@Help.Description(
"""Find the status of commands. Note that only recent commands which are kept in memory will be returned."""
)
// Gated behind the Preview feature flag; delegates to the
// CommandInspectionService.GetCommandStatus admin command.
def status(
commandIdPrefix: String = "",
state: CommandState = CommandState.COMMAND_STATE_UNSPECIFIED,
limit: PositiveInt = PositiveInt.tryCreate(10),
): Seq[CommandStatus] = check(FeatureFlag.Preview) {
consoleEnvironment.run {
ledgerApiCommand(
LedgerApiCommands.CommandInspectionService.GetCommandStatus(
commandIdPrefix = commandIdPrefix,
state = state,
limit = limit.unwrap,
)
)
}
}
@Help.Summary("Investigate failed commands", FeatureFlag.Testing)
@Help.Description(
"""Same as status(..., state = CommandState.Failed)."""
)
// Convenience wrapper: calls status(...) with the state pinned to
// COMMAND_STATE_FAILED. Also Preview-gated via check(FeatureFlag.Preview).
def failed(commandId: String = "", limit: PositiveInt = PositiveInt.tryCreate(10)): Seq[
CommandStatus
] = check(FeatureFlag.Preview) {
status(commandId, CommandState.COMMAND_STATE_FAILED, limit)
}
@Help.Summary(
"Submit assign command and wait for the resulting reassignment, returning the reassignment or failing otherwise",
FeatureFlag.Testing,
@ -796,6 +818,36 @@ trait BaseLedgerApiAdministration extends NoTracing {
)
})
@Help.Summary("Investigate successful and failed commands", FeatureFlag.Testing)
@Help.Description(
"""Find the status of commands. Note that only recent commands which are kept in memory will be returned."""
)
// Gated behind the Preview feature flag; delegates to the
// CommandInspectionService.GetCommandStatus admin command.
def status(
commandIdPrefix: String = "",
state: CommandState = CommandState.COMMAND_STATE_UNSPECIFIED,
limit: PositiveInt = PositiveInt.tryCreate(10),
): Seq[CommandStatus] = check(FeatureFlag.Preview) {
consoleEnvironment.run {
ledgerApiCommand(
LedgerApiCommands.CommandInspectionService.GetCommandStatus(
commandIdPrefix = commandIdPrefix,
state = state,
limit = limit.unwrap,
)
)
}
}
@Help.Summary("Investigate failed commands", FeatureFlag.Testing)
@Help.Description(
"""Same as status(..., state = CommandState.Failed)."""
)
// Convenience wrapper: calls status(...) with the state pinned to
// COMMAND_STATE_FAILED. Also Preview-gated via check(FeatureFlag.Preview).
def failed(commandId: String = "", limit: PositiveInt = PositiveInt.tryCreate(10)): Seq[
CommandStatus
] = check(FeatureFlag.Preview) {
status(commandId, CommandState.COMMAND_STATE_FAILED, limit)
}
@Help.Summary("Read active contracts", FeatureFlag.Testing)
@Help.Group("Active Contracts")
object acs extends Helpful {
@ -1788,7 +1840,9 @@ trait BaseLedgerApiAdministration extends NoTracing {
)
}
javab.data.TransactionTree.fromProto(
TransactionTreeProto.toJavaProto(optionallyAwait(tx, tx.updateId, optTimeout))
TransactionTreeProto.toJavaProto(
optionallyAwait(tx, tx.updateId, tx.domainId, optTimeout)
)
)
}
@ -1841,7 +1895,7 @@ trait BaseLedgerApiAdministration extends NoTracing {
)
}
javab.data.Transaction.fromProto(
TransactionV2.toJavaProto(optionallyAwait(tx, tx.updateId, optTimeout))
TransactionV2.toJavaProto(optionallyAwait(tx, tx.updateId, tx.domainId, optTimeout))
)
}
@ -2248,13 +2302,6 @@ trait LedgerApiAdministration extends BaseLedgerApiAdministration {
implicit protected val consoleEnvironment: ConsoleEnvironment
protected val name: String
override protected def domainOfTransaction(transactionId: String): DomainId = {
val txId = LedgerTransactionId.assertFromString(transactionId)
consoleEnvironment.run {
adminCommand(ParticipantAdminCommands.Inspection.LookupTransactionDomain(txId))
}
}
import com.digitalasset.canton.util.ShowUtil.*
private def awaitTransaction(
@ -2283,9 +2330,10 @@ trait LedgerApiAdministration extends BaseLedgerApiAdministration {
}
private[console] def involvedParticipants(
transactionId: String
transactionId: String,
txDomainId: String,
): Map[ParticipantReference, PartyId] = {
val txDomain = ledger_api.updates.domain_of(transactionId)
val txDomain = DomainId.tryFromString(txDomainId)
// TODO(#6317)
// There's a race condition here, in the unlikely circumstance that the party->participant mapping on the domain
// changes during the command's execution. We'll have to live with it for the moment, as there's no convenient
@ -2348,12 +2396,13 @@ trait LedgerApiAdministration extends BaseLedgerApiAdministration {
protected def optionallyAwait[Tx](
tx: Tx,
txId: String,
txDomainId: String,
optTimeout: Option[config.NonNegativeDuration],
): Tx = {
optTimeout match {
case None => tx
case Some(timeout) =>
val involved = involvedParticipants(txId)
val involved = involvedParticipants(txId, txDomainId)
logger.debug(show"Awaiting transaction ${txId.unquoted} at ${involved.keys.mkShow()}")
awaitTransaction(txId, involved, timeout)
tx

View File

@ -96,6 +96,7 @@ import com.digitalasset.canton.util.*
import com.digitalasset.canton.{DomainAlias, SequencerAlias, config}
import java.time.Instant
import scala.annotation.nowarn
import scala.concurrent.duration.Duration
sealed trait DomainChoice
@ -430,6 +431,7 @@ class LocalParticipantTestingGroup(
This is because the combined event log isn't guaranteed to have increasing timestamps.
"""
)
@nowarn("msg=usage being removed as part of fusing MultiDomainEventLog and Ledger API Indexer")
def event_search(
domain: Option[DomainAlias] = None,
from: Option[Instant] = None,
@ -461,6 +463,7 @@ class LocalParticipantTestingGroup(
Note that if the domain is left blank, the values of `from` and `to` cannot be set.
This is because the combined event log isn't guaranteed to have increasing timestamps.
""")
@nowarn("msg=usage being removed as part of fusing MultiDomainEventLog and Ledger API Indexer")
def transaction_search(
domain: Option[DomainAlias] = None,
from: Option[Instant] = None,

View File

@ -309,7 +309,7 @@ class ParticipantPartiesAdministrationGroup(
TopologyAdminCommands.Write.Propose(
// TODO(#14048) properly set the serial or introduce auto-detection so we don't
// have to set it on the client side
mapping = PartyToParticipant(
mapping = PartyToParticipant.create(
partyId,
None,
threshold,
@ -326,6 +326,8 @@ class ParticipantPartiesAdministrationGroup(
serial = None,
store = AuthorizedStore.filterName,
mustFullyAuthorize = mustFullyAuthorize,
change = TopologyChangeOp.Replace,
forceChanges = ForceFlags.none,
)
)
}

View File

@ -4,7 +4,6 @@
package com.digitalasset.canton.console.commands
import cats.syntax.either.*
import com.digitalasset.daml.lf.data.Ref.PackageId
import com.daml.nameof.NameOf.functionFullName
import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.admin.api.client.commands.{GrpcAdminCommand, TopologyAdminCommands}
@ -53,6 +52,7 @@ import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.ShowUtil.*
import com.digitalasset.canton.util.{BinaryFileUtil, OptionUtil}
import com.digitalasset.canton.version.ProtocolVersion
import com.digitalasset.daml.lf.data.Ref.PackageId
import com.google.protobuf.ByteString
import java.time.Duration
@ -1209,6 +1209,29 @@ class TopologyAdministrationGroup(
@Help.Group("Party to participant mappings")
object party_to_participant_mappings extends Helpful {
// Look up the current party-to-participant mapping for `party` in `store`
// (either a domain store or the authorized store), expecting at most one
// result. Both REPLACE and REMOVE operations are fetched so that the caller
// can determine the next serial correctly.
private def findCurrent(party: PartyId, store: String) = {
TopologyStoreId(store) match {
case TopologyStoreId.DomainStore(domainId, _) =>
expectAtMostOneResult(
list(
domainId,
filterParty = party.filterString,
// fetch both REPLACE and REMOVE to correctly determine the next serial
operation = None,
)
)
case TopologyStoreId.AuthorizedStore =>
expectAtMostOneResult(
list_from_authorized(
filterParty = party.filterString,
// fetch both REPLACE and REMOVE to correctly determine the next serial
operation = None,
)
)
}
}
@Help.Summary("Change party to participant mapping")
@Help.Description("""Change the association of a party to hosting participants.
party: The unique identifier of the party whose set of participants or permission to modify.
@ -1244,27 +1267,7 @@ class TopologyAdministrationGroup(
store: String = AuthorizedStore.filterName,
): SignedTopologyTransaction[TopologyChangeOp, PartyToParticipant] = {
val currentO = TopologyStoreId(store) match {
case TopologyStoreId.DomainStore(domainId, _) =>
expectAtMostOneResult(
list(
domainId,
filterParty = party.filterString,
// fetch both REPLACE and REMOVE to correctly determine the next serial
operation = None,
)
)
case TopologyStoreId.AuthorizedStore =>
expectAtMostOneResult(
list_from_authorized(
filterParty = party.filterString,
// fetch both REPLACE and REMOVE to correctly determine the next serial
operation = None,
)
)
}
val currentO = findCurrent(party, store)
val (existingPermissions, newSerial, threshold, groupAddressing) = currentO match {
case Some(current) if current.context.operation == TopologyChangeOp.Remove =>
(
@ -1361,7 +1364,7 @@ class TopologyAdministrationGroup(
}
val command = TopologyAdminCommands.Write.Propose(
mapping = PartyToParticipant(
mapping = PartyToParticipant.create(
partyId = party,
domainId = domainId,
threshold = threshold,
@ -1373,6 +1376,7 @@ class TopologyAdministrationGroup(
change = op,
mustFullyAuthorize = mustFullyAuthorize,
store = store,
forceChanges = ForceFlags.none,
)
synchronisation.runAdminCommand(synchronize)(command)
@ -1969,13 +1973,16 @@ class TopologyAdministrationGroup(
),
): SignedTopologyTransaction[TopologyChangeOp, AuthorityOf] = {
val command = TopologyAdminCommands.Write.Propose(
AuthorityOf(
val authorityOf = AuthorityOf
.create(
partyId,
domainId,
PositiveInt.tryCreate(threshold),
parties,
),
)
.valueOr(error => consoleEnvironment.run(GenericCommandError(error)))
val command = TopologyAdminCommands.Write.Propose(
authorityOf,
signedBy = signedBy.toList,
serial = serial,
store = store,

View File

@ -82,6 +82,7 @@ trait Environment extends NamedLogging with AutoCloseable with NoTracing {
histogramInventory = histogramInventory,
histogramFilter = baseFilter,
histogramConfigs = config.monitoring.metrics.histograms,
config.monitoring.metrics.cardinality.unwrap,
loggerFactory,
)
}

View File

@ -18,6 +18,6 @@ class CantonHistograms()(implicit val inventory: HistogramInventory) {
private[metrics] val participant: ParticipantHistograms =
new ParticipantHistograms(prefix)
private[metrics] val mediator: MediatorHistograms = new MediatorHistograms(prefix)
private[metrics] val sequencer: SequencerHistograms = new SequencerHistograms(prefix)
private[canton] val sequencer: SequencerHistograms = new SequencerHistograms(prefix)
}

View File

@ -12,13 +12,14 @@ import com.daml.metrics.api.{MetricQualification, MetricsContext, MetricsInfoFil
import com.daml.metrics.grpc.DamlGrpcServerMetrics
import com.daml.metrics.{HealthMetrics, HistogramDefinition, MetricsFilterConfig}
import com.digitalasset.canton.config.NonNegativeFiniteDuration
import com.digitalasset.canton.config.RequireTypes.Port
import com.digitalasset.canton.config.RequireTypes.{Port, PositiveInt}
import com.digitalasset.canton.discard.Implicits.DiscardOps
import com.digitalasset.canton.domain.metrics.{MediatorMetrics, SequencerMetrics}
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.metrics.MetricsConfig.JvmMetrics
import com.digitalasset.canton.metrics.MetricsReporterConfig.{Csv, Logging, Prometheus}
import com.digitalasset.canton.participant.metrics.ParticipantMetrics
import com.digitalasset.canton.telemetry.OpenTelemetryFactory
import com.typesafe.scalalogging.LazyLogging
import io.opentelemetry.api.OpenTelemetry
import io.opentelemetry.api.metrics.Meter
@ -26,6 +27,7 @@ import io.opentelemetry.exporter.prometheus.PrometheusHttpServer
import io.opentelemetry.instrumentation.runtimemetrics.java8.*
import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder
import io.opentelemetry.sdk.metrics.`export`.{MetricExporter, MetricReader, PeriodicMetricReader}
import io.opentelemetry.sdk.metrics.internal.state.MetricStorage
import java.io.File
import java.util.concurrent.ScheduledExecutorService
@ -43,6 +45,7 @@ final case class MetricsConfig(
reporters: Seq[MetricsReporterConfig] = Seq.empty,
jvmMetrics: Option[JvmMetrics] = None,
histograms: Seq[HistogramDefinition] = Seq.empty,
cardinality: PositiveInt = PositiveInt.tryCreate(MetricStorage.DEFAULT_MAX_CARDINALITY),
qualifiers: Seq[MetricQualification] = Seq[MetricQualification](
MetricQualification.Errors,
MetricQualification.Latency,
@ -267,10 +270,15 @@ object MetricsRegistry extends LazyLogging {
}
.zip(config.reporters)
.foreach { case (reader, config) =>
sdkMeterProviderBuilder
.registerMetricReader(FilteringMetricsReader.create(config.filters, reader))
.foreach { case (reader, readerConfig) =>
OpenTelemetryFactory
.registerMetricsReaderWithCardinality(
sdkMeterProviderBuilder,
FilteringMetricsReader.create(readerConfig.filters, reader),
config.cardinality.unwrap,
)
.discard
}
sdkMeterProviderBuilder
}

View File

@ -39,7 +39,8 @@ abstract class CantonAppDriver[E <: Environment] extends App with NamedLogging w
(Map(
"Canton" -> BuildInfo.version,
"Daml Libraries" -> BuildInfo.damlLibrariesVersion,
"Supported Canton protocol versions" -> BuildInfo.protocolVersions.toString(),
"Stable Canton protocol versions" -> BuildInfo.stableProtocolVersions.toString(),
"Preview Canton protocol versions" -> BuildInfo.betaProtocolVersions.toString(),
) ++ additionalVersions) foreach { case (name, version) =>
Console.out.println(s"$name: $version")
}

View File

@ -0,0 +1,9 @@
canton.parameters {
# turn on beta protocol version support for domain nodes
beta-version-support = yes
}
canton.participants.participant1.parameters = {
# enable beta version on the participant (this will allow the participant to connect to a domain with beta protocol version)
beta-version-support = yes
}

View File

@ -101,7 +101,6 @@ class ConsoleTest extends AnyWordSpec with BaseTest {
OpenTelemetrySdk.builder().build(),
SdkTracerProvider.builder(),
NoOpOnDemandMetricsReader$,
metricsEnabled = false,
)
)
type NodeGroup = Seq[(String, Nodes[CantonNode, CantonNodeBootstrap[CantonNode]])]

View File

@ -97,6 +97,7 @@ class NodesTest extends FixtureAnyWordSpec with BaseTest with HasExecutionContex
dbMigrateAndStart: Boolean = false,
disableUpgradeValidation: Boolean = false,
devVersionSupport: Boolean = false,
betaVersionSupport: Boolean = false,
dontWarnOnDeprecatedPV: Boolean = false,
initialProtocolVersion: ProtocolVersion = testedProtocolVersion,
exitOnFatalFailures: Boolean = true,
@ -128,7 +129,6 @@ class NodesTest extends FixtureAnyWordSpec with BaseTest with HasExecutionContex
OpenTelemetrySdk.builder().build(),
SdkTracerProvider.builder(),
OnDemandMetricsReader.NoOpOnDemandMetricsReader$,
metricsEnabled = false,
),
)

View File

@ -90,7 +90,7 @@ class CliIntegrationTest extends FixtureAnyWordSpec with BaseTest with SuiteMixi
s"$cantonBin --version" ! processLogger
checkOutput(
processLogger,
shouldContain = Seq("Canton", "Daml Libraries", BuildInfo.protocolVersions.toString),
shouldContain = Seq("Canton", "Daml Libraries", BuildInfo.stableProtocolVersions.toString),
)
}

View File

@ -0,0 +1,16 @@
// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
syntax = "proto3";
package com.digitalasset.canton.protocol.v30;
import "google/protobuf/wrappers.proto";
import "scalapb/scalapb.proto";
// A request from a sequencer asking for the given content to be ordered.
message OrderingRequest {
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion";
string sequencer_id = 1; // Id of the sequencer requesting ordering of the request
google.protobuf.BytesValue content = 2; // Content of the request to be ordered
}

View File

@ -52,8 +52,10 @@ message TrafficConsumed {
uint64 extra_traffic_consumed = 2;
// Remaining free base traffic
uint64 base_traffic_remainder = 3;
// Cost deducted at `timestamp`, only present when traffic was consumed at `timestamp`, otherwise it is set to 0
uint64 last_consumed_cost = 4;
// Timestamp at which this state is valid - this timestamp is used to compute the base traffic remainder above
int64 sequencing_timestamp = 4; // in microseconds of UTC time since Unix epoch
int64 sequencing_timestamp = 5; // in microseconds of UTC time since Unix epoch
}
// Message representing a traffic purchase made on behalf of a member
@ -77,10 +79,12 @@ message TrafficState {
int64 extra_traffic_consumed = 2;
// Amount of base traffic remaining
int64 base_traffic_remainder = 3;
// Cost deducted at `timestamp`, only present when traffic was consumed at `timestamp`, otherwise it is set to 0
uint64 last_consumed_cost = 4;
// Timestamp at which the state is valid
int64 timestamp = 4;
int64 timestamp = 5;
// Optional serial of the balance update that updated the extra traffic limit
google.protobuf.UInt32Value serial = 5;
google.protobuf.UInt32Value serial = 6;
}
message SetTrafficPurchasedMessage {

View File

@ -5,7 +5,6 @@ package com.digitalasset.canton.data
import cats.syntax.either.*
import cats.syntax.traverse.*
import com.digitalasset.daml.lf.value.{Value, ValueCoder, ValueOuterClass}
import com.digitalasset.canton.ProtoDeserializationError.{
FieldNotSet,
OtherError,
@ -34,6 +33,7 @@ import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.digitalasset.canton.util.NoCopy
import com.digitalasset.canton.version.*
import com.digitalasset.canton.{LfChoiceName, LfInterfaceId, LfPackageId, LfPartyId, LfVersioned}
import com.digitalasset.daml.lf.value.{Value, ValueCoder, ValueOuterClass}
import com.google.protobuf.ByteString
/** Summarizes the information that is needed in addition to the other fields of [[ViewParticipantData]] for

View File

@ -3,8 +3,8 @@
package com.digitalasset.canton.data
import com.digitalasset.daml.lf.data.Time.Timestamp
import com.daml.logging.entries.{LoggingValue, ToLoggingValue}
import com.digitalasset.daml.lf.data.Time.Timestamp
import java.time.Duration
import scala.util.{Failure, Success, Try}

View File

@ -3,11 +3,13 @@
package com.digitalasset.canton.data
import com.digitalasset.daml.lf.data.{Bytes, Ref}
import com.daml.logging.entries.{LoggingValue, ToLoggingValue}
import com.digitalasset.canton.data.Offset.beforeBegin
import com.digitalasset.daml.lf.data.{Bytes, Ref}
import com.google.protobuf.ByteString
import java.io.InputStream
import java.nio.{ByteBuffer, ByteOrder}
/** Offsets into streams with hierarchical addressing.
*
@ -31,10 +33,16 @@ final case class Offset(bytes: Bytes) extends Ordered[Offset] {
def toByteArray: Array[Byte] = bytes.toByteArray
def toHexString: Ref.HexString = bytes.toHexString
// Converts this offset to a Long: the `beforeBegin` sentinel maps to 0; any other
// offset decodes the 8-byte big-endian value stored after the 1-byte version prefix
// (the inverse of the encoding performed by `Offset.fromLong`).
def toLong: Long =
if (this == beforeBegin) 0L
else ByteBuffer.wrap(bytes.toByteArray).getLong(1)
}
object Offset {
val beforeBegin: Offset = new Offset(Bytes.Empty)
private val longBasedByteLength: Int = 9 // One byte for the version plus 8 bytes for Long
private val versionUpstreamOffsetsAsLong: Byte = 0
def fromByteString(bytes: ByteString) = new Offset(Bytes.fromByteString(bytes))
@ -44,6 +52,21 @@ object Offset {
def fromHexString(s: Ref.HexString) = new Offset(Bytes.fromHexString(s))
/** Encodes `l` as an [[Offset]]: 0 maps to `beforeBegin`; any other value is stored as a
  * single version byte followed by the 8-byte big-endian representation of `l`
  * (the inverse of `toLong`).
  */
def fromLong(l: Long): Offset =
if (l == 0L) beforeBegin
else {
  // Absolute puts leave position at 0, so ByteString.copyFrom picks up all 9 bytes.
  val buffer = ByteBuffer
    .allocate(longBasedByteLength)
    .order(ByteOrder.BIG_ENDIAN)
    .put(0, versionUpstreamOffsetsAsLong)
    .putLong(1, l)
  Offset(com.digitalasset.daml.lf.data.Bytes.fromByteString(ByteString.copyFrom(buffer)))
}
implicit val `Offset to LoggingValue`: ToLoggingValue[Offset] = value =>
LoggingValue.OfString(value.toHexString)
}

View File

@ -5,7 +5,6 @@ package com.digitalasset.canton.data
import cats.syntax.either.*
import cats.syntax.traverse.*
import com.digitalasset.daml.lf.data.Ref
import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.*
import com.digitalasset.canton.crypto.*
@ -15,6 +14,7 @@ import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.digitalasset.canton.serialization.{ProtoConverter, ProtocolVersionedMemoizedEvidence}
import com.digitalasset.canton.topology.*
import com.digitalasset.canton.version.*
import com.digitalasset.daml.lf.data.Ref
import com.google.protobuf.ByteString
/** Information about the submitters of the transaction

View File

@ -56,12 +56,14 @@ object CantonNodeParameters {
}
trait Protocol {
def devVersionSupport: Boolean
def betaVersionSupport: Boolean
def dontWarnOnDeprecatedPV: Boolean
}
object Protocol {
final case class Impl(
devVersionSupport: Boolean,
betaVersionSupport: Boolean,
dontWarnOnDeprecatedPV: Boolean,
) extends CantonNodeParameters.Protocol
}
@ -94,5 +96,6 @@ trait HasProtocolCantonNodeParameters extends CantonNodeParameters.Protocol {
protected def protocol: CantonNodeParameters.Protocol
def devVersionSupport: Boolean = protocol.devVersionSupport
def betaVersionSupport: Boolean = protocol.betaVersionSupport
def dontWarnOnDeprecatedPV: Boolean = protocol.dontWarnOnDeprecatedPV
}

View File

@ -18,6 +18,7 @@ import com.digitalasset.canton.{
DoNotTraverseLikeFuture,
}
import java.util.concurrent.CompletionException
import scala.concurrent.{Awaitable, ExecutionContext, Future}
import scala.util.chaining.*
import scala.util.{Failure, Success, Try}
@ -81,8 +82,14 @@ object FutureUnlessShutdown {
apply(f.transform({
case Success(value) => Success(UnlessShutdown.Outcome(value))
case Failure(AbortedDueToShutdownException(_)) => Success(UnlessShutdown.AbortedDueToShutdown)
case Failure(ce: CompletionException) =>
ce.getCause match {
case AbortedDueToShutdownException(_) => Success(UnlessShutdown.AbortedDueToShutdown)
case _ => Failure(ce)
}
case Failure(other) => Failure(other)
}))
}
/** Monad combination of `Future` and [[UnlessShutdown]]

View File

@ -5,12 +5,6 @@ package com.digitalasset.canton.logging.pretty
import cats.Show.Shown
import com.daml.error.utils.DecodedCantonError
import com.digitalasset.daml.lf.data.Ref
import com.digitalasset.daml.lf.data.Ref.{DottedName, PackageId, QualifiedName}
import com.digitalasset.daml.lf.transaction.ContractStateMachine.ActiveLedgerState
import com.digitalasset.daml.lf.transaction.TransactionErrors.*
import com.digitalasset.daml.lf.transaction.Versioned
import com.digitalasset.daml.lf.value.Value
import com.daml.nonempty.{NonEmpty, NonEmptyUtil}
import com.digitalasset.canton.config.RequireTypes.{Port, RefinedNumeric}
import com.digitalasset.canton.data.DeduplicationPeriod
@ -26,6 +20,12 @@ import com.digitalasset.canton.{
LfVersioned,
Uninhabited,
}
import com.digitalasset.daml.lf.data.Ref
import com.digitalasset.daml.lf.data.Ref.{DottedName, PackageId, QualifiedName}
import com.digitalasset.daml.lf.transaction.ContractStateMachine.ActiveLedgerState
import com.digitalasset.daml.lf.transaction.TransactionErrors.*
import com.digitalasset.daml.lf.transaction.Versioned
import com.digitalasset.daml.lf.value.Value
import com.google.protobuf.ByteString
import io.grpc.Status
import io.grpc.health.v1.HealthCheckResponse.ServingStatus
@ -63,6 +63,8 @@ trait PrettyInstances {
implicit def prettyLong: Pretty[Long] = prettyOfString(_.toString)
implicit def prettyBigDecimal: Pretty[BigDecimal] = prettyOfString(_.toString)
implicit def prettyJLong: Pretty[JLong] = prettyOfString(_.toString)
implicit def prettyBoolean: Pretty[Boolean] = prettyOfString(_.toString)

View File

@ -3,12 +3,12 @@
package com.digitalasset
import com.digitalasset.canton.data.{Counter, CounterCompanion}
import com.digitalasset.canton.serialization.DeterministicEncoding.encodeLong
import com.digitalasset.daml.lf.command.ReplayCommand
import com.digitalasset.daml.lf.data.{IdString, Ref, Time}
import com.digitalasset.daml.lf.transaction.{ContractStateMachine, Versioned}
import com.digitalasset.daml.lf.value.Value
import com.digitalasset.canton.data.{Counter, CounterCompanion}
import com.digitalasset.canton.serialization.DeterministicEncoding.encodeLong
import com.google.protobuf.ByteString
package object canton {

View File

@ -4,12 +4,12 @@
package com.digitalasset.canton.protocol
import cats.syntax.either.*
import com.digitalasset.daml.lf.data.Bytes
import com.digitalasset.canton.checked
import com.digitalasset.canton.config.CantonRequireTypes.String255
import com.digitalasset.canton.config.RequireTypes.NonNegativeInt
import com.digitalasset.canton.crypto.*
import com.digitalasset.canton.version.ProtocolVersion
import com.digitalasset.daml.lf.data.Bytes
import com.google.protobuf.ByteString
object CantonContractIdVersion {

View File

@ -293,13 +293,13 @@ object OnboardingRestriction {
* Must be greater than `maxSequencingTime` specified by a participant,
* practically also requires extra slack to allow clock skew between participant and sequencer.
* @param onboardingRestriction current onboarding restrictions for participants
* @param catchUpParameters Optional parameters of type [[com.digitalasset.canton.protocol.AcsCommitmentsCatchUpConfig]].
* Defined starting with protobuf version v2 and protocol version v30.
* If None, the catch-up mode is disabled: the participant does not trigger the
* catch-up mode when lagging behind.
* If not None, it specifies the number of reconciliation intervals that the
* participant skips in catch-up mode, and the number of catch-up intervals
* intervals a participant should lag behind in order to enter catch-up mode.
* @param acsCommitmentsCatchUpConfig Optional parameters of type [[com.digitalasset.canton.protocol.AcsCommitmentsCatchUpConfig]].
* Defined starting with protobuf version v2 and protocol version v30.
* If None, the catch-up mode is disabled: the participant does not trigger the
* catch-up mode when lagging behind.
* If not None, it specifies the number of reconciliation intervals that the
* participant skips in catch-up mode, and the number of catch-up intervals
* intervals a participant should lag behind in order to enter catch-up mode.
*
* @throws DynamicDomainParameters$.InvalidDynamicDomainParameters
* if `mediatorDeduplicationTimeout` is less than twice of `ledgerTimeRecordTimeTolerance`.

View File

@ -3,7 +3,6 @@
package com.digitalasset.canton.protocol
import com.digitalasset.daml.lf.data.Bytes as LfBytes
import com.digitalasset.canton.crypto.Salt
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.serialization.ProtoConverter
@ -14,6 +13,7 @@ import com.digitalasset.canton.version.{
ProtoVersion,
ProtocolVersion,
}
import com.digitalasset.daml.lf.data.Bytes as LfBytes
import scala.util.chaining.*

View File

@ -4,11 +4,11 @@
package com.digitalasset.canton.protocol
import cats.syntax.either.*
import com.digitalasset.daml.lf.data.Ref
import com.digitalasset.daml.lf.value.{ValueCoder, ValueOuterClass}
import com.digitalasset.canton.serialization.ProtoConverter
import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.digitalasset.canton.{LfVersioned, ProtoDeserializationError}
import com.digitalasset.daml.lf.data.Ref
import com.digitalasset.daml.lf.value.{ValueCoder, ValueOuterClass}
object GlobalKeySerialization {

View File

@ -4,9 +4,9 @@
package com.digitalasset.canton.protocol
import cats.syntax.either.*
import com.digitalasset.daml.lf.data.Bytes
import com.digitalasset.canton.ProtoDeserializationError.ValueDeserializationError
import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.digitalasset.daml.lf.data.Bytes
import com.google.protobuf.ByteString
object LfHashSyntax {

View File

@ -4,9 +4,9 @@
package com.digitalasset.canton.protocol
import cats.syntax.either.*
import com.digitalasset.canton.ProtoDeserializationError.ValueDeserializationError
import com.digitalasset.daml.lf.data.Ref
import com.digitalasset.daml.lf.data.Ref.Identifier
import com.digitalasset.canton.ProtoDeserializationError.ValueDeserializationError
object RefIdentifierSyntax {
implicit class RefIdentifierSyntax(private val identifier: Ref.Identifier) extends AnyVal {

View File

@ -5,7 +5,6 @@ package com.digitalasset.canton.protocol
import cats.implicits.toTraverseOps
import cats.syntax.either.*
import com.digitalasset.daml.lf.value.ValueCoder
import com.digitalasset.canton.ProtoDeserializationError.ValueConversionError
import com.digitalasset.canton.crypto.Salt
import com.digitalasset.canton.data.{CantonTimestamp, ProcessedDisclosedContract}
@ -16,6 +15,7 @@ import com.digitalasset.canton.serialization.ProtoConverter
import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.digitalasset.canton.version.*
import com.digitalasset.canton.{LfTimestamp, admin, crypto, protocol}
import com.digitalasset.daml.lf.value.ValueCoder
import com.google.protobuf.ByteString
import com.google.protobuf.timestamp.Timestamp
import io.scalaland.chimney.dsl.*

View File

@ -3,10 +3,10 @@
package com.digitalasset.canton.protocol
import com.digitalasset.daml.lf.data.Bytes as LfBytes
import com.digitalasset.canton.ProtoDeserializationError
import com.digitalasset.canton.data.{DeduplicationPeriod, Offset}
import com.digitalasset.canton.serialization.ProtoConverter.{DurationConverter, ParsingResult}
import com.digitalasset.daml.lf.data.Bytes as LfBytes
final case class SerializableDeduplicationPeriod(deduplicationPeriod: DeduplicationPeriod) {
def toProtoV30: v30.DeduplicationPeriod = deduplicationPeriod match {

View File

@ -4,8 +4,6 @@
package com.digitalasset.canton.protocol
import cats.syntax.either.*
import com.digitalasset.daml.lf.transaction.{TransactionCoder, TransactionOuterClass}
import com.digitalasset.daml.lf.value.ValueCoder
import com.digitalasset.canton.ProtoDeserializationError.ValueConversionError
import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.digitalasset.canton.serialization.{
@ -14,6 +12,8 @@ import com.digitalasset.canton.serialization.{
SerializationCheckFailed,
}
import com.digitalasset.canton.store.db.DbDeserializationException
import com.digitalasset.daml.lf.transaction.{TransactionCoder, TransactionOuterClass}
import com.digitalasset.daml.lf.value.ValueCoder
import com.google.common.annotations.VisibleForTesting
import com.google.protobuf.ByteString
import monocle.Lens

View File

@ -3,8 +3,8 @@
package com.digitalasset.canton.protocol
import com.digitalasset.daml.lf.data.Bytes
import com.digitalasset.canton.crypto.Hash
import com.digitalasset.daml.lf.data.Bytes
/** A hash-based identifier for contracts.
* Must be paired with a discriminator to obtain a complete contract ID.

View File

@ -12,12 +12,22 @@ import com.digitalasset.canton.topology.client.TopologySnapshot
import com.digitalasset.canton.topology.{ParticipantId, PartyId}
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.ShowUtil.*
import com.digitalasset.canton.util.{Checked, ErrorUtil}
import com.digitalasset.canton.util.{Checked, ErrorUtil, SetCover}
import scala.concurrent.{ExecutionContext, Future}
object RootHashMessageRecipients extends HasLoggerName {
/** Computes the list of recipients for the root hash messages of a confirmation request.
* Each recipient returned is either a participant or a group address
* [[com.digitalasset.canton.sequencing.protocol.ParticipantsOfParty]].
* The group addresses can be overlapping, but a participant member recipient will only be present if it is
* not included in any of the group addresses.
*
* @param informees informees of the confirmation request
* @param ipsSnapshot topology snapshot used at submission time
* @return list of root hash message recipients
*/
def rootHashRecipientsForInformees(
informees: Set[LfPartyId],
ipsSnapshot: TopologySnapshot,
@ -37,10 +47,10 @@ object RootHashMessageRecipients extends HasLoggerName {
)
)
)
groupAddressedInformees <- ipsSnapshot.partiesWithGroupAddressing(informeesList)
participantsOfGroupAddressedInformees <- ipsSnapshot.activeParticipantsOfParties(
groupAddressedInformees.toList
)
participantsOfGroupAddressedInformees <- ipsSnapshot
.activeParticipantsOfPartiesWithGroupAddressing(
informeesList
)
} yield {
// If there are several group-addressed informees with overlapping participants,
// we actually look for a set cover. It doesn't matter which one we pick.
@ -86,28 +96,45 @@ object RootHashMessageRecipients extends HasLoggerName {
} ++ directlyAddressedParticipants.map { participant =>
MemberRecipient(participant) -> Set(participant)
}
// TODO(#13883) Use a set cover for the recipients instead of all of them
// SetCover.greedy(sets.toMap)
sets.map { case (recipient, _) => recipient }.toSeq
SetCover.greedy(sets)
}
}
/** Validate the recipients of root hash messages received by a participant in Phase 3.
*/
def validateRecipientsOnParticipant(recipients: Recipients): Checked[Nothing, String, Unit] = {
recipients.asSingleGroup match {
case Some(group) if group.sizeCompare(2) == 0 =>
// group members must be participantId and mediator, due to previous checks
Checked.unit
case Some(group) =>
val hasGroupAddressing = group.collect { case ParticipantsOfParty(party) =>
party.toLf
}.nonEmpty
if (hasGroupAddressing) Checked.unit
else Checked.continue(s"The root hash message has an invalid recipient group.\n$recipients")
case _ =>
Checked.continue(s"The root hash message has more than one recipient group.\n$recipients")
// group members must be of size 2, which must be participant and mediator, due to previous checks
val validGroups = recipients.trees.collect {
case RecipientsTree(group, Seq()) if group.sizeCompare(2) == 0 => group
}
if (validGroups.size == recipients.trees.size) {
val allUseGroupAddressing = validGroups.forall {
_.exists {
case ParticipantsOfParty(_) => true
case _ => false
}
}
// Due to how rootHashRecipientsForInformees() computes recipients, if there is more than one group,
// they must all address the participant using group addressing.
if (allUseGroupAddressing || validGroups.sizeCompare(1) == 0) Checked.unit
else
Checked.continue(
s"The root hash message has more than one recipient group, not all using group addressing.\n$recipients"
)
} else Checked.continue(s"The root hash message has invalid recipient groups.\n$recipients")
}
/** Validate the recipients of root hash messages received by a mediator in Phase 2.
*
* A recipient is valid if each recipient tree:
* - contains only a single recipient group (no children)
* - the recipient group is of size 2
* - the recipient group contains:
* - the mediator group recipient
* - either a participant member recipient or a ParticipantsOfParty group recipient
*/
def wrongAndCorrectRecipients(
recipientsList: Seq[Recipients],
mediator: MediatorGroupRecipient,
@ -115,18 +142,14 @@ object RootHashMessageRecipients extends HasLoggerName {
val (wrongRecipients, correctRecipients) = recipientsList.flatMap { recipients =>
recipients.trees.toList.map {
case tree @ RecipientsTree(group, Seq()) =>
val participantCount = group.count {
case MemberRecipient(_: ParticipantId) => true
val hasMediator = group.contains(mediator)
val hasParticipantOrPop = group.exists {
case MemberRecipient(_: ParticipantId) | ParticipantsOfParty(_) => true
case _ => false
}
val groupAddressCount = group.count {
case ParticipantsOfParty(_) => true
case _ => false
}
val groupAddressingBeingUsed = groupAddressCount > 0
Either.cond(
((group.size == 2) || (groupAddressingBeingUsed && group.size >= 2)) &&
group.contains(mediator) && (participantCount + groupAddressCount > 0),
group.sizeCompare(2) == 0 && hasMediator && hasParticipantOrPop,
group,
tree,
)

View File

@ -3,14 +3,14 @@
package com.digitalasset.canton
import com.digitalasset.daml.lf.crypto.Hash
import com.digitalasset.daml.lf.data.Ref
import com.digitalasset.daml.lf.transaction.*
import com.digitalasset.daml.lf.value.Value
import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.data.ViewType
import com.digitalasset.canton.protocol.messages.EncryptedViewMessage
import com.digitalasset.canton.sequencing.protocol.OpenEnvelope
import com.digitalasset.daml.lf.crypto.Hash
import com.digitalasset.daml.lf.data.Ref
import com.digitalasset.daml.lf.transaction.*
import com.digitalasset.daml.lf.value.Value
/** Provides shorthands for general purpose types.
* <p>

View File

@ -296,7 +296,8 @@ abstract class ReplayingSendsSequencerClientTransportCommon(
private def scheduleCheck(): Unit = {
performUnlessClosing(functionFullName) {
val nextCheckDuration = idlenessDuration.toJava.minus(elapsed(stateRef.get()))
val nextCheckDuration =
idlenessDuration.toJava.minus(durationFromLastEventToNow(stateRef.get()))
val _ = materializer.scheduleOnce(nextCheckDuration.toScala, () => checkIfIdle())
}.onShutdown(())
}
@ -315,25 +316,35 @@ abstract class ReplayingSendsSequencerClientTransportCommon(
private def checkIfIdle(): Unit = {
val stateSnapshot = stateRef.get()
val elapsedDuration = elapsed(stateSnapshot)
val isIdle = elapsedDuration.compareTo(idlenessDuration.toJava) >= 0
val lastEventTime = stateSnapshot.lastEventAt.getOrElse(stateSnapshot.startedAt).toInstant
val elapsedDuration =
java.time.Duration.between(stateSnapshot.startedAt.toInstant, lastEventTime)
val isIdle = durationFromLastEventToNow(stateSnapshot).compareTo(idlenessDuration.toJava) >= 0
if (isIdle) {
idleP
.trySuccess(
EventsReceivedReport(
elapsedDuration.toScala,
totalEventsReceived = stateSnapshot.eventCounter,
finishedAtCounter = stateSnapshot.lastCounter,
if (pendingSends.sizeIs > 0) {
idleP
.tryFailure(
new IllegalStateException(s"There are ${pendingSends.size} pending send requests")
)
)
.discard
.discard
} else {
idleP
.trySuccess(
EventsReceivedReport(
elapsedDuration.toScala,
totalEventsReceived = stateSnapshot.eventCounter,
finishedAtCounter = stateSnapshot.lastCounter,
)
)
.discard
}
} else {
scheduleCheck() // schedule the next check
}
}
private def elapsed(stateSnapshot: State) = {
/** Time elapsed between the most recent event (or the start time, if no event was
  * received yet) and now.
  */
private def durationFromLastEventToNow(stateSnapshot: State) = {
val referencePoint = stateSnapshot.lastEventAt.getOrElse(stateSnapshot.startedAt).toInstant
java.time.Duration.between(referencePoint, Instant.now())
}

View File

@ -59,15 +59,16 @@ final case class SubmissionRequest private (
@VisibleForTesting
def isConfirmationRequest: Boolean = {
val hasParticipantRecipient = batch.allMembers.exists {
case _: ParticipantId => true
case _: Member => false
val hasParticipantOrPopRecipient = batch.allRecipients.exists {
case MemberRecipient(_: ParticipantId) => true
case ParticipantsOfParty(_) => true
case _ => false
}
val hasMediatorRecipient = batch.allRecipients.exists {
case _: MediatorGroupRecipient => true
case _: Recipient => false
}
hasParticipantRecipient && hasMediatorRecipient
hasParticipantOrPopRecipient && hasMediatorRecipient
}
// Caches the serialized request to be able to do checks on its size without re-serializing

View File

@ -25,31 +25,35 @@ final case class TrafficState(
extraTrafficPurchased: NonNegativeLong,
extraTrafficConsumed: NonNegativeLong,
baseTrafficRemainder: NonNegativeLong,
lastConsumedCost: NonNegativeLong,
timestamp: CantonTimestamp,
serial: Option[PositiveInt],
) extends PrettyPrinting {
def extraTrafficRemainder: Long = extraTrafficPurchased.value - extraTrafficConsumed.value
def availableTraffic: Long = extraTrafficRemainder + baseTrafficRemainder.value
// Need big decimal here because it could overflow a long especially if extraTrafficPurchased == Long.MAX
lazy val availableTraffic: BigDecimal =
BigDecimal(extraTrafficRemainder) + BigDecimal(baseTrafficRemainder.value)
def toProtoV30: v30.TrafficState = v30.TrafficState(
extraTrafficPurchased = extraTrafficPurchased.value,
extraTrafficConsumed = extraTrafficConsumed.value,
baseTrafficRemainder = baseTrafficRemainder.value,
lastConsumedCost = lastConsumedCost.value,
timestamp = timestamp.toProtoPrimitive,
serial = serial.map(_.value),
)
def toTrafficConsumed(member: Member): TrafficConsumed = TrafficConsumed(
member = member,
sequencingTimestamp = timestamp,
extraTrafficConsumed = extraTrafficConsumed,
baseTrafficRemainder = baseTrafficRemainder,
)
/** Converts this state into the [[TrafficConsumed]] view for `member`,
  * valid at this state's `timestamp`.
  */
def toTrafficConsumed(member: Member): TrafficConsumed =
TrafficConsumed(
member = member,
sequencingTimestamp = timestamp,
extraTrafficConsumed = extraTrafficConsumed,
baseTrafficRemainder = baseTrafficRemainder,
lastConsumedCost = lastConsumedCost,
)
def toTrafficReceipt(
consumedCost: NonNegativeLong
): TrafficReceipt = TrafficReceipt(
consumedCost = consumedCost,
/** Converts this state into a [[TrafficReceipt]], reporting `lastConsumedCost`
  * (the cost deducted at this state's `timestamp`) as the consumed cost.
  */
def toTrafficReceipt: TrafficReceipt = TrafficReceipt(
consumedCost = lastConsumedCost,
extraTrafficConsumed = extraTrafficConsumed,
baseTrafficRemainder = baseTrafficRemainder,
)
@ -67,8 +71,10 @@ final case class TrafficState(
param("extraTrafficLimit", _.extraTrafficPurchased),
param("extraTrafficConsumed", _.extraTrafficConsumed),
param("baseTrafficRemainder", _.baseTrafficRemainder),
param("lastConsumedCost", _.lastConsumedCost),
param("timestamp", _.timestamp),
paramIfDefined("serial", _.serial),
param("availableTraffic", _.availableTraffic),
)
}
@ -78,13 +84,15 @@ object TrafficState {
pp >> Some(v.extraTrafficPurchased.value)
pp >> Some(v.extraTrafficConsumed.value)
pp >> Some(v.baseTrafficRemainder.value)
pp >> Some(v.lastConsumedCost.value)
pp >> v.timestamp
pp >> v.serial.map(_.value)
}
implicit val getResultTrafficState: GetResult[Option[TrafficState]] = {
GetResult
.createGetTuple5(
.createGetTuple6(
nonNegativeLongOptionGetResult,
nonNegativeLongOptionGetResult,
nonNegativeLongOptionGetResult,
nonNegativeLongOptionGetResult,
@ -98,6 +106,7 @@ object TrafficState {
NonNegativeLong.zero,
NonNegativeLong.zero,
NonNegativeLong.zero,
NonNegativeLong.zero,
CantonTimestamp.Epoch,
Option.empty,
)
@ -106,6 +115,7 @@ object TrafficState {
NonNegativeLong.zero,
NonNegativeLong.zero,
NonNegativeLong.zero,
NonNegativeLong.zero,
timestamp,
Option.empty,
)
@ -116,12 +126,14 @@ object TrafficState {
extraTrafficLimit <- ProtoConverter.parseNonNegativeLong(trafficStateP.extraTrafficPurchased)
extraTrafficConsumed <- ProtoConverter.parseNonNegativeLong(trafficStateP.extraTrafficConsumed)
baseTrafficRemainder <- ProtoConverter.parseNonNegativeLong(trafficStateP.baseTrafficRemainder)
lastConsumedCost <- ProtoConverter.parseNonNegativeLong(trafficStateP.lastConsumedCost)
timestamp <- CantonTimestamp.fromProtoPrimitive(trafficStateP.timestamp)
serial <- trafficStateP.serial.traverse(ProtoConverter.parsePositiveInt)
} yield TrafficState(
extraTrafficLimit,
extraTrafficConsumed,
baseTrafficRemainder,
lastConsumedCost,
timestamp,
serial,
)

View File

@ -3,13 +3,14 @@
package com.digitalasset.canton.sequencing.traffic
import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.config.RequireTypes.{NonNegativeLong, PositiveInt}
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.sequencing.protocol.{
Batch,
ClosedEnvelope,
GroupRecipient,
MemberRecipient,
import com.digitalasset.canton.sequencing.protocol.*
import com.digitalasset.canton.sequencing.traffic.EventCostCalculator.{
EnvelopeCostDetails,
EventCostDetails,
}
import com.digitalasset.canton.topology.Member
import com.digitalasset.canton.tracing.TraceContext
@ -17,6 +18,51 @@ import com.digitalasset.canton.util.ErrorUtil
import com.digitalasset.canton.version.ProtocolVersion
import com.google.common.annotations.VisibleForTesting
/** Companion object of [[EventCostCalculator]]: holds the data structures that describe
  * how an event's traffic cost was computed, for reporting/debugging purposes.
  */
object EventCostCalculator {
/** Contains details of the computation of the cost of an envelope.
* @param writeCost write cost associated with the envelope
* @param readCost read cost associated with the envelope
* @param finalCost final cost associated with the envelope (typically writeCost + readCost at the moment)
* @param recipients recipients of the envelope
*/
final case class EnvelopeCostDetails(
writeCost: Long,
readCost: Long,
finalCost: Long,
recipients: NonEmpty[Seq[Recipient]],
) extends PrettyPrinting {
override def pretty: Pretty[EnvelopeCostDetails] = prettyOfClass(
param("write cost", _.writeCost),
param("read cost", _.readCost),
param("final cost", _.finalCost),
param("recipients", _.recipients),
)
}
/** Contains details of the computation of the cost of an event
* @param costMultiplier cost multiplier used for the computation
* @param groupToMembersSize size of each recipient group
* @param envelopes details of the cost computation of each envelope
* @param eventCost final cost of the event
*/
final case class EventCostDetails(
costMultiplier: PositiveInt,
groupToMembersSize: Map[GroupRecipient, Int],
envelopes: List[EnvelopeCostDetails],
eventCost: NonNegativeLong,
) extends PrettyPrinting {
override def pretty: Pretty[EventCostDetails] = prettyOfClass(
param("cost multiplier", _.costMultiplier),
param("group to members size", _.groupToMembersSize),
param("envelopes cost details", _.envelopes),
param("event cost", _.eventCost),
)
}
}
// TODO(i12907): Precise costs calculations
class EventCostCalculator(override val loggerFactory: NamedLoggerFactory) extends NamedLogging {
@ -25,12 +71,17 @@ class EventCostCalculator(override val loggerFactory: NamedLoggerFactory) extend
costMultiplier: PositiveInt,
groupToMembers: Map[GroupRecipient, Set[Member]],
protocolVersion: ProtocolVersion,
)(implicit traceContext: TraceContext): NonNegativeLong = {
)(implicit traceContext: TraceContext): EventCostDetails = {
// If changing the cost computation, make sure to tie it to a protocol version
// For now there's only one version of cost computation
if (protocolVersion >= ProtocolVersion.v31) {
NonNegativeLong.tryCreate(
event.envelopes.map(computeEnvelopeCost(costMultiplier, groupToMembers)).sum
val envelopeCosts = event.envelopes.map(computeEnvelopeCost(costMultiplier, groupToMembers))
val eventCost = NonNegativeLong.tryCreate(envelopeCosts.map(_.finalCost).sum)
EventCostDetails(
costMultiplier,
groupToMembers.view.mapValues(_.size).toMap,
envelopeCosts,
eventCost,
)
} else {
ErrorUtil.invalidState(
@ -46,10 +97,10 @@ class EventCostCalculator(override val loggerFactory: NamedLoggerFactory) extend
def computeEnvelopeCost(
costMultiplier: PositiveInt,
groupToMembers: Map[GroupRecipient, Set[Member]],
)(envelope: ClosedEnvelope): Long = {
val writeCosts = payloadSize(envelope).toLong
)(envelope: ClosedEnvelope): EnvelopeCostDetails = {
val writeCost = payloadSize(envelope).toLong
val allRecipients = envelope.recipients.allRecipients.toSeq
val allRecipients: NonEmpty[Seq[Recipient]] = envelope.recipients.allRecipients.toSeq
val recipientsSize = allRecipients.map {
case recipient: GroupRecipient => groupToMembers.get(recipient).map(_.size).getOrElse(0)
case _: MemberRecipient => 1
@ -58,14 +109,20 @@ class EventCostCalculator(override val loggerFactory: NamedLoggerFactory) extend
// read costs are based on the write costs and multiplied by the number of recipients with a readVsWrite cost multiplier
try {
// `writeCosts` and `recipientsSize` are originally Int, so multiplying them together cannot overflow a long
val readCosts =
math.multiplyExact(writeCosts * recipientsSize.toLong, costMultiplier.value.toLong) / 10000
math.addExact(readCosts, writeCosts)
val readCost =
math.multiplyExact(writeCost * recipientsSize.toLong, costMultiplier.value.toLong) / 10000
val finalCost = math.addExact(readCost, writeCost)
EnvelopeCostDetails(
writeCost = writeCost,
readCost = readCost,
finalCost = finalCost,
allRecipients,
)
} catch {
case _: ArithmeticException =>
throw new IllegalStateException(
s"""Overflow in cost computation:
| writeCosts = $writeCosts
| writeCosts = $writeCost
| recipientsSize = $recipientsSize
| costMultiplier = $costMultiplier""".stripMargin
)

View File

@ -24,18 +24,18 @@ import slick.jdbc.GetResult
* @param sequencingTimestamp sequencing timestamp at which this traffic consumed state is valid
* @param extraTrafficConsumed extra traffic consumed at this sequencing timestamp
* @param baseTrafficRemainder base traffic remaining at this sequencing timestamp
* @param lastConsumedCost last cost deducted from the traffic balance (base and if not enough, extra)
*/
final case class TrafficConsumed(
member: Member,
sequencingTimestamp: CantonTimestamp,
extraTrafficConsumed: NonNegativeLong,
baseTrafficRemainder: NonNegativeLong,
lastConsumedCost: NonNegativeLong,
) extends PrettyPrinting {
def toTrafficReceipt(
consumedCost: NonNegativeLong
): TrafficReceipt = TrafficReceipt(
consumedCost = consumedCost,
def toTrafficReceipt: TrafficReceipt = TrafficReceipt(
consumedCost = lastConsumedCost,
extraTrafficConsumed,
baseTrafficRemainder,
)
@ -48,6 +48,7 @@ final case class TrafficConsumed(
trafficPurchased.map(_.extraTrafficPurchased).getOrElse(NonNegativeLong.zero),
extraTrafficConsumed,
baseTrafficRemainder,
lastConsumedCost,
trafficPurchased
.map(_.sequencingTimestamp.max(sequencingTimestamp))
.getOrElse(sequencingTimestamp),
@ -105,6 +106,7 @@ final case class TrafficConsumed(
copy(
baseTrafficRemainder = baseTrafficRemainderAtCurrentTime,
sequencingTimestamp = timestamp,
lastConsumedCost = NonNegativeLong.zero,
)
}
@ -127,6 +129,7 @@ final case class TrafficConsumed(
baseTrafficRemainder = baseTrafficRemainderAfterConsume,
extraTrafficConsumed = this.extraTrafficConsumed + extraTrafficConsumed,
sequencingTimestamp = sequencingTimestamp,
lastConsumedCost = cost,
)
}
@ -157,6 +160,7 @@ final case class TrafficConsumed(
param("member", _.member),
param("extraTrafficConsumed", _.extraTrafficConsumed),
param("baseTrafficRemainder", _.baseTrafficRemainder),
param("lastConsumedCost", _.lastConsumedCost),
param("sequencingTimestamp", _.sequencingTimestamp),
)
@ -166,6 +170,7 @@ final case class TrafficConsumed(
extraTrafficConsumed = extraTrafficConsumed.value,
baseTrafficRemainder = baseTrafficRemainder.value,
sequencingTimestamp = sequencingTimestamp.toProtoPrimitive,
lastConsumedCost = lastConsumedCost.value,
)
}
}
@ -177,7 +182,13 @@ object TrafficConsumed {
/** TrafficConsumed object for members the first time they submit a submission request
*/
def init(member: Member): TrafficConsumed =
TrafficConsumed(member, CantonTimestamp.MinValue, NonNegativeLong.zero, NonNegativeLong.zero)
TrafficConsumed(
member,
CantonTimestamp.MinValue,
NonNegativeLong.zero,
NonNegativeLong.zero,
NonNegativeLong.zero,
)
def empty(
member: Member,
@ -188,16 +199,18 @@ object TrafficConsumed {
timestamp,
NonNegativeLong.zero,
baseTraffic,
NonNegativeLong.zero,
)
implicit val trafficConsumedOrdering: Ordering[TrafficConsumed] =
Ordering.by(_.sequencingTimestamp)
implicit val trafficConsumedGetResult: GetResult[TrafficConsumed] =
GetResult.createGetTuple4[Member, CantonTimestamp, NonNegativeLong, NonNegativeLong].andThen {
case (member, ts, trafficConsumed, baseTraffic) =>
TrafficConsumed(member, ts, trafficConsumed, baseTraffic)
}
GetResult
.createGetTuple5[Member, CantonTimestamp, NonNegativeLong, NonNegativeLong, NonNegativeLong]
.andThen { case (member, ts, trafficConsumed, baseTraffic, lastConsumedCost) =>
TrafficConsumed(member, ts, trafficConsumed, baseTraffic, lastConsumedCost)
}
def fromProtoV30(trafficConsumedP: TrafficConsumedP): ParsingResult[TrafficConsumed] =
for {
@ -211,10 +224,14 @@ object TrafficConsumed {
sequencingTimestamp <- CantonTimestamp.fromProtoPrimitive(
trafficConsumedP.sequencingTimestamp
)
lastConsumedCost <- ProtoConverter.parseNonNegativeLong(
trafficConsumedP.lastConsumedCost
)
} yield TrafficConsumed(
member = member,
extraTrafficConsumed = extraTrafficConsumed,
baseTrafficRemainder = baseTrafficRemainder,
sequencingTimestamp = sequencingTimestamp,
lastConsumedCost = lastConsumedCost,
)
}

View File

@ -43,6 +43,7 @@ class TrafficConsumedManager(
current.copy(
extraTrafficConsumed = trafficReceipt.extraTrafficConsumed,
baseTrafficRemainder = trafficReceipt.baseTrafficRemainder,
lastConsumedCost = trafficReceipt.consumedCost,
sequencingTimestamp = timestamp,
)
case current => current
@ -101,7 +102,7 @@ class TrafficConsumedManager(
}.discard
Left(value)
case Right(_) =>
val newState = trafficConsumed.updateAndGet {
val newState = updateAndGet {
_.consume(timestamp, params, eventCost, logger)
}
logger.debug(s"Consumed ${eventCost.value} for $member at $timestamp: new state $newState")

View File

@ -163,12 +163,18 @@ class TrafficStateController(
GroupAddressResolver.resolveGroupsToMembers(groups.toSet, snapshot)
)
.mapK(FutureUnlessShutdown.outcomeK)
} yield eventCostCalculator.computeEventCost(
batch.map(_.closeEnvelope),
trafficControl.readVsWriteScalingFactor,
groupToMembers,
protocolVersion,
)
} yield {
val costDetails = eventCostCalculator.computeEventCost(
batch.map(_.closeEnvelope),
trafficControl.readVsWriteScalingFactor,
groupToMembers,
protocolVersion,
)
logger.debug(
s"Computed following cost for submission request using topology at ${snapshot.timestamp}: $costDetails"
)
costDetails.eventCost
}
costFO.value.map {
_.map { cost =>

View File

@ -5,7 +5,6 @@ package com.digitalasset.canton.serialization
import cats.syntax.either.*
import cats.syntax.traverse.*
import com.digitalasset.daml.lf.data.Ref
import com.daml.nonempty.NonEmpty
import com.daml.nonempty.catsinstances.*
import com.digitalasset.canton.ProtoDeserializationError.{
@ -32,6 +31,7 @@ import com.digitalasset.canton.{
LfWorkflowId,
ProtoDeserializationError,
}
import com.digitalasset.daml.lf.data.Ref
import com.google.protobuf.timestamp.Timestamp
import com.google.protobuf.{ByteString, CodedInputStream, InvalidProtocolBufferException}

View File

@ -435,23 +435,6 @@ object TopologyManagerError extends TopologyManagerErrorGroup {
with TopologyManagerError
}
@Explanation(
"This error indicates that a threshold in the submitted transaction was higher than the number of members that would have to satisfy that threshold."
)
@Resolution(
"""Submit the topology transaction with a lower threshold.
|The metadata details of this error contain the expected maximum in the field ``expectedMaximum``."""
)
object InvalidThreshold
extends ErrorCode(id = "INVALID_THRESHOLD", ErrorCategory.InvalidIndependentOfSystemState) {
final case class ThresholdTooHigh(actual: Int, expectedMaximum: Int)(implicit
override val loggingContext: ErrorLoggingContext
) extends CantonError.Impl(
cause = s"Threshold must not be higher than $expectedMaximum, but was $actual."
)
with TopologyManagerError
}
@Explanation(
"This error indicates that members referenced in a topology transaction have not declared at least one signing key or at least 1 encryption key or both."
)
@ -473,6 +456,20 @@ object TopologyManagerError extends TopologyManagerErrorGroup {
with TopologyManagerError
}
object PartyExceedsHostingLimit
extends ErrorCode(
id = "PARTY_EXCEEDS_HOSTING_LIMIT",
ErrorCategory.InvalidIndependentOfSystemState,
) {
final case class Reject(party: PartyId, limit: Int, numParticipants: Int)(implicit
override val loggingContext: ErrorLoggingContext
) extends CantonError.Impl(
cause =
s"Party $party exceeds hosting limit of $limit with desired number of $numParticipants hosting participant."
)
with TopologyManagerError
}
@Explanation(
"This error indicates that the topology transaction references members that are currently unknown."
)
@ -572,7 +569,7 @@ object TopologyManagerError extends TopologyManagerErrorGroup {
object InvalidTopologyMapping
extends ErrorCode(
id = "INVALID_TOPOLOGY_MAPPING",
ErrorCategory.InvalidGivenCurrentSystemStateOther,
ErrorCategory.InvalidIndependentOfSystemState,
) {
final case class Reject(
description: String
@ -605,7 +602,36 @@ object TopologyManagerError extends TopologyManagerErrorGroup {
}
)
with TopologyManagerError
final case class MissingDomainParameters(effectiveTime: EffectiveTime)(implicit
override val loggingContext: ErrorLoggingContext
) extends CantonError.Impl(
cause = s"Missing domain parameters at $effectiveTime"
)
with TopologyManagerError
}
@Explanation(
"""This error indicates that the namespace is already used by another entity."""
)
@Resolution(
"""Change the namespace used in the submitted topology transaction."""
)
object NamespaceAlreadyInUse
extends ErrorCode(
id = "NAMESPACE_ALREADY_IN_USE",
ErrorCategory.InvalidGivenCurrentSystemStateResourceExists,
) {
final case class Reject(
namespace: Namespace
)(implicit
override val loggingContext: ErrorLoggingContext
) extends CantonError.Impl(
cause = s"The namespace $namespace is already in use by another entity."
)
with TopologyManagerError
}
abstract class DomainErrorGroup extends ErrorGroup()
abstract class ParticipantErrorGroup extends ErrorGroup()

View File

@ -167,6 +167,7 @@ class TopologyStateProcessor(
s"${enqueuingOrStoring} topology transaction ${idx + 1}/$ln ${tx.operation} ${tx.mapping} with ts=$effective (epsilon=${epsilon} ms)"
)
case (ValidatedTopologyTransaction(tx, Some(r), _), idx) =>
// TODO(i19737): we need to emit a security alert, if the rejection is due to a malicious broadcast
logger.info(
s"Rejected transaction ${idx + 1}/$ln ${tx.operation} ${tx.mapping} at ts=$effective (epsilon=${epsilon} ms) due to $r"
)
@ -296,18 +297,13 @@ class TopologyStateProcessor(
authValidator
.validateAndUpdateHeadAuthState(
effective.value,
Seq(toValidate),
inStore.map(tx => tx.mapping.uniqueKey -> tx).toList.toMap,
toValidate,
inStore,
expectFullAuthorization,
)
)
.subflatMap { case (_, txs) =>
// TODO(#12390) proper error
txs.headOption
.toRight[TopologyTransactionRejection](
TopologyTransactionRejection.Other("expected validation result doesn't exist")
)
.flatMap(tx => tx.rejectionReason.toLeft(tx.transaction))
.subflatMap { case (_, tx) =>
tx.rejectionReason.toLeft(tx.transaction)
}
}

View File

@ -4,7 +4,6 @@
package com.digitalasset.canton.topology.client
import cats.data.EitherT
import com.digitalasset.daml.lf.data.Ref.PackageId
import com.digitalasset.canton.concurrent.FutureSupervisor
import com.digitalasset.canton.config.{BatchingConfig, CachingConfigs, ProcessingTimeout}
import com.digitalasset.canton.crypto.SigningPublicKey
@ -31,6 +30,7 @@ import com.digitalasset.canton.util.FutureInstances.*
import com.digitalasset.canton.util.{ErrorUtil, MonadUtil}
import com.digitalasset.canton.version.ProtocolVersion
import com.digitalasset.canton.{LfPartyId, SequencerCounter}
import com.digitalasset.daml.lf.data.Ref.PackageId
import java.util.concurrent.atomic.AtomicReference
import scala.concurrent.duration.Duration

View File

@ -8,7 +8,6 @@ import cats.data.EitherT
import cats.syntax.functor.*
import cats.syntax.functorFilter.*
import cats.syntax.parallel.*
import com.digitalasset.daml.lf.data.Ref.PackageId
import com.digitalasset.canton.concurrent.HasFutureSupervision
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.crypto.{EncryptionPublicKey, SigningPublicKey}
@ -36,6 +35,7 @@ import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.SingleUseCell
import com.digitalasset.canton.version.ProtocolVersion
import com.digitalasset.canton.{LfPartyId, checked}
import com.digitalasset.daml.lf.data.Ref.PackageId
import scala.collection.concurrent.TrieMap
import scala.collection.immutable
@ -299,6 +299,10 @@ trait PartyTopologySnapshotClient {
parties: Seq[LfPartyId]
)(implicit traceContext: TraceContext): Future[Set[LfPartyId]]
def activeParticipantsOfPartiesWithGroupAddressing(
parties: Seq[LfPartyId]
)(implicit traceContext: TraceContext): Future[Map[LfPartyId, Set[ParticipantId]]]
/** Returns a list of all known parties on this domain */
def inspectKnownParties(
filterParty: String,
@ -841,6 +845,11 @@ private[client] trait PartyTopologySnapshotLoader
): Future[Set[LfPartyId]] =
loadAndMapPartyInfos(parties, identity, _.groupAddressing).map(_.keySet)
final override def activeParticipantsOfPartiesWithGroupAddressing(
parties: Seq[LfPartyId]
)(implicit traceContext: TraceContext): Future[Map[LfPartyId, Set[ParticipantId]]] =
loadAndMapPartyInfos(parties, _.participants.keySet, _.groupAddressing)
final override def consortiumThresholds(
parties: Set[LfPartyId]
)(implicit traceContext: TraceContext): Future[Map[LfPartyId, PositiveInt]] =

View File

@ -5,7 +5,6 @@ package com.digitalasset.canton.topology.client
import cats.data.EitherT
import cats.syntax.functor.*
import com.digitalasset.daml.lf.data.Ref.PackageId
import com.daml.nameof.NameOf.functionFullName
import com.digitalasset.canton.SequencerCounter
import com.digitalasset.canton.concurrent.FutureSupervisor
@ -26,6 +25,7 @@ import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.Ge
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.ErrorUtil
import com.digitalasset.canton.version.ProtocolVersion
import com.digitalasset.daml.lf.data.Ref.PackageId
import java.time.Duration as JDuration
import java.util.concurrent.atomic.{AtomicInteger, AtomicReference}

View File

@ -5,7 +5,6 @@ package com.digitalasset.canton.topology.client
import cats.data.EitherT
import cats.syntax.functorFilter.*
import com.digitalasset.daml.lf.data.Ref.PackageId
import com.digitalasset.canton.LfPartyId
import com.digitalasset.canton.crypto.{KeyPurpose, SigningPublicKey}
import com.digitalasset.canton.data.CantonTimestamp
@ -26,6 +25,7 @@ import com.digitalasset.canton.topology.store.*
import com.digitalasset.canton.topology.transaction.TopologyChangeOp.Replace
import com.digitalasset.canton.topology.transaction.*
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.daml.lf.data.Ref.PackageId
import scala.concurrent.{ExecutionContext, Future}
import scala.reflect.ClassTag

View File

@ -8,12 +8,12 @@ import com.digitalasset.canton.crypto.{Fingerprint, SigningPublicKey}
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.topology.Namespace
import com.digitalasset.canton.topology.processing.AuthorizedTopologyTransaction.AuthorizedNamespaceDelegation
import com.digitalasset.canton.topology.transaction.TopologyChangeOp.{Remove, Replace}
import com.digitalasset.canton.topology.transaction.*
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.ErrorUtil
import com.digitalasset.canton.util.ShowUtil.*
import scala.annotation.tailrec
import scala.collection.concurrent.TrieMap
import scala.math.Ordering.Implicits.*
@ -35,8 +35,8 @@ object AuthorizedTopologyTransaction {
/** Returns true if the namespace delegation is a root certificate
*
* A root certificate is defined by the namespace delegation that authorizes the
* key f to act on namespace spanned by f, authorized by f.
* A root certificate is defined by a namespace delegation that authorizes the
* key f to act on the namespace spanned by f, authorized by f.
*/
def isRootCertificate(namespaceDelegation: AuthorizedNamespaceDelegation): Boolean = {
NamespaceDelegation.isRootCertificate(namespaceDelegation.transaction)
@ -44,11 +44,7 @@ object AuthorizedTopologyTransaction {
/** Returns true if the namespace delegation is a root certificate or a root delegation
*
* A root certificate is defined by the namespace delegation that authorizes the
* key f to act on namespace spanned by f, authorized by f.
*
* A root delegation is defined by the namespace delegation the authorizes the
* key g to act on namespace spanned by f.
* A root delegation is a namespace delegation whose target key may be used to authorize other namespace delegations.
*/
def isRootDelegation(namespaceDelegation: AuthorizedNamespaceDelegation): Boolean = {
NamespaceDelegation.isRootDelegation(namespaceDelegation.transaction)
@ -56,49 +52,45 @@ object AuthorizedTopologyTransaction {
}
/** maintain a dependency graph for the namespace delegations
/** Stores a set of namespace delegations, tracks dependencies and
* determines which keys are authorized to sign on behalf of a namespace.
*
* namespace delegations are a bit tricky as there can be an arbitrary number of delegations before we reach
* the actual key that will be used for authorizations. think of it as a certificate chain where we get a
* Namespace delegations are a bit tricky as there can be an arbitrary number of delegations between the namespace key
* and the key that will be used for authorizations. Think of it as a certificate chain where we get a
* series of certificates and we need to figure out a path from one certificate to the root certificate.
*
* NOTE: this class is not thread-safe
*
* properties of the graph:
* - the nodes are the target key fingerprints
* - the node with fingerprint of the namespace is the root node
* - the edges between the nodes are the authorizations where key A authorizes key B to act on the namespace
* in this case, the authorization is outgoing from A and incoming to B.
* - the graph SHOULD be a directed acyclic graph, but we MIGHT have cycles (i.e. key A authorizing B, B authorizing A).
* we don't need to make a fuss about cycles in the graph. we just ignore / report them assuming it was an admin
* mistake, but we don't get confused.
* - root certificates are edges pointing to the node itself. they are separate such that they don't show up
* in the list of incoming / outgoing.
* - we track for each node the set of outgoing edges and incoming edges. an outgoing edge is a delegation where
* the source node is authorizing a target node. obviously every outgoing edge is also an incoming edge.
* Properties of the graph:
* - Each node corresponds to a target key
* - The node with key fingerprint of the namespace is the root node
* - The edges between nodes are namespace delegations.
* If key A signs a namespace delegation with target key B, then key A authorizes key B to act on the namespace.
* In this case, the edge is outgoing from node A and incoming into node B.
* - The graph may have cycles. The implementation does not get confused by this.
*
* computation task:
* - once we've modified the graph, we compute the nodes that are somehow connected to the root node.
* Computation task:
* The graph maintains a set of nodes that are connected to the root node. Those correspond to the keys that are
* authorized to sign on behalf of the namespace.
*
* purpose:
* - once we know which target keys are actually authorized to act on this particular namespace, we can then use
* this information to find out which resulting mapping is properly authorized and which one is not.
* Limitation: clients need to ensure that the namespace delegations added have valid signatures.
* If delegations with invalid signatures are added, authorization will break.
*
* authorization checks:
* - when adding "single transactions", we do check that the transaction is properly authorized. otherwise we
* "ignore" it (returning false). this is used during processing.
* - when adding "batch transactions", we don't check that all of them are properly authorized, as we do allow
* temporarily "nodes" to be unauthorized (so that errors can be fixed by adding a replacement certificate)
* - when removing transactions, we do check that the authorizing key is authorized. but note that the authorizing
* key of an edge REMOVAL doesn't need to match the key used to authorized the ADD.
* @param extraDebugInfo whether to log the authorization graph at debug level on every recomputation
*/
class AuthorizationGraph(
val namespace: Namespace,
extraDebugInfo: Boolean,
val loggerFactory: NamedLoggerFactory,
override protected val loggerFactory: NamedLoggerFactory,
) extends AuthorizationCheck
with NamedLogging {
/** @param root the last active root certificate for `target`
* @param outgoing all active namespace delegations (excluding root certificates) authorized by `target`
* @param incoming all active namespace delegations for the namespace `target`
*
* All namespace delegations are for namespace `this.namespace`.
*/
private case class GraphNode(
target: Fingerprint,
root: Option[AuthorizedNamespaceDelegation] = None,
@ -113,9 +105,9 @@ class AuthorizationGraph(
private abstract class AuthLevel(val isAuth: Boolean, val isRoot: Boolean)
private object AuthLevel {
object NotAuthorized extends AuthLevel(false, false)
object Standard extends AuthLevel(true, false)
object RootDelegation extends AuthLevel(true, true)
private object NotAuthorized extends AuthLevel(false, false)
private object Standard extends AuthLevel(true, false)
private object RootDelegation extends AuthLevel(true, true)
implicit val orderingAuthLevel: Ordering[AuthLevel] =
Ordering.by[AuthLevel, Int](authl => Seq(authl.isAuth, authl.isRoot).count(identity))
@ -129,23 +121,30 @@ class AuthorizationGraph(
}
/** GraphNodes by GraphNode.target */
private val nodes = new TrieMap[Fingerprint, GraphNode]()
/** temporary cache for the current graph authorization check results
*
* if a fingerprint is empty, then we haven't yet computed the answer
*/
/** Authorized namespace delegations for namespace `this.namespace`, grouped by target */
private val cache =
new TrieMap[Fingerprint, Option[AuthorizedNamespaceDelegation]]()
new TrieMap[Fingerprint, AuthorizedNamespaceDelegation]()
/** Check if `item` is authorized and, if so, add its mapping to this graph.
*
* @throws java.lang.IllegalArgumentException if `item` does not refer to `namespace` or the operation is not REPLACE.
*/
def add(item: AuthorizedNamespaceDelegation)(implicit traceContext: TraceContext): Boolean = {
ErrorUtil.requireArgument(
item.mapping.namespace == namespace,
s"added namespace ${item.mapping.namespace} to $namespace",
s"unable to add namespace delegation for ${item.mapping.namespace} to graph for $namespace",
)
ErrorUtil.requireArgument(
item.operation == Replace,
s"unable to add namespace delegation with operation ${item.operation} to graph for $namespace",
)
if (
AuthorizedTopologyTransaction.isRootCertificate(item) ||
this.areValidAuthorizationKeys(item.signingKeys, requireRoot = true)
this.existsAuthorizedKeyIn(item.signingKeys, requireRoot = true)
) {
doAdd(item)
recompute()
@ -153,6 +152,12 @@ class AuthorizationGraph(
} else false
}
/** Add the mappings in `items` to this graph, regardless if they are authorized or not.
* If an unauthorized namespace delegation is added to the graph, the graph will contain nodes that are not connected to the root.
* The target key of the unauthorized delegation will still be considered unauthorized.
*
* @throws java.lang.IllegalArgumentException if `item` does not refer to `namespace` or the operation is not REPLACE.
*/
def unauthorizedAdd(
items: Seq[AuthorizedNamespaceDelegation]
)(implicit traceContext: TraceContext): Unit = {
@ -163,6 +168,15 @@ class AuthorizationGraph(
private def doAdd(
item: AuthorizedNamespaceDelegation
)(implicit traceContext: TraceContext): Unit = {
ErrorUtil.requireArgument(
item.mapping.namespace == namespace,
s"unable to add namespace delegation for ${item.mapping.namespace} to graph for $namespace",
)
ErrorUtil.requireArgument(
item.operation == Replace,
s"unable to add namespace delegation with operation ${item.operation} to graph for $namespace",
)
val targetKey = item.mapping.target.fingerprint
val curTarget = nodes.getOrElse(targetKey, GraphNode(targetKey))
// if this is a root certificate, remember it separately
@ -181,32 +195,38 @@ class AuthorizationGraph(
}
}
def remove(item: AuthorizedNamespaceDelegation)(implicit traceContext: TraceContext): Boolean =
if (areValidAuthorizationKeys(item.signingKeys, requireRoot = true)) {
/** Check if `item` is authorized and, if so, remove its mapping from this graph.
* Note that addition and removal of a namespace delegation can be authorized by different keys.
*
* @throws java.lang.IllegalArgumentException if `item` does not refer to `namespace` or the operation is not REMOVE.
*/
def remove(item: AuthorizedNamespaceDelegation)(implicit traceContext: TraceContext): Boolean = {
ErrorUtil.requireArgument(
item.mapping.namespace == namespace,
s"unable to remove namespace delegation for ${item.mapping.namespace} from graph for $namespace",
)
ErrorUtil.requireArgument(
item.operation == Remove,
s"unable to remove namespace delegation with operation ${item.operation} from graph for $namespace",
)
if (existsAuthorizedKeyIn(item.signingKeys, requireRoot = true)) {
doRemove(item)
true
} else false
def unauthorizedRemove(
items: Seq[AuthorizedNamespaceDelegation]
)(implicit traceContext: TraceContext): Unit = {
items.foreach(doRemove)
}
/** remove a namespace delegation
*
* note that this one is a bit tricky as the removal might have been authorized
* by a different key than the addition. this is fine but it complicates the book-keeping,
* The implementation is a bit tricky as the removal might have been authorized
* by a different key than the addition. This complicates the book-keeping,
* as we need to track for each target key what the "incoming authorizations" were solely for the
* purpose of being able to clean them up
* purpose of being able to clean them up.
*/
private def doRemove(
item: AuthorizedNamespaceDelegation
)(implicit traceContext: TraceContext): Unit = {
ErrorUtil.requireArgument(
item.mapping.namespace == namespace,
s"removing namespace ${item.mapping.namespace} from $namespace",
)
def myFilter(existing: AuthorizedNamespaceDelegation): Boolean = {
// the auth key doesn't need to match on removals
existing.mapping != item.mapping
@ -248,10 +268,9 @@ class AuthorizationGraph(
updateRemove(targetKey, curTarget.copy(incoming = curTarget.incoming.filter(myFilter)))
}
recompute()
case None =>
logger.warn(s"Superfluous removal of namespace delegation $item")
}
case None => logger.warn(s"Superfluous removal of namespace delegation $item")
}
}
protected def recompute()(implicit traceContext: TraceContext): Unit = {
@ -269,12 +288,12 @@ class AuthorizationGraph(
fingerprint: Fingerprint,
incoming: AuthorizedNamespaceDelegation,
): Unit = {
val current = cache.getOrElseUpdate(fingerprint, None)
val current = cache.get(fingerprint)
val currentLevel = AuthLevel.fromDelegationO(current)
val incomingLevel = AuthLevel.fromDelegationO(Some(incoming))
// this inherited level is higher than current, propagate it
if (incomingLevel > currentLevel) {
cache.update(fingerprint, Some(incoming))
cache.update(fingerprint, incoming)
// get the graph node of this fingerprint
nodes.get(fingerprint).foreach { graphNode =>
// iterate through all edges that depart from this node
@ -310,7 +329,7 @@ class AuthorizationGraph(
}
if (extraDebugInfo && logger.underlying.isDebugEnabled) {
val str =
authorizedDelegations()
cache.values
.map(aud =>
show"auth=${aud.signingKeys}, target=${aud.mapping.target.fingerprint}, root=${AuthorizedTopologyTransaction
.isRootCertificate(aud)}"
@ -320,144 +339,99 @@ class AuthorizationGraph(
}
} else
logger.debug(
s"Namespace ${namespace} has no root certificate, making all ${nodes.size} un-authorized"
s"Namespace $namespace has no root certificate, making all ${nodes.size} un-authorized"
)
override def areValidAuthorizationKeys(
override def existsAuthorizedKeyIn(
authKeys: Set[Fingerprint],
requireRoot: Boolean,
): Boolean = {
authKeys.exists { authKey =>
val authLevel = AuthLevel.fromDelegationO(cache.getOrElse(authKey, None))
authLevel.isRoot || (authLevel.isAuth && !requireRoot)
}
}
): Boolean = authKeys.exists(getAuthorizedKey(_, requireRoot).nonEmpty)
override def getValidAuthorizationKeys(
authKeys: Set[Fingerprint],
private def getAuthorizedKey(
authKey: Fingerprint,
requireRoot: Boolean,
): Set[SigningPublicKey] = authKeys.flatMap(authKey =>
): Option[SigningPublicKey] =
cache
.getOrElse(authKey, None)
.map(_.mapping.target)
.filter(_ => areValidAuthorizationKeys(Set(authKey), requireRoot))
)
def authorizationChain(
startAuthKey: Fingerprint,
requireRoot: Boolean,
): Option[AuthorizationChain] = {
@tailrec
def go(
authKey: Fingerprint,
requireRoot: Boolean,
acc: List[AuthorizedNamespaceDelegation],
): List[AuthorizedNamespaceDelegation] = {
cache.getOrElse(authKey, None) match {
// we've terminated with the root certificate
case Some(delegation) if AuthorizedTopologyTransaction.isRootCertificate(delegation) =>
delegation :: acc
// cert is valid, append it
case Some(delegation) if delegation.mapping.isRootDelegation || !requireRoot =>
go(delegation.signingKeys.head1, delegation.mapping.isRootDelegation, delegation :: acc)
// return empty to indicate failure
case _ => List.empty
.get(authKey)
.filter { delegation =>
val authLevel = AuthLevel.fromDelegationO(Some(delegation))
authLevel.isRoot || (authLevel.isAuth && !requireRoot)
}
}
go(startAuthKey, requireRoot, List.empty) match {
case Nil => None
case rest =>
Some(
AuthorizationChain(
identifierDelegation = Seq.empty,
namespaceDelegations = rest,
Seq.empty,
)
)
}
}
.map(_.mapping.target)
def authorizedDelegations(): Seq[AuthorizedNamespaceDelegation] =
cache.values.flatMap(_.toList).toSeq
override def keysSupportingAuthorization(
authKeys: Set[Fingerprint],
requireRoot: Boolean,
): Set[SigningPublicKey] = authKeys.flatMap(getAuthorizedKey(_, requireRoot))
override def toString: String = s"AuthorizationGraph($namespace)"
def debugInfo() = s"$namespace => ${nodes.mkString("\n")}"
}
trait AuthorizationCheck {
def areValidAuthorizationKeys(authKeys: Set[Fingerprint], requireRoot: Boolean): Boolean
def getValidAuthorizationKeys(
/** Determines if a subset of the given keys is authorized to sign on behalf of the (possibly decentralized) namespace.
*
* @param requireRoot whether the authorization must be suitable to authorize namespace delegations
*/
def existsAuthorizedKeyIn(authKeys: Set[Fingerprint], requireRoot: Boolean): Boolean
/** Returns those keys that are useful for signing on behalf of the (possibly decentralized) namespace.
* Only keys with fingerprint in `authKeys` will be returned.
* The returned keys are not necessarily sufficient to authorize a transaction on behalf of the namespace;
* in case of a decentralized namespace, additional signatures may be required.
*/
def keysSupportingAuthorization(
authKeys: Set[Fingerprint],
requireRoot: Boolean,
): Set[SigningPublicKey]
def authorizationChain(
startAuthKey: Fingerprint,
requireRoot: Boolean,
): Option[AuthorizationChain]
def authorizedDelegations(): Seq[AuthorizedNamespaceDelegation]
}
object AuthorizationCheck {
val empty = new AuthorizationCheck {
override def areValidAuthorizationKeys(
val empty: AuthorizationCheck = new AuthorizationCheck {
override def existsAuthorizedKeyIn(
authKeys: Set[Fingerprint],
requireRoot: Boolean,
): Boolean = false
override def authorizationChain(
startAuthKey: Fingerprint,
requireRoot: Boolean,
): Option[AuthorizationChain] = None
override def getValidAuthorizationKeys(
override def keysSupportingAuthorization(
authKeys: Set[Fingerprint],
requireRoot: Boolean,
): Set[SigningPublicKey] = Set.empty
override def authorizedDelegations(): Seq[AuthorizedNamespaceDelegation] = Seq.empty
override def toString: String = "AuthorizationCheck.empty"
}
}
/** Authorization graph for a decentralized namespace.
*
* @throws java.lang.IllegalArgumentException if `dnd` and `direct` refer to different namespaces.
*/
final case class DecentralizedNamespaceAuthorizationGraph(
dnd: DecentralizedNamespaceDefinition,
direct: AuthorizationGraph,
ownerGraphs: Seq[AuthorizationGraph],
) extends AuthorizationCheck {
override def areValidAuthorizationKeys(
require(
dnd.namespace == direct.namespace,
s"The direct graph refers to the wrong namespace (expected: ${dnd.namespace}, actual: ${direct.namespace}).",
)
override def existsAuthorizedKeyIn(
authKeys: Set[Fingerprint],
requireRoot: Boolean,
): Boolean = {
val viaNamespaceDelegation = direct.areValidAuthorizationKeys(authKeys, requireRoot)
val viaNamespaceDelegation = direct.existsAuthorizedKeyIn(authKeys, requireRoot)
val viaCollective =
ownerGraphs.count(_.areValidAuthorizationKeys(authKeys, requireRoot)) >= dnd.threshold.value
ownerGraphs.count(_.existsAuthorizedKeyIn(authKeys, requireRoot)) >= dnd.threshold.value
viaNamespaceDelegation || viaCollective
}
import cats.syntax.foldable.*
override def getValidAuthorizationKeys(
override def keysSupportingAuthorization(
authKeys: Set[Fingerprint],
requireRoot: Boolean,
): Set[SigningPublicKey] = {
(direct +: ownerGraphs)
.flatMap(_.getValidAuthorizationKeys(authKeys, requireRoot))
.flatMap(_.keysSupportingAuthorization(authKeys, requireRoot))
.toSet
}
override def authorizationChain(
startAuthKey: Fingerprint,
requireRoot: Boolean,
): Option[AuthorizationChain] =
direct
.authorizationChain(startAuthKey, requireRoot)
.orElse(ownerGraphs.map(_.authorizationChain(startAuthKey, requireRoot)).combineAll)
override def authorizedDelegations(): Seq[AuthorizedNamespaceDelegation] =
direct.authorizedDelegations() ++ ownerGraphs.flatMap(_.authorizedDelegations())
}

View File

@ -5,7 +5,8 @@ package com.digitalasset.canton.topology.processing
import cats.Monoid
import cats.data.EitherT
import cats.syntax.parallel.*
import cats.syntax.bifunctor.*
import cats.syntax.foldable.*
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.crypto.CryptoPureApi
import com.digitalasset.canton.data.CantonTimestamp
@ -20,14 +21,10 @@ import com.digitalasset.canton.topology.processing.AuthorizedTopologyTransaction
import com.digitalasset.canton.topology.store.ValidatedTopologyTransaction.GenericValidatedTopologyTransaction
import com.digitalasset.canton.topology.store.*
import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction
import com.digitalasset.canton.topology.transaction.TopologyMapping.{
MappingHash,
RequiredAuthAuthorizations,
}
import com.digitalasset.canton.topology.transaction.TopologyMapping.RequiredAuthAuthorizations
import com.digitalasset.canton.topology.transaction.TopologyTransaction.GenericTopologyTransaction
import com.digitalasset.canton.topology.transaction.*
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.FutureInstances.*
import scala.concurrent.{ExecutionContext, Future}
@ -130,17 +127,14 @@ class IncomingTopologyTransactionAuthorizationValidator(
*/
def validateAndUpdateHeadAuthState(
timestamp: CantonTimestamp,
transactionsToValidate: Seq[GenericSignedTopologyTransaction],
transactionsInStore: Map[MappingHash, GenericSignedTopologyTransaction],
toValidate: GenericSignedTopologyTransaction,
inStore: Option[GenericSignedTopologyTransaction],
expectFullAuthorization: Boolean,
)(implicit
traceContext: TraceContext
): Future[(UpdateAggregation, Seq[GenericValidatedTopologyTransaction])] = {
): Future[(UpdateAggregation, GenericValidatedTopologyTransaction)] = {
for {
authCheckResult <- determineRelevantUidsAndNamespaces(
transactionsToValidate,
transactionsInStore.view.mapValues(_.transaction).toMap,
)
authCheckResult <- determineRelevantUidsAndNamespaces(toValidate, inStore.map(_.transaction))
(updateAggregation, targetDomainVerified) = authCheckResult
loadGraphsF = loadAuthorizationGraphs(timestamp, updateAggregation.authNamespaces)
loadUidsF = loadIdentifierDelegationsCascading(
@ -153,11 +147,11 @@ class IncomingTopologyTransactionAuthorizationValidator(
} yield {
logger.debug(s"Update aggregation yielded ${updateAggregation}")
val validated = targetDomainVerified.map {
val validated = targetDomainVerified match {
case ValidatedTopologyTransaction(tx, None, _) =>
processTransaction(
tx,
transactionsInStore.get(tx.mapping.uniqueKey),
inStore,
expectFullAuthorization,
)
case v => v
@ -173,101 +167,124 @@ class IncomingTopologyTransactionAuthorizationValidator(
}
}
/** Validates a topology transaction as follows:
* <ol>
* <li>check that the transaction has valid signatures and is sufficiently authorized. if not, reject.</li>
* <li>if there are no missing authorizers, as is the case for proposals, we update internal caches for NSD, IDD, and DND</li>
* <li>if this validation is run to determine a final verdict, as is the case for processing topology transactions coming from the domain,
* automatically clear the proposal flag for transactions with sufficent authorizing signatures.</li>
* </ol>
*/
private def processTransaction(
toValidate: GenericSignedTopologyTransaction,
inStore: Option[GenericSignedTopologyTransaction],
expectFullAuthorization: Boolean,
)(implicit traceContext: TraceContext): GenericValidatedTopologyTransaction = {
val processedNs = toValidate.selectMapping[NamespaceDelegation].forall { sigTx =>
processNamespaceDelegation(
toValidate.operation,
AuthorizedTopologyTransaction(sigTx),
)
}
// See validateRootCertificate why we need to check the removal of a root certificate explicitly here.
val signatureCheckResult = validateRootCertificate(toValidate)
.getOrElse(validateSignaturesAndDetermineMissingAuthorizers(toValidate, inStore))
val processedIdent = toValidate.selectMapping[IdentifierDelegation].forall { sigTx =>
processIdentifierDelegation(
toValidate.operation,
AuthorizedTopologyTransaction(sigTx),
)
}
val resultDns = toValidate.selectMapping[DecentralizedNamespaceDefinition].map { sigTx =>
processDecentralizedNamespaceDefinition(
sigTx.operation,
AuthorizedTopologyTransaction(sigTx),
)
}
val processedDns = resultDns.forall(_._1)
val mappingSpecificCheck = processedNs && processedIdent && processedDns
// the transaction is fully authorized if either
// 1. it's a root certificate, or
// 2. there is no authorization error and there are no missing authorizers
// We need to check explicitly for the root certificate here, because a REMOVE operation
// removes itself from the authorization graph, and therefore `isCurrentlyAuthorized` cannot validate it.
val authorizationResult =
if (NamespaceDelegation.isRootCertificate(toValidate))
Right(
(
toValidate,
RequiredAuthAuthorizations.empty, // no missing authorizers
)
)
else isCurrentlyAuthorized(toValidate, inStore)
authorizationResult match {
signatureCheckResult match {
// propagate the rejection reason
case Left(rejectionReason) => ValidatedTopologyTransaction(toValidate, Some(rejectionReason))
// if a transaction wasn't outright rejected, run some additional checks
case Right((validatedTx, missingAuthorizers)) =>
// The mappingSpecificCheck is a necessary condition for having sufficient authorizers.
val isFullyAuthorized =
mappingSpecificCheck && missingAuthorizers.isEmpty
// If a decentralizedNamespace transaction is fully authorized, reflect so in the decentralizedNamespace cache.
// Note: It seems a bit unsafe to update the caches on the assumption that the update will also be eventually
// persisted by the caller (a few levels up the call chain in TopologyStateProcessor.validateAndApplyAuthorization
// as the caller performs additional checks such as the numeric value of the serial number).
// But at least this is safer than where the check was previously (inside processDecentralizedNamespaceDefinition before even
// `isCurrentlyAuthorized` above had finished all checks).
if (isFullyAuthorized) {
resultDns.foreach { case (_, updateDecentralizedNamespaceCache) =>
updateDecentralizedNamespaceCache()
}
}
val acceptMissingAuthorizers =
validatedTx.isProposal && !expectFullAuthorization
// if the result of this validation is final (when processing transactions for the authorized store
// or sequenced transactions from the domain) we set the proposal flag according to whether the transaction
// is fully authorized or not.
// This must not be done when preliminarily validating transactions via the DomainTopologyManager, because
// the validation outcome might change when validating the transaction again after it has been sequenced.
val finalTransaction =
if (validationIsFinal) validatedTx.copy(isProposal = !isFullyAuthorized)
else validatedTx
// Either the transaction is fully authorized or the request allows partial authorization
if (isFullyAuthorized || acceptMissingAuthorizers) {
ValidatedTopologyTransaction(finalTransaction, None)
} else {
if (!missingAuthorizers.isEmpty) {
logger.debug(s"Missing authorizers: $missingAuthorizers")
}
if (!mappingSpecificCheck) {
logger.debug(s"Mapping specific check failed")
}
ValidatedTopologyTransaction(
toValidate,
Some(TopologyTransactionRejection.NotAuthorized),
)
}
handleSuccessfulSignatureChecks(
validatedTx,
missingAuthorizers,
expectFullAuthorization,
)
}
}
private def handleSuccessfulSignatureChecks(
toValidate: GenericSignedTopologyTransaction,
missingAuthorizers: RequiredAuthAuthorizations,
expectFullAuthorization: Boolean,
)(implicit
traceContext: TraceContext
): ValidatedTopologyTransaction[TopologyChangeOp, TopologyMapping] = {
// if there are no missing authorizers, we can update the internal caches
val isFullyAuthorized = if (missingAuthorizers.isEmpty) {
val processedNSD = toValidate
.selectMapping[NamespaceDelegation]
.forall { sigTx => processNamespaceDelegation(AuthorizedTopologyTransaction(sigTx)) }
val processedIDD = toValidate.selectMapping[IdentifierDelegation].forall { sigTx =>
processIdentifierDelegation(AuthorizedTopologyTransaction(sigTx))
}
val processedDND =
toValidate.selectMapping[DecentralizedNamespaceDefinition].forall { sigTx =>
processDecentralizedNamespaceDefinition(AuthorizedTopologyTransaction(sigTx))
}
val mappingSpecificCheck = processedNSD && processedIDD && processedDND
if (!mappingSpecificCheck) {
logger.debug(s"Mapping specific check failed")
}
mappingSpecificCheck
} else { false }
val acceptMissingAuthorizers =
toValidate.isProposal && !expectFullAuthorization
// if the result of this validation is final (when processing transactions for the authorized store
// or sequenced transactions from the domain) we set the proposal flag according to whether the transaction
// is fully authorized or not.
// This must not be done when preliminarily validating transactions via the DomainTopologyManager, because
// the validation outcome might change when validating the transaction again after it has been sequenced.
val finalTransaction =
if (validationIsFinal) toValidate.copy(isProposal = !isFullyAuthorized)
else toValidate
// Either the transaction is fully authorized or the request allows partial authorization
if (isFullyAuthorized || acceptMissingAuthorizers) {
ValidatedTopologyTransaction(finalTransaction, None)
} else {
if (!missingAuthorizers.isEmpty) {
logger.debug(s"Missing authorizers: $missingAuthorizers")
}
ValidatedTopologyTransaction(
toValidate,
Some(TopologyTransactionRejection.NotAuthorized),
)
}
}
/** Validates the signature of the removal of a root certificate.
* This check is done separately from the mechanism used for other topology transactions (ie isCurrentlyAuthorized),
* because removing a root certificate removes it from the authorization graph and therefore
* isCurrentlyAuthorized would not find the key to validate it.
*/
private def validateRootCertificate(
toValidate: GenericSignedTopologyTransaction
): Option[Either[
TopologyTransactionRejection,
(GenericSignedTopologyTransaction, RequiredAuthAuthorizations),
]] = {
toValidate
.selectMapping[NamespaceDelegation]
.filter(NamespaceDelegation.isRootCertificate)
.map { rootCert =>
val result = rootCert.signatures.toSeq.forgetNE
.traverse_(
pureCrypto
.verifySignature(
rootCert.hash.hash,
rootCert.mapping.target,
_,
)
)
.bimap(
TopologyTransactionRejection.SignatureCheckFailed,
_ => (toValidate, RequiredAuthAuthorizations.empty /* no missing authorizers */ ),
)
result
}
}
/** loads all identifier delegations into the identifier delegation cache
*
* This function has two "modes". On a cascading update affecting namespaces, we have
@ -291,16 +308,15 @@ class IncomingTopologyTransactionAuthorizationValidator(
}
private def processIdentifierDelegation(
op: TopologyChangeOp,
tx: AuthorizedIdentifierDelegation,
tx: AuthorizedIdentifierDelegation
): Boolean = {
// check authorization
val check = getAuthorizationCheckForNamespace(tx.mapping.identifier.namespace)
val keysAreValid = check.areValidAuthorizationKeys(tx.signingKeys, requireRoot = false)
val keysAreValid = check.existsAuthorizedKeyIn(tx.signingKeys, requireRoot = false)
// update identifier delegation cache if necessary
if (keysAreValid) {
val updateOp: Set[AuthorizedIdentifierDelegation] => Set[AuthorizedIdentifierDelegation] =
op match {
tx.operation match {
case TopologyChangeOp.Replace =>
x => x + tx
case TopologyChangeOp.Remove =>
@ -313,12 +329,11 @@ class IncomingTopologyTransactionAuthorizationValidator(
}
private def processNamespaceDelegation(
op: TopologyChangeOp,
tx: AuthorizedNamespaceDelegation,
tx: AuthorizedNamespaceDelegation
)(implicit traceContext: TraceContext): Boolean = {
val graph = getAuthorizationGraphForNamespace(tx.mapping.namespace)
// add or remove including authorization check
op match {
tx.operation match {
case TopologyChangeOp.Replace => graph.add(tx)
case TopologyChangeOp.Remove => graph.remove(tx)
}
@ -330,9 +345,8 @@ class IncomingTopologyTransactionAuthorizationValidator(
* by the caller once the mapping is to be committed.
*/
private def processDecentralizedNamespaceDefinition(
op: TopologyChangeOp,
tx: AuthorizedDecentralizedNamespaceDefinition,
)(implicit traceContext: TraceContext): (Boolean, () => Unit) = {
tx: AuthorizedDecentralizedNamespaceDefinition
)(implicit traceContext: TraceContext): Boolean = {
val decentralizedNamespace = tx.mapping.namespace
val dnsGraph = decentralizedNamespaceCache
.get(decentralizedNamespace)
@ -360,26 +374,30 @@ class IncomingTopologyTransactionAuthorizationValidator(
)
newDecentralizedNamespaceGraph
}
val isAuthorized = dnsGraph.areValidAuthorizationKeys(tx.signingKeys, false)
val isAuthorized = dnsGraph.existsAuthorizedKeyIn(tx.signingKeys, requireRoot = false)
(
isAuthorized,
() => {
val ownerGraphs = tx.mapping.owners.forgetNE.toSeq.map(getAuthorizationGraphForNamespace)
decentralizedNamespaceCache
.put(
decentralizedNamespace,
(tx.mapping, dnsGraph.copy(dnd = tx.mapping, ownerGraphs = ownerGraphs)),
)
.discard
},
)
if (isAuthorized) {
tx.operation match {
case TopologyChangeOp.Remove =>
decentralizedNamespaceCache.remove(decentralizedNamespace).discard
case TopologyChangeOp.Replace =>
val ownerGraphs = tx.mapping.owners.forgetNE.toSeq.map(getAuthorizationGraphForNamespace)
decentralizedNamespaceCache
.put(
decentralizedNamespace,
(tx.mapping, dnsGraph.copy(dnd = tx.mapping, ownerGraphs = ownerGraphs)),
)
.discard
}
}
isAuthorized
}
private def determineRelevantUidsAndNamespaces(
transactionsToValidate: Seq[GenericSignedTopologyTransaction],
transactionsInStore: Map[MappingHash, GenericTopologyTransaction],
): Future[(UpdateAggregation, Seq[GenericValidatedTopologyTransaction])] = {
toValidate: GenericSignedTopologyTransaction,
inStore: Option[GenericTopologyTransaction],
): Future[(UpdateAggregation, GenericValidatedTopologyTransaction)] = {
def verifyDomain(
tx: GenericSignedTopologyTransaction
): Either[TopologyTransactionRejection, Unit] =
@ -395,22 +413,19 @@ class IncomingTopologyTransactionAuthorizationValidator(
// we need to figure out for which namespaces and uids we need to load the validation checks
// and for which uids and namespaces we'll have to perform a cascading update
import UpdateAggregation.monoid
transactionsToValidate.parFoldMapA { toValidate =>
EitherT
.fromEither[Future](verifyDomain(toValidate))
.fold(
rejection =>
(UpdateAggregation(), Seq(ValidatedTopologyTransaction(toValidate, Some(rejection)))),
_ =>
(
UpdateAggregation().add(
toValidate.mapping,
transactionsInStore.get(toValidate.mapping.uniqueKey),
),
Seq(ValidatedTopologyTransaction(toValidate, None)),
EitherT
.fromEither[Future](verifyDomain(toValidate))
.fold(
rejection =>
(UpdateAggregation(), ValidatedTopologyTransaction(toValidate, Some(rejection))),
_ =>
(
UpdateAggregation().add(
toValidate.mapping,
inStore,
),
)
}
ValidatedTopologyTransaction(toValidate, None),
),
)
}
}

View File

@ -43,7 +43,7 @@ trait TransactionAuthorizationValidator {
protected def pureCrypto: CryptoPureApi
def isCurrentlyAuthorized(
def validateSignaturesAndDetermineMissingAuthorizers(
toValidate: GenericSignedTopologyTransaction,
inStore: Option[GenericSignedTopologyTransaction],
)(implicit
@ -72,41 +72,41 @@ trait TransactionAuthorizationValidator {
val namespaceWithRootAuthorizations =
required.namespacesWithRoot.map { ns =>
val check = getAuthorizationCheckForNamespace(ns)
val keysWithDelegation = check.getValidAuthorizationKeys(
val keysUsed = check.keysSupportingAuthorization(
signingKeys,
requireRoot = true,
)
val keysAuthorizeNamespace =
check.areValidAuthorizationKeys(signingKeys, requireRoot = true)
(ns -> (keysAuthorizeNamespace, keysWithDelegation))
check.existsAuthorizedKeyIn(signingKeys, requireRoot = true)
(ns -> (keysAuthorizeNamespace, keysUsed))
}.toMap
// Now let's determine which namespaces and uids actually delegated to any of the keys
val namespaceAuthorizations = required.namespaces.map { ns =>
val check = getAuthorizationCheckForNamespace(ns)
val keysWithDelegation = check.getValidAuthorizationKeys(
val keysUsed = check.keysSupportingAuthorization(
signingKeys,
requireRoot = false,
)
val keysAuthorizeNamespace = check.areValidAuthorizationKeys(signingKeys, requireRoot = false)
(ns -> (keysAuthorizeNamespace, keysWithDelegation))
val keysAuthorizeNamespace = check.existsAuthorizedKeyIn(signingKeys, requireRoot = false)
(ns -> (keysAuthorizeNamespace, keysUsed))
}.toMap
val uidAuthorizations =
required.uids.map { uid =>
val check = getAuthorizationCheckForNamespace(uid.namespace)
val keysWithDelegation = check.getValidAuthorizationKeys(
val keysUsed = check.keysSupportingAuthorization(
signingKeys,
requireRoot = false,
)
val keysAuthorizeNamespace =
check.areValidAuthorizationKeys(signingKeys, requireRoot = false)
check.existsAuthorizedKeyIn(signingKeys, requireRoot = false)
val keyForUid =
getAuthorizedIdentifierDelegation(check, uid, toValidate.signatures.map(_.signedBy))
.map(_.mapping.target)
(uid -> (keysAuthorizeNamespace || keyForUid.nonEmpty, keysWithDelegation ++ keyForUid))
(uid -> (keysAuthorizeNamespace || keyForUid.nonEmpty, keysUsed ++ keyForUid))
}.toMap
val extraKeyAuthorizations = {
@ -132,7 +132,7 @@ trait TransactionAuthorizationValidator {
.toMap
}
val allAuthorizingKeys =
val allKeysUsedForAuthorization =
(namespaceWithRootAuthorizations.values ++
namespaceAuthorizations.values ++
uidAuthorizations.values ++
@ -145,9 +145,9 @@ trait TransactionAuthorizationValidator {
logAuthorizations("Authorizations for UIDs", uidAuthorizations)
logAuthorizations("Authorizations for extraKeys", extraKeyAuthorizations)
logger.debug(s"All authorizing keys: ${allAuthorizingKeys.keySet}")
logger.debug(s"All keys used for authorization: ${allKeysUsedForAuthorization.keySet}")
val superfluousKeys = signingKeys -- allAuthorizingKeys.keys
val superfluousKeys = signingKeys -- allKeysUsedForAuthorization.keys
for {
_ <- Either.cond[TopologyTransactionRejection, Unit](
// there must be at least 1 key used for the signatures for one of the delegation mechanisms
@ -160,7 +160,7 @@ trait TransactionAuthorizationValidator {
},
)
txWithValidSignatures <- toValidate
txWithSignaturesToVerify <- toValidate
.removeSignatures(superfluousKeys)
.toRight({
logger.info(
@ -169,9 +169,9 @@ trait TransactionAuthorizationValidator {
TopologyTransactionRejection.NoDelegationFoundForKeys(superfluousKeys)
})
_ <- txWithValidSignatures.signatures.forgetNE.toList
_ <- txWithSignaturesToVerify.signatures.forgetNE.toList
.traverse_(sig =>
allAuthorizingKeys
allKeysUsedForAuthorization
.get(sig.signedBy)
.toRight({
val msg =
@ -182,7 +182,7 @@ trait TransactionAuthorizationValidator {
.flatMap(key =>
pureCrypto
.verifySignature(
txWithValidSignatures.hash.hash,
txWithSignaturesToVerify.hash.hash,
key,
sig,
)
@ -202,7 +202,7 @@ trait TransactionAuthorizationValidator {
extraKeys = onlyFullyAuthorized(extraKeyAuthorizations),
)
(
txWithValidSignatures,
txWithSignaturesToVerify,
requiredAuth
.satisfiedByActualAuthorizers(actual)
.fold(identity, _ => RequiredAuthAuthorizations.empty),
@ -236,7 +236,7 @@ trait TransactionAuthorizationValidator {
): Option[AuthorizedIdentifierDelegation] = {
getIdentifierDelegationsForUid(uid)
.find(aid =>
authKeys(aid.mapping.target.id) && graph.areValidAuthorizationKeys(
authKeys(aid.mapping.target.id) && graph.existsAuthorizedKeyIn(
aid.signingKeys,
requireRoot = false,
)
@ -254,9 +254,7 @@ trait TransactionAuthorizationValidator {
namespace: Namespace
): AuthorizationCheck = {
val decentralizedNamespaceCheck = decentralizedNamespaceCache.get(namespace).map(_._2)
val namespaceCheck = namespaceCache.get(
namespace
)
val namespaceCheck = namespaceCache.get(namespace)
decentralizedNamespaceCheck
.orElse(namespaceCheck)
.getOrElse(AuthorizationCheck.empty)

View File

@ -5,7 +5,6 @@ package com.digitalasset.canton.topology.store
import cats.data.EitherT
import cats.syntax.traverse.*
import com.digitalasset.daml.lf.data.Ref.PackageId
import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.ProtoDeserializationError
import com.digitalasset.canton.config.CantonRequireTypes.{LengthLimitedString, String255}
@ -40,6 +39,7 @@ import com.digitalasset.canton.topology.transaction.TopologyTransaction.{
import com.digitalasset.canton.topology.transaction.*
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.version.ProtocolVersion
import com.digitalasset.daml.lf.data.Ref.PackageId
import com.google.common.annotations.VisibleForTesting
import scala.concurrent.duration.Duration

View File

@ -10,14 +10,9 @@ import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.logging.ErrorLoggingContext
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.protocol.OnboardingRestriction
import com.digitalasset.canton.topology.*
import com.digitalasset.canton.topology.processing.EffectiveTime
import com.digitalasset.canton.topology.transaction.TopologyMapping
import com.digitalasset.canton.topology.{
DomainId,
Member,
ParticipantId,
PartyId,
TopologyManagerError,
}
sealed trait TopologyTransactionRejection extends PrettyPrinting with Product with Serializable {
def asString: String
@ -45,25 +40,12 @@ object TopologyTransactionRejection {
TopologyManagerError.UnauthorizedTransaction.Failure(asString)
}
final case class ThresholdTooHigh(actual: Int, mustBeAtMost: Int)
extends TopologyTransactionRejection {
override def asString: String =
s"Threshold must not be higher than $mustBeAtMost, but was $actual."
override def pretty: Pretty[ThresholdTooHigh] = prettyOfString(_ => asString)
override def toTopologyManagerError(implicit elc: ErrorLoggingContext) = {
TopologyManagerError.InvalidThreshold.ThresholdTooHigh(actual, mustBeAtMost)
}
}
final case class UnknownParties(parties: Seq[PartyId]) extends TopologyTransactionRejection {
override def asString: String = s"Parties ${parties.sorted.mkString(", ")} are unknown."
override def pretty: Pretty[UnknownParties.this.type] = prettyOfString(_ => asString)
override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError =
TopologyManagerError.UnknownParties.Failure(parties)
}
final case class OnboardingRestrictionInPlace(
@ -192,6 +174,25 @@ object TopologyTransactionRejection {
)
}
final case class PartyExceedsHostingLimit(
partyId: PartyId,
limit: Int,
numParticipants: Int,
) extends TopologyTransactionRejection {
override def asString: String =
s"Party $partyId exceeds hosting limit of $limit with desired number of $numParticipants hosting participants."
override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError =
TopologyManagerError.PartyExceedsHostingLimit.Reject(partyId, limit, numParticipants)
override def pretty: Pretty[PartyExceedsHostingLimit.this.type] =
prettyOfClass(
param("partyId", _.partyId),
param("limit", _.limit),
param("number of hosting participants", _.numParticipants),
)
}
final case class MissingMappings(missing: Map[Member, Seq[TopologyMapping.Code]])
extends TopologyTransactionRejection {
override def asString: String = {
@ -209,4 +210,24 @@ object TopologyTransactionRejection {
override def pretty: Pretty[MissingMappings.this.type] = prettyOfString(_ => asString)
}
final case class MissingDomainParameters(effective: EffectiveTime)
extends TopologyTransactionRejection {
override def asString: String = s"Missing domain parameters at $effective"
override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError =
TopologyManagerError.MissingTopologyMapping.MissingDomainParameters(effective)
override def pretty: Pretty[MissingDomainParameters.this.type] = prettyOfString(_ => asString)
}
final case class NamespaceAlreadyInUse(namespace: Namespace)
extends TopologyTransactionRejection {
override def asString: String = s"The namespace $namespace is already used by another entity."
override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError =
TopologyManagerError.NamespaceAlreadyInUse.Reject(namespace)
override def pretty: Pretty[NamespaceAlreadyInUse.this.type] = prettyOfString(_ => asString)
}
}

View File

@ -401,14 +401,14 @@ object NamespaceDelegation {
target: SigningPublicKey,
isRootDelegation: Boolean,
): NamespaceDelegation =
create(namespace, target, isRootDelegation).fold(err => sys.error(err), identity)
create(namespace, target, isRootDelegation).valueOr(err =>
throw new IllegalArgumentException((err))
)
def code: TopologyMapping.Code = Code.NamespaceDelegation
/** Returns true if the given transaction is a self-signed root certificate */
def isRootCertificate(sit: GenericSignedTopologyTransaction): Boolean = {
((sit.operation == TopologyChangeOp.Replace && sit.serial == PositiveInt.one) ||
(sit.operation == TopologyChangeOp.Remove && sit.serial != PositiveInt.one)) &&
sit.mapping
.select[transaction.NamespaceDelegation]
.exists(ns =>
@ -944,8 +944,8 @@ final case class PartyHostingLimits(
override def code: Code = Code.PartyHostingLimits
override def namespace: Namespace = domainId.namespace
override def maybeUid: Option[UniqueIdentifier] = Some(domainId.uid)
override def namespace: Namespace = partyId.namespace
override def maybeUid: Option[UniqueIdentifier] = Some(partyId.uid)
override def restrictedToDomain: Option[DomainId] = Some(domainId)
@ -1057,7 +1057,7 @@ object HostingParticipant {
} yield HostingParticipant(participantId, permission)
}
final case class PartyToParticipant(
final case class PartyToParticipant private (
partyId: PartyId,
domainId: Option[DomainId],
threshold: PositiveInt,
@ -1135,6 +1135,51 @@ final case class PartyToParticipant(
object PartyToParticipant {
def create(
partyId: PartyId,
domainId: Option[DomainId],
threshold: PositiveInt,
participants: Seq[HostingParticipant],
groupAddressing: Boolean,
): Either[String, PartyToParticipant] = {
val noDuplicatePParticipants = {
val duplicatePermissions =
participants.groupBy(_.participantId).values.filter(_.size > 1).toList
Either.cond(
duplicatePermissions.isEmpty,
(),
s"Participants may only be assigned one permission: $duplicatePermissions",
)
}
val thresholdCanBeMet = {
val numConfirmingParticipants =
participants.count(_.permission >= ParticipantPermission.Confirmation)
Either
.cond(
// we allow to not meet the threshold criteria if there are only observing participants.
// but as soon as there is 1 confirming participant, the threshold must theoretically be satisfiable,
// otherwise the party can never confirm a transaction.
numConfirmingParticipants == 0 || threshold.value <= numConfirmingParticipants,
(),
s"Party $partyId cannot meet threshold of $threshold confirming participants with participants $participants",
)
.map(_ => PartyToParticipant(partyId, domainId, threshold, participants, groupAddressing))
}
noDuplicatePParticipants.flatMap(_ => thresholdCanBeMet)
}
def tryCreate(
partyId: PartyId,
domainId: Option[DomainId],
threshold: PositiveInt,
participants: Seq[HostingParticipant],
groupAddressing: Boolean,
): PartyToParticipant =
create(partyId, domainId, threshold, participants, groupAddressing).valueOr(err =>
throw new IllegalArgumentException(err)
)
def uniqueKey(partyId: PartyId, domainId: Option[DomainId]): MappingHash =
TopologyMapping.buildUniqueKey(code)(
_.add(partyId.toProtoPrimitive).add(domainId.fold("")(_.toProtoPrimitive))
@ -1158,7 +1203,7 @@ object PartyToParticipant {
}
// AuthorityOf
final case class AuthorityOf(
final case class AuthorityOf private (
partyId: PartyId,
domainId: Option[DomainId],
threshold: PositiveInt,
@ -1199,6 +1244,21 @@ final case class AuthorityOf(
object AuthorityOf {
def create(
partyId: PartyId,
domainId: Option[DomainId],
threshold: PositiveInt,
parties: Seq[PartyId],
): Either[String, AuthorityOf] = {
Either
.cond(
threshold.value <= parties.size,
(),
s"Invalid threshold $threshold for $partyId with authorizers $parties",
)
.map(_ => AuthorityOf(partyId, domainId, threshold, parties))
}
def uniqueKey(partyId: PartyId, domainId: Option[DomainId]): MappingHash =
TopologyMapping.buildUniqueKey(code)(
_.add(partyId.toProtoPrimitive).add(domainId.fold("")(_.toProtoPrimitive))
@ -1217,7 +1277,9 @@ object AuthorityOf {
if (value.domain.nonEmpty)
DomainId.fromProtoPrimitive(value.domain, "domain").map(_.some)
else Right(None)
} yield AuthorityOf(partyId, domainId, threshold, parties)
authorityOf <- create(partyId, domainId, threshold, parties)
.leftMap(ProtoDeserializationError.OtherError)
} yield authorityOf
}
/** Dynamic domain parameter settings for the domain

View File

@ -5,14 +5,19 @@ package com.digitalasset.canton.topology.transaction
import cats.data.EitherT
import cats.instances.future.*
import cats.instances.order.*
import cats.syntax.semigroup.*
import com.digitalasset.canton.crypto.KeyPurpose
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.protocol.OnboardingRestriction
import com.digitalasset.canton.protocol.{DynamicDomainParameters, OnboardingRestriction}
import com.digitalasset.canton.topology.*
import com.digitalasset.canton.topology.processing.EffectiveTime
import com.digitalasset.canton.topology.store.StoredTopologyTransactions.PositiveStoredTopologyTransactions
import com.digitalasset.canton.topology.store.TopologyTransactionRejection.{
InvalidTopologyMapping,
NamespaceAlreadyInUse,
}
import com.digitalasset.canton.topology.store.{
TopologyStore,
TopologyStoreId,
@ -24,7 +29,6 @@ import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.EitherTUtil
import scala.concurrent.{ExecutionContext, Future}
import scala.math.Ordered.*
trait TopologyMappingChecks {
def checkTransaction(
@ -127,6 +131,27 @@ class ValidatingTopologyMappingChecks(
.select[TopologyChangeOp.Replace, AuthorityOf]
.map(checkAuthorityOf(effective, _))
case (
Code.DecentralizedNamespaceDefinition,
None | Some(Code.DecentralizedNamespaceDefinition),
) =>
toValidate
.select[TopologyChangeOp.Replace, DecentralizedNamespaceDefinition]
.map(
checkDecentralizedNamespaceDefinitionReplace(
_,
inStore.flatMap(_.select[TopologyChangeOp, DecentralizedNamespaceDefinition]),
)
)
case (
Code.NamespaceDelegation,
None | Some(Code.NamespaceDelegation),
) =>
toValidate
.select[TopologyChangeOp.Replace, NamespaceDelegation]
.map(checkNamespaceDelegationReplace)
case otherwise => None
}
@ -190,6 +215,33 @@ class ValidatingTopologyMappingChecks(
ensureParticipantDoesNotHostParties(effective, toValidate.mapping.participantId)
}
private def loadDomainParameters(
effective: EffectiveTime
)(implicit
traceContext: TraceContext
): EitherT[Future, TopologyTransactionRejection, DynamicDomainParameters] = {
loadFromStore(effective, DomainParametersState.code).subflatMap { domainParamCandidates =>
val params = domainParamCandidates.result.view
.flatMap(_.selectMapping[DomainParametersState])
.map(_.mapping.parameters)
.toList
params match {
case Nil =>
logger.error(
"Can not determine domain parameters."
)
Left(TopologyTransactionRejection.MissingDomainParameters(effective))
case param :: Nil => Right(param)
case param :: rest =>
logger.error(
s"Multiple domain parameters at ${effective} ${rest.size + 1}. Using first one: $param."
)
Right(param)
}
}
}
private def checkDomainTrustCertificateReplace(
effective: EffectiveTime,
toValidate: SignedTopologyTransaction[TopologyChangeOp, DomainTrustCertificate],
@ -199,25 +251,7 @@ class ValidatingTopologyMappingChecks(
def loadOnboardingRestriction()
: EitherT[Future, TopologyTransactionRejection, OnboardingRestriction] = {
loadFromStore(effective, DomainParametersState.code).map { domainParamCandidates =>
val restrictions = domainParamCandidates.result.view
.flatMap(_.selectMapping[DomainParametersState])
.map(_.mapping.parameters.onboardingRestriction)
.toList
restrictions match {
case Nil =>
logger.error(
"Can not determine the onboarding restriction. Assuming the domain is locked."
)
OnboardingRestriction.RestrictedLocked
case param :: Nil => param
case param :: rest =>
logger.error(
s"Multiple domain parameters at ${effective} ${rest.size + 1}. Using first one with restriction ${param}."
)
param
}
}
loadDomainParameters(effective).map(_.onboardingRestriction)
}
def checkDomainIsNotLocked(restriction: OnboardingRestriction) = {
@ -311,65 +345,97 @@ class ValidatingTopologyMappingChecks(
traceContext: TraceContext
): EitherT[Future, TopologyTransactionRejection, Unit] = {
import toValidate.mapping
val numConfirmingParticipants =
mapping.participants.count(_.permission >= ParticipantPermission.Confirmation)
def checkParticipants() = {
val newParticipants = mapping.participants.map(_.participantId).toSet --
inStore.toList.flatMap(_.mapping.participants.map(_.participantId))
for {
participantTransactions <- EitherT.right[TopologyTransactionRejection](
store
.findPositiveTransactions(
CantonTimestamp.MaxValue,
asOfInclusive = false,
isProposal = false,
types = Seq(DomainTrustCertificate.code, OwnerToKeyMapping.code),
filterUid = Some(newParticipants.toSeq.map(_.uid)),
filterNamespace = None,
)
)
// check that all participants are known on the domain
missingParticipantCertificates = newParticipants -- participantTransactions
.collectOfMapping[DomainTrustCertificate]
.result
.map(_.mapping.participantId)
_ <- EitherTUtil.condUnitET[Future][TopologyTransactionRejection](
missingParticipantCertificates.isEmpty,
TopologyTransactionRejection.UnknownMembers(missingParticipantCertificates.toSeq),
)
// check that all known participants have keys registered
participantsWithInsufficientKeys =
newParticipants -- participantTransactions
.collectOfMapping[OwnerToKeyMapping]
.result
.view
.filter { tx =>
val keyPurposes = tx.mapping.keys.map(_.purpose).toSet
requiredKeyPurposes.forall(keyPurposes)
}
.map(_.mapping.member)
.collect { case pid: ParticipantId => pid }
.toSeq
_ <- EitherTUtil.condUnitET[Future][TopologyTransactionRejection](
participantsWithInsufficientKeys.isEmpty,
TopologyTransactionRejection.InsufficientKeys(participantsWithInsufficientKeys.toSeq),
)
} yield {
()
}
}
def checkHostingLimits(effective: EffectiveTime) = for {
hostingLimitsCandidates <- loadFromStore(
effective,
code = PartyHostingLimits.code,
filterUid = Some(Seq(toValidate.mapping.partyId.uid)),
)
hostingLimits = hostingLimitsCandidates.result.view
.flatMap(_.selectMapping[PartyHostingLimits])
.map(_.mapping.quota)
.toList
partyHostingLimit = hostingLimits match {
case Nil => // No hosting limits found. This is expected if no restrictions are in place
None
case quota :: Nil => Some(quota)
case multiple @ (quota :: _) =>
logger.error(
s"Multiple PartyHostingLimits at ${effective} ${multiple.size}. Using first one with quota $quota."
)
Some(quota)
}
// TODO(#14050) load default party hosting limits from dynamic domain parameters in case the party
// doesn't have a specific PartyHostingLimits mapping issued by the domain.
_ <- partyHostingLimit match {
case Some(limit) =>
EitherTUtil.condUnitET[Future][TopologyTransactionRejection](
toValidate.mapping.participants.size <= limit,
TopologyTransactionRejection.PartyExceedsHostingLimit(
toValidate.mapping.partyId,
limit,
toValidate.mapping.participants.size,
),
)
case None => EitherTUtil.unit[TopologyTransactionRejection]
}
} yield ()
for {
// check the threshold
_ <- EitherTUtil.condUnitET[Future][TopologyTransactionRejection](
mapping.threshold.value <= numConfirmingParticipants,
TopologyTransactionRejection.ThresholdTooHigh(
mapping.threshold.value,
numConfirmingParticipants,
),
)
_ <- checkParticipants()
_ <- checkHostingLimits(EffectiveTime.MaxValue)
} yield ()
newParticipants = mapping.participants.map(_.participantId).toSet --
inStore.toList.flatMap(_.mapping.participants.map(_.participantId))
participantTransactions <- EitherT.right[TopologyTransactionRejection](
store
.findPositiveTransactions(
CantonTimestamp.MaxValue,
asOfInclusive = false,
isProposal = false,
types = Seq(DomainTrustCertificate.code, OwnerToKeyMapping.code),
filterUid = Some(newParticipants.toSeq.map(_.uid)),
filterNamespace = None,
)
)
// check that all participants are known on the domain
missingParticipantCertificates = newParticipants -- participantTransactions
.collectOfMapping[DomainTrustCertificate]
.result
.map(_.mapping.participantId)
_ <- EitherTUtil.condUnitET[Future][TopologyTransactionRejection](
missingParticipantCertificates.isEmpty,
TopologyTransactionRejection.UnknownMembers(missingParticipantCertificates.toSeq),
)
// check that all known participants have keys registered
participantsWithInsufficientKeys =
newParticipants -- participantTransactions
.collectOfMapping[OwnerToKeyMapping]
.result
.view
.filter { tx =>
val keyPurposes = tx.mapping.keys.map(_.purpose).toSet
requiredKeyPurposes.forall(keyPurposes)
}
.map(_.mapping.member)
.collect { case pid: ParticipantId => pid }
.toSeq
_ <- EitherTUtil.condUnitET[Future][TopologyTransactionRejection](
participantsWithInsufficientKeys.isEmpty,
TopologyTransactionRejection.InsufficientKeys(participantsWithInsufficientKeys.toSeq),
)
} yield {
()
}
}
private def checkOwnerToKeyMappingReplace(
@ -465,15 +531,7 @@ class ValidatingTopologyMappingChecks(
val newMediators = (toValidate.mapping.allMediatorsInGroup.toSet -- inStore.toList.flatMap(
_.mapping.allMediatorsInGroup
)).map(identity[Member])
val thresholdCheck = EitherTUtil.condUnitET(
toValidate.mapping.threshold.value <= toValidate.mapping.active.size,
TopologyTransactionRejection.ThresholdTooHigh(
toValidate.mapping.threshold.value,
toValidate.mapping.active.size,
),
)
thresholdCheck.flatMap(_ => checkMissingNsdAndOtkMappings(effectiveTime, newMediators))
checkMissingNsdAndOtkMappings(effectiveTime, newMediators)
}
private def checkSequencerDomainStateReplace(
@ -485,14 +543,7 @@ class ValidatingTopologyMappingChecks(
_.mapping.allSequencers
)).map(identity[Member])
val thresholdCheck = EitherTUtil.condUnitET(
toValidate.mapping.threshold.value <= toValidate.mapping.active.size,
TopologyTransactionRejection.ThresholdTooHigh(
toValidate.mapping.threshold.value,
toValidate.mapping.active.size,
),
)
thresholdCheck.flatMap(_ => checkMissingNsdAndOtkMappings(effectiveTime, newSequencers))
checkMissingNsdAndOtkMappings(effectiveTime, newSequencers)
}
private def checkAuthorityOf(
@ -521,15 +572,85 @@ class ValidatingTopologyMappingChecks(
}
}
val checkThreshold = {
val actual = toValidate.mapping.threshold.value
val mustBeAtMost = toValidate.mapping.parties.size
EitherTUtil.condUnitET(
actual <= mustBeAtMost,
TopologyTransactionRejection.ThresholdTooHigh(actual, mustBeAtMost),
)
checkPartiesAreKnown()
}
private def checkDecentralizedNamespaceDefinitionReplace(
toValidate: SignedTopologyTransaction[
TopologyChangeOp.Replace,
DecentralizedNamespaceDefinition,
],
inStore: Option[SignedTopologyTransaction[
TopologyChangeOp,
DecentralizedNamespaceDefinition,
]],
)(implicit traceContext: TraceContext): EitherT[Future, TopologyTransactionRejection, Unit] = {
def checkDecentralizedNamespaceDerivedFromOwners()
: EitherT[Future, TopologyTransactionRejection, Unit] =
if (inStore.isEmpty) {
// The very first decentralized namespace definition must have namespace computed from the owners
EitherTUtil.condUnitET(
toValidate.mapping.namespace == DecentralizedNamespaceDefinition
.computeNamespace(toValidate.mapping.owners),
InvalidTopologyMapping(
s"The decentralized namespace ${toValidate.mapping.namespace} is not derived from the owners ${toValidate.mapping.owners.toSeq.sorted}"
),
)
} else {
EitherTUtil.unit
}
def checkNoClashWithRootCertificates()(implicit
traceContext: TraceContext
): EitherT[Future, TopologyTransactionRejection, Unit] = {
loadFromStore(
EffectiveTime.MaxValue,
Code.NamespaceDelegation,
filterUid = None,
filterNamespace = Some(Seq(toValidate.mapping.namespace)),
).flatMap { namespaceDelegations =>
val foundRootCertWithSameNamespace = namespaceDelegations.result.exists(stored =>
NamespaceDelegation.isRootCertificate(stored.transaction)
)
EitherTUtil.condUnitET(
!foundRootCertWithSameNamespace,
NamespaceAlreadyInUse(toValidate.mapping.namespace),
)
}
}
checkThreshold.flatMap(_ => checkPartiesAreKnown())
for {
_ <- checkDecentralizedNamespaceDerivedFromOwners()
_ <- checkNoClashWithRootCertificates()
} yield ()
}
private def checkNamespaceDelegationReplace(
toValidate: SignedTopologyTransaction[
TopologyChangeOp.Replace,
NamespaceDelegation,
]
)(implicit traceContext: TraceContext): EitherT[Future, TopologyTransactionRejection, Unit] = {
def checkNoClashWithDecentralizedNamespaces()(implicit
traceContext: TraceContext
): EitherT[Future, TopologyTransactionRejection, Unit] = {
EitherTUtil.ifThenET(NamespaceDelegation.isRootCertificate(toValidate)) {
loadFromStore(
EffectiveTime.MaxValue,
Code.DecentralizedNamespaceDefinition,
filterUid = None,
filterNamespace = Some(Seq(toValidate.mapping.namespace)),
).flatMap { dns =>
val foundDecentralizedNamespaceWithSameNamespace = dns.result.nonEmpty
EitherTUtil.condUnitET(
!foundDecentralizedNamespaceWithSameNamespace,
NamespaceAlreadyInUse(toValidate.mapping.namespace),
)
}
}
}
checkNoClashWithDecentralizedNamespaces()
}
}

View File

@ -6,7 +6,6 @@ package com.digitalasset.canton.tracing
import com.daml.scalautil.Statement.discard
import com.digitalasset.canton.concurrent.DirectExecutionContext
import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
import com.digitalasset.canton.lifecycle.FutureUnlessShutdownImpl.AbortedDueToShutdownException
import com.digitalasset.canton.logging.TracedLogger
import com.github.blemale.scaffeine.{AsyncLoadingCache, Scaffeine}
@ -75,8 +74,10 @@ class TracedAsyncLoadingCache[K, V](
)(tracedLogger: TracedLogger) {
implicit private[this] val ec: ExecutionContext = DirectExecutionContext(tracedLogger)
/** @see com.github.blemale.scaffeine.AsyncLoadingCache.get
*/
/*
* See com.github.blemale.scaffeine.AsyncLoadingCache.get
* If shutting down the future returned will be failed with a AbortedDueToShutdownException
*/
def get(key: K)(implicit traceContext: TraceContext): Future[V] =
underlying.get(TracedKey(key)(traceContext))
@ -85,12 +86,14 @@ class TracedAsyncLoadingCache[K, V](
discard(underlying.synchronous().asMap().filterInPlace((t, v) => !filter(t.key, v)))
}
def getUS(key: K)(implicit traceContext: TraceContext): FutureUnlessShutdown[V] = {
def getUS(key: K)(implicit traceContext: TraceContext): FutureUnlessShutdown[V] =
FutureUnlessShutdown.transformAbortedF(get(key))
}
/** @see com.github.blemale.scaffeine.AsyncLoadingCache.getAll
*/
/*
* See com.github.blemale.scaffeine.AsyncLoadingCache.getAll
* If shutting down the future returned will be failed with a AbortedDueToShutdownException wrapped inside
* a java.util.concurrent.CompletionException
*/
def getAll(keys: Iterable[K])(implicit traceContext: TraceContext): Future[Map[K, V]] =
underlying
.getAll(keys.map(TracedKey(_)(traceContext)))
@ -98,16 +101,9 @@ class TracedAsyncLoadingCache[K, V](
def getAllUS(
keys: Iterable[K]
)(implicit traceContext: TraceContext): FutureUnlessShutdown[Map[K, V]] =
try
FutureUnlessShutdown.outcomeF(
underlying
.getAll(keys.map(TracedKey(_)(traceContext)))
.map(_.map { case (tracedKey, value) => tracedKey.key -> value })(ec)
)
catch {
case _: AbortedDueToShutdownException => FutureUnlessShutdown.abortedDueToShutdown
}
)(implicit traceContext: TraceContext): FutureUnlessShutdown[Map[K, V]] = {
FutureUnlessShutdown.transformAbortedF(getAll(keys))
}
override def toString = s"TracedAsyncLoadingCache($underlying)"
}

View File

@ -4,11 +4,11 @@
package com.digitalasset.canton.util
import cats.{Monad, Order}
import com.digitalasset.canton.LfPartyId
import com.digitalasset.canton.protocol.*
import com.digitalasset.daml.lf.data.*
import com.digitalasset.daml.lf.transaction.TransactionVersion
import com.digitalasset.daml.lf.value.Value
import com.digitalasset.canton.LfPartyId
import com.digitalasset.canton.protocol.*
import scala.annotation.nowarn

View File

@ -62,7 +62,10 @@ trait HasVersionedWrapper[ValueClass] extends HasVersionedToByteString {
def toByteArray(version: ProtocolVersion): Array[Byte] = toByteString(version).toByteArray
/** Writes the byte string representation of the corresponding `UntypedVersionedMessage` wrapper of this instance to a file. */
def writeToFile(outputFile: String, version: ProtocolVersion = ProtocolVersion.latest): Unit = {
def writeToFile(
outputFile: String,
version: ProtocolVersion = ProtocolVersion.latest,
): Unit = {
val bytes = toByteString(version)
BinaryFileUtil.writeByteStringToFile(outputFile, bytes)
}

View File

@ -9,7 +9,14 @@ import com.digitalasset.canton.ProtoDeserializationError.OtherError
import com.digitalasset.canton.buildinfo.BuildInfo
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.digitalasset.canton.version.ProtocolVersion.{deleted, deprecated, supported, unstable}
import com.digitalasset.canton.version.ProtocolVersion.{
beta,
deleted,
deprecated,
stable,
supported,
unstable,
}
import pureconfig.error.FailureReason
import pureconfig.{ConfigReader, ConfigWriter}
import slick.jdbc.{GetResult, PositionedParameters, SetParameter}
@ -48,7 +55,13 @@ import slick.jdbc.{GetResult, PositionedParameters, SetParameter}
* As a result, you may have to modify a couple of protobuf definitions and mark them as stable as well.
*
* - Remove `v<N>` from [[com.digitalasset.canton.version.ProtocolVersion.unstable]]
* and add it to [[com.digitalasset.canton.buildinfo.BuildInfo.protocolVersions]].
* and add it to [[com.digitalasset.canton.buildinfo.BuildInfo.stableProtocolVersions]].
*
* How to release a protocol version `N` as Beta:
* - Switch the type parameter of the protocol version constant `v<N>` from
* [[com.digitalasset.canton.version.ProtocolVersionAnnotation.Unstable]] to [[com.digitalasset.canton.version.ProtocolVersionAnnotation.Beta]]
* - Remove `v<N>` from [[com.digitalasset.canton.version.ProtocolVersion.unstable]]
* and add it to [[com.digitalasset.canton.buildinfo.BuildInfo.betaProtocolVersions]].
*
* - Check the test jobs for protocol versions:
* Likely `N` will become the default protocol version used by the `test` job,
@ -67,7 +80,10 @@ sealed case class ProtocolVersion private[version] (v: Int)
def isDeprecated: Boolean = deprecated.contains(this)
def isUnstable: Boolean = unstable.contains(this)
def isStable: Boolean = !isUnstable
def isBeta: Boolean = beta.contains(this)
def isStable: Boolean = stable.contains(this)
def isDeleted: Boolean = deleted.contains(this)
@ -90,12 +106,18 @@ object ProtocolVersion {
type Status = S
}
private[version] def stable(v: Int): ProtocolVersionWithStatus[ProtocolVersionAnnotation.Stable] =
private[version] def createStable(
v: Int
): ProtocolVersionWithStatus[ProtocolVersionAnnotation.Stable] =
createWithStatus[ProtocolVersionAnnotation.Stable](v)
private[version] def unstable(
private[version] def createUnstable(
v: Int
): ProtocolVersionWithStatus[ProtocolVersionAnnotation.Unstable] =
createWithStatus[ProtocolVersionAnnotation.Unstable](v)
private[version] def createBeta(
v: Int
): ProtocolVersionWithStatus[ProtocolVersionAnnotation.Beta] =
createWithStatus[ProtocolVersionAnnotation.Beta](v)
private def createWithStatus[S <: ProtocolVersionAnnotation.Status](
v: Int
@ -121,12 +143,9 @@ object ProtocolVersion {
pv: ProtocolVersion,
includeDeleted: Boolean = false,
) = {
val supportedStablePVs = stableAndSupported.map(_.toString)
val deleted = Option.when(includeDeleted)(ProtocolVersion.deleted.forgetNE).getOrElse(Nil)
val supportedPVs = if (includeDeleted) {
val deletedPVs = deleted.map(pv => s"(${pv.toString})")
supportedStablePVs ++ deletedPVs
} else supportedStablePVs
val supportedPVs: NonEmpty[List[String]] = (supported ++ deleted).map(_.toString)
s"Protocol version $pv is not supported. The supported versions are ${supportedPVs.mkString(", ")}."
}
@ -201,13 +220,11 @@ object ProtocolVersion {
// All stable protocol versions supported by this release
// TODO(#15561) Switch to non-empty again
val stableAndSupported: List[ProtocolVersion] =
BuildInfo.protocolVersions
.map(parseUnchecked)
.map(_.valueOr(sys.error))
.toList
val stable: List[ProtocolVersion] =
parseFromBuildInfo(BuildInfo.stableProtocolVersions.toSeq)
private val deprecated: Seq[ProtocolVersion] = Seq()
private val deleted: NonEmpty[Seq[ProtocolVersion]] =
NonEmpty(
Seq,
@ -222,27 +239,36 @@ object ProtocolVersion {
val unstable: NonEmpty[List[ProtocolVersionWithStatus[ProtocolVersionAnnotation.Unstable]]] =
NonEmpty.mk(List, ProtocolVersion.v31, ProtocolVersion.dev)
val supported: NonEmpty[List[ProtocolVersion]] = (unstable ++ stableAndSupported).sorted
val beta: List[ProtocolVersionWithStatus[ProtocolVersionAnnotation.Beta]] =
parseFromBuildInfo(BuildInfo.betaProtocolVersions.toSeq)
.map(pv => ProtocolVersion.createBeta(pv.v))
private val allProtocolVersions = deprecated ++ deleted ++ unstable ++ stableAndSupported
val supported: NonEmpty[List[ProtocolVersion]] = (unstable ++ beta ++ stable).sorted
private val allProtocolVersions = deprecated ++ deleted ++ unstable ++ beta ++ stable
require(
allProtocolVersions.sizeCompare(allProtocolVersions.distinct) == 0,
s"All the protocol versions should be distinct." +
s"Found: ${Map("deprecated" -> deprecated, "deleted" -> deleted, "unstable" -> unstable, "stable" -> stableAndSupported)}",
s"Found: ${Map("deprecated" -> deprecated, "deleted" -> deleted, "unstable" -> unstable, "stable" -> stable)}",
)
// TODO(i15561): change back to `stableAndSupported.max1` once there is a stable Daml 3 protocol version
val latest: ProtocolVersion = stableAndSupported.lastOption.getOrElse(unstable.head1)
val latest: ProtocolVersion = stable.lastOption.getOrElse(unstable.head1)
lazy val dev: ProtocolVersionWithStatus[ProtocolVersionAnnotation.Unstable] =
ProtocolVersion.unstable(Int.MaxValue)
ProtocolVersion.createUnstable(Int.MaxValue)
lazy val v31: ProtocolVersionWithStatus[ProtocolVersionAnnotation.Unstable] =
ProtocolVersion.unstable(31)
ProtocolVersion.createUnstable(31)
// Minimum stable protocol version introduced
lazy val minimum: ProtocolVersion = v31
private def parseFromBuildInfo(pv: Seq[String]): List[ProtocolVersion] =
pv.map(parseUnchecked)
.map(_.valueOr(sys.error))
.toList
}
/*

View File

@ -1,3 +1,6 @@
// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates.
// Proprietary code. All rights reserved.
package com.daml.ledger.javaapi.data;
import org.checkerframework.checker.nullness.qual.NonNull;
@ -6,59 +9,57 @@ import java.util.Arrays;
import java.util.stream.Collectors;
public class PackageVersion implements Comparable<PackageVersion> {
private final int[] segments;
private final int[] segments;
/**
* Creates a PackageVersion from the provided segments.
* <p>
* This method is meant only for internal API usage.
* It is marked unsafe as it does not validate the input
* according to the accepted ledger format of PackageVersion.
*/
public PackageVersion(int[] segments) {
this.segments = segments;
}
/**
* Creates a PackageVersion from the provided segments.
*
* <p>This method is meant only for internal API usage. It is marked unsafe as it does not
* validate the input according to the accepted ledger format of PackageVersion.
*/
public PackageVersion(int[] segments) {
this.segments = segments;
}
/**
* Parses the provided String value into a PackageVersion.
* <p>
* This method is meant only for internal API usage.
* It is marked unsafe as it does not validate the input
* according to the accepted ledger format of PackageVersion.
*/
public static PackageVersion unsafeFromString(@NonNull String version) {
String[] parts = version.split("\\.");
int[] segments = new int[parts.length];
for (int i = 0; i < parts.length; i++) {
segments[i] = Integer.parseInt(parts[i]);
if (segments[i] < 0) {
throw new IllegalArgumentException("Invalid version. No negative segments allowed: " + version);
}
}
return new PackageVersion(segments);
/**
* Parses the provided String value into a PackageVersion.
*
* <p>This method is meant only for internal API usage. It is marked unsafe as it does not
* validate the input according to the accepted ledger format of PackageVersion.
*/
public static PackageVersion unsafeFromString(@NonNull String version) {
String[] parts = version.split("\\.");
int[] segments = new int[parts.length];
for (int i = 0; i < parts.length; i++) {
segments[i] = Integer.parseInt(parts[i]);
if (segments[i] < 0) {
throw new IllegalArgumentException(
"Invalid version. No negative segments allowed: " + version);
}
}
return new PackageVersion(segments);
}
@Override
public int compareTo(PackageVersion other) {
return Arrays.compare(this.segments, other.segments);
}
@Override
public int compareTo(PackageVersion other) {
return Arrays.compare(this.segments, other.segments);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
PackageVersion that = (PackageVersion) o;
return Arrays.equals(segments, that.segments);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
PackageVersion that = (PackageVersion) o;
return Arrays.equals(segments, that.segments);
}
@Override
public int hashCode() {
return Arrays.hashCode(segments);
}
@Override
public int hashCode() {
return Arrays.hashCode(segments);
}
@Override
public String toString() {
return Arrays.stream(segments).mapToObj(Integer::toString)
.collect(Collectors.joining("."));
}
@Override
public String toString() {
return Arrays.stream(segments).mapToObj(Integer::toString).collect(Collectors.joining("."));
}
}

View File

@ -1,3 +1,6 @@
// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates.
// Proprietary code. All rights reserved.
package com.daml.ledger.javaapi.data
import org.scalatest.flatspec.AnyFlatSpec

View File

@ -1,4 +1,4 @@
sdk-version: 3.1.0-snapshot.20240613.13124.0.v24e0f5e8
sdk-version: 3.1.0-snapshot.20240625.13151.0.v74f04330
build-options:
- --target=2.1
name: CantonExamples

View File

@ -650,9 +650,9 @@ create table sequencer_lower_bound (
create table sequencer_events (
ts bigint primary key,
node_index smallint not null,
-- single char to indicate the event type: D for deliver event, E for deliver error
-- single char to indicate the event type: D for deliver event, E for deliver error, R for deliver receipt
event_type char(1) not null
constraint event_type_enum check (event_type = 'D' or event_type = 'E'),
constraint event_type_enum check (event_type IN ('D', 'E', 'R')),
message_id varchar null,
sender integer null,
-- null if event goes to everyone, otherwise specify member ids of recipients
@ -921,6 +921,8 @@ create table seq_traffic_control_consumed_journal (
extra_traffic_consumed bigint not null,
-- base traffic remainder at sequencing_timestamp
base_traffic_remainder bigint not null,
-- the last cost consumed at sequencing_timestamp
last_consumed_cost bigint not null,
-- traffic entries have a unique sequencing_timestamp per member
primary key (member, sequencing_timestamp)
);

View File

@ -1 +1 @@
8347bf5092167e6a3df9d8f3cf1d0054a779e272589f7c0f3aad50cca8f8736a
1923effb9fa5d583e6c188f401e708a5e9c03b725ed988d0928a0b61660854a2

View File

@ -81,7 +81,8 @@ CREATE TABLE lapi_command_completions (
trace_context BINARY LARGE OBJECT
);
CREATE INDEX lapi__command_completions_application_id_offset_idx ON lapi_command_completions USING btree (application_id, completion_offset);
CREATE INDEX lapi_command_completions_application_id_offset_idx ON lapi_command_completions USING btree (application_id, completion_offset);
CREATE INDEX lapi_command_completions_offset_idx ON lapi_command_completions USING btree (completion_offset);
---------------------------------------------------------------------------------------------------
-- Events: create

View File

@ -673,9 +673,9 @@ create table sequencer_lower_bound (
create table sequencer_events (
ts bigint primary key,
node_index smallint not null,
-- single char to indicate the event type: D for deliver event, E for deliver error
-- single char to indicate the event type: D for deliver event, E for deliver error, R for deliver receipt
event_type char(1) not null
constraint event_type_enum check (event_type = 'D' or event_type = 'E'),
constraint event_type_enum check (event_type IN ('D', 'E', 'R')),
message_id varchar(300) collate "C" null,
sender integer null,
-- null if event goes to everyone, otherwise specify member ids of recipients
@ -935,6 +935,8 @@ create table seq_traffic_control_consumed_journal (
extra_traffic_consumed bigint not null,
-- base traffic remainder at sequencing_timestamp
base_traffic_remainder bigint not null,
-- the last cost consumed at sequencing_timestamp
last_consumed_cost bigint not null,
-- traffic entries have a unique sequencing_timestamp per member
primary key (member, sequencing_timestamp)
);

View File

@ -1 +1 @@
22559de6824376d64006305601db270b57afafb1eccc05e041e55bf3cb858e30
1f50894cad8a5ce3e65f5e6b0a48484d2cf0cd7cc354fc6b0aa9cdda97d9e6d3

View File

@ -669,7 +669,8 @@ create or replace view debug.seq_traffic_control_consumed_journal as
member,
debug.canton_timestamp(sequencing_timestamp) as sequencing_timestamp,
extra_traffic_consumed,
base_traffic_remainder
base_traffic_remainder,
last_consumed_cost
from seq_traffic_control_consumed_journal;
create or replace view debug.seq_traffic_control_initial_timestamp as

View File

@ -1 +1 @@
f4d58cc709e08a2081d761637ea8d27393decb4ed1a6f4ee8ecf4843a838eab0
d1c0b524698a1e1249785b0fe973f21f5542020215b49c4012bd774e310fb82e

View File

@ -100,6 +100,7 @@ CREATE TABLE lapi_command_completions (
);
CREATE INDEX lapi_command_completions_application_id_offset_idx ON lapi_command_completions USING btree (application_id, completion_offset);
CREATE INDEX lapi_command_completions_offset_idx ON lapi_command_completions USING btree (completion_offset);
---------------------------------------------------------------------------------------------------
-- Events: Assign

View File

@ -3,6 +3,13 @@
package com.digitalasset.daml.lf
import com.digitalasset.canton.protocol.{
LfNode,
LfNodeId,
LfTransaction,
LfTransactionVersion,
LfVersionedTransaction,
}
import com.digitalasset.daml.lf.data.ImmArray
import com.digitalasset.daml.lf.data.Ref.PackageId
import com.digitalasset.daml.lf.language.{Ast, LanguageMajorVersion}
@ -15,13 +22,6 @@ import com.digitalasset.daml.lf.transaction.{
TransactionVersion,
}
import com.digitalasset.daml.lf.value.ValueCoder
import com.digitalasset.canton.protocol.{
LfNode,
LfNodeId,
LfTransaction,
LfTransactionVersion,
LfVersionedTransaction,
}
/** As part of upstream Daml-LF refactoring, previously accessible capabilities have become Canton-private. This
* enables Daml-LF to limit it's API surface area while still allowing Canton deeper visibility into transaction

View File

@ -5,5 +5,6 @@ package com.digitalasset.canton.config
trait ProtocolConfig {
def devVersionSupport: Boolean
def betaVersionSupport: Boolean
def dontWarnOnDeprecatedPV: Boolean
}

View File

@ -6,7 +6,6 @@ package com.digitalasset.canton.data
import cats.data.Chain
import cats.syntax.functor.*
import cats.syntax.functorFilter.*
import com.digitalasset.daml.lf.transaction.NodeId
import com.digitalasset.canton.LfPartyId
import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt}
import com.digitalasset.canton.data.TransactionViewDecomposition.{NewView, SameView}
@ -16,6 +15,7 @@ import com.digitalasset.canton.topology.ParticipantId
import com.digitalasset.canton.topology.client.TopologySnapshot
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.LfTransactionUtil
import com.digitalasset.daml.lf.transaction.NodeId
import scala.concurrent.{ExecutionContext, Future}

View File

@ -3,11 +3,11 @@
package com.digitalasset.canton.protocol
import com.digitalasset.daml.lf.data.Ref.PackageId
import com.digitalasset.canton.LfPackageId
import com.digitalasset.canton.config.CantonRequireTypes.String255
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.daml.lf.data.Ref.PackageId
import slick.jdbc.GetResult
import slick.jdbc.GetResult.GetInt

View File

@ -3,11 +3,11 @@
package com.digitalasset.canton.protocol
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.daml.lf.crypto
import com.digitalasset.daml.lf.data.{ImmArray, Time}
import com.digitalasset.daml.lf.transaction.NodeId
import com.digitalasset.daml.lf.transaction.Transaction.Metadata
import com.digitalasset.canton.data.CantonTimestamp
/** Collects the metadata of a LF transaction to the extent that is needed in Canton
*

View File

@ -7,7 +7,6 @@ import cats.data.{NonEmptyChain, Validated}
import cats.syntax.either.*
import cats.syntax.foldable.*
import cats.syntax.functor.*
import com.digitalasset.daml.lf.data.ImmArray
import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.data.ActionDescription
import com.digitalasset.canton.protocol.RollbackContext.{RollbackScope, RollbackSibling}
@ -16,6 +15,7 @@ import com.digitalasset.canton.topology.PartyId
import com.digitalasset.canton.util.ShowUtil.*
import com.digitalasset.canton.util.{Checked, LfTransactionUtil, MonadUtil}
import com.digitalasset.canton.{checked, protocol}
import com.digitalasset.daml.lf.data.ImmArray
import scala.collection.immutable.HashMap
import scala.collection.mutable

View File

@ -12,7 +12,6 @@ import com.digitalasset.canton.sequencing.protocol.{
Batch,
MediatorGroupRecipient,
OpenEnvelope,
ParticipantsOfParty,
Recipients,
}
import com.digitalasset.canton.topology.client.TopologySnapshot
@ -57,32 +56,13 @@ final case class TransactionConfirmationRequest(
val rootHashMessageEnvelopes =
NonEmpty.from(recipientsOfRootHashMessage) match {
case Some(recipientsNE) =>
// TODO(#13883) Use BCC also for group addresses
// val groupsWithMediator =
// recipientsOfRootHashMessage.map(recipient => NonEmpty(Set, recipient, mediatorRecipient))
// val rootHashMessageEnvelope = OpenEnvelope(
// rootHashMessage,
// Recipients.recipientGroups(NonEmptyUtil.fromUnsafe(groupsWithMediator)),
// )(protocolVersion)
val groupAddressing = recipientsOfRootHashMessage.exists {
case ParticipantsOfParty(_) => true
case _ => false
}
// if using group addressing, we just place all recipients in one group instead of separately as before (it was separate for legacy reasons)
val rootHashMessageRecipients =
if (groupAddressing)
Recipients.recipientGroups(
NonEmpty.mk(Seq, recipientsNE.toSet ++ Seq(mediator))
)
else
Recipients.recipientGroups(
recipientsNE.map(NonEmpty.mk(Set, _, mediator))
)
List(
OpenEnvelope(rootHashMessage(ipsSnapshot.timestamp), rootHashMessageRecipients)(
protocolVersion
)
)
val groupsWithMediator = recipientsNE.map(NonEmpty(Set, _, mediator))
val rootHashMessageEnvelope = OpenEnvelope(
rootHashMessage(ipsSnapshot.timestamp),
Recipients.recipientGroups(groupsWithMediator),
)(protocolVersion)
List(rootHashMessageEnvelope)
case None =>
loggingContext.warn("Confirmation request without root hash message recipients")
List.empty

View File

@ -117,7 +117,7 @@ class QueueBasedDomainOutbox(
private def hasUnsentTransactions: Boolean = domainOutboxQueue.numUnsentTransactions > 0
def newTransactionsAddedToAuthorizedStore(
def newTransactionsAdded(
asOf: CantonTimestamp,
num: Int,
): FutureUnlessShutdown[Unit] = {

View File

@ -162,7 +162,7 @@ class StoreBasedDomainOutbox(
final def queueSize: Int = watermarks.get().queuedApprox
final def newTransactionsAddedToAuthorizedStore(
final def newTransactionsAdded(
asOf: CantonTimestamp,
num: Int,
): FutureUnlessShutdown[Unit] = {
@ -375,7 +375,7 @@ abstract class DomainOutbox extends DomainOutboxHandle {
def targetClient: DomainTopologyClientWithInit
def newTransactionsAddedToAuthorizedStore(
def newTransactionsAdded(
asOf: CantonTimestamp,
num: Int,
): FutureUnlessShutdown[Unit]
@ -396,7 +396,7 @@ class DomainOutboxDynamicObserver(val loggerFactory: NamedLoggerFactory)
transactions: Seq[SignedTopologyTransaction[TopologyChangeOp, TopologyMapping]],
)(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = {
outboxRef.get.fold(FutureUnlessShutdown.unit)(
_.newTransactionsAddedToAuthorizedStore(timestamp, transactions.size)
_.newTransactionsAdded(timestamp, transactions.size)
)
}

View File

@ -26,9 +26,12 @@ object ProtocolVersionCompatibility {
cantonNodeParameters: CantonNodeParameters,
release: ReleaseVersion = ReleaseVersion.current,
): NonEmpty[List[ProtocolVersion]] = {
val unstable =
val unstableAndBeta =
if (cantonNodeParameters.devVersionSupport && cantonNodeParameters.nonStandardConfig)
ProtocolVersion.unstable.forgetNE
ProtocolVersion.unstable.forgetNE ++ ReleaseVersionToProtocolVersions
.getBetaProtocolVersions(release)
else if (cantonNodeParameters.betaVersionSupport)
ReleaseVersionToProtocolVersions.getBetaProtocolVersions(release)
else List.empty
ReleaseVersionToProtocolVersions.getOrElse(
@ -36,15 +39,23 @@ object ProtocolVersionCompatibility {
sys.error(
s"Please add the supported protocol versions of a participant of release version $release to `majorMinorToProtocolVersions` in `ReleaseVersionToProtocolVersions.scala`."
),
) ++ unstable
) ++ unstableAndBeta
}
/** Returns the protocol versions supported by the participant of the current release.
/** Returns the protocol versions supported by the participant of the specified release.
* includeUnstableVersions: include unstable versions
* includeBetaVersions: include Beta versions
*/
def supportedProtocolsParticipant(
includeUnstableVersions: Boolean,
includeBetaVersions: Boolean,
release: ReleaseVersion,
): NonEmpty[List[ProtocolVersion]] = {
val beta =
if (includeBetaVersions)
ReleaseVersionToProtocolVersions.getBetaProtocolVersions(release)
else List.empty
val unstable =
if (includeUnstableVersions)
ProtocolVersion.unstable.forgetNE
@ -55,7 +66,7 @@ object ProtocolVersionCompatibility {
sys.error(
s"Please add the supported protocol versions of a participant of release version $release to `majorMinorToProtocolVersions` in `ReleaseVersionToProtocolVersions.scala`."
),
) ++ unstable
) ++ beta ++ unstable
}
/** Returns the protocol versions supported by the domain of the current release.
@ -65,9 +76,12 @@ object ProtocolVersionCompatibility {
cantonNodeParameters: CantonNodeParameters,
release: ReleaseVersion = ReleaseVersion.current,
): NonEmpty[List[ProtocolVersion]] = {
val unstable =
val unstableAndBeta =
if (cantonNodeParameters.devVersionSupport && cantonNodeParameters.nonStandardConfig)
ProtocolVersion.unstable.forgetNE
ProtocolVersion.unstable.forgetNE ++ ReleaseVersionToProtocolVersions
.getBetaProtocolVersions(release)
else if (cantonNodeParameters.betaVersionSupport)
ReleaseVersionToProtocolVersions.getBetaProtocolVersions(release)
else List.empty
ReleaseVersionToProtocolVersions.getOrElse(
@ -75,16 +89,23 @@ object ProtocolVersionCompatibility {
sys.error(
s"Please add the supported protocol versions of domain nodes of release version $release to `majorMinorToProtocolVersions` in `ReleaseVersionToProtocolVersions.scala`."
),
) ++ unstable
) ++ unstableAndBeta
}
/** Returns the protocol versions supported by the domain of the current release.
* Fails if no stable protocol versions are found
/** Returns the protocol versions supported by the domain of the specified release.
* includeUnstableVersions: include unstable versions
* includeBetaVersions: include beta versions
*/
def trySupportedProtocolsDomain(
includeUnstableVersions: Boolean,
includeBetaVersions: Boolean,
release: ReleaseVersion,
): NonEmpty[List[ProtocolVersion]] = {
val beta =
if (includeBetaVersions)
ReleaseVersionToProtocolVersions.getBetaProtocolVersions(release)
else List.empty
val unstable =
if (includeUnstableVersions)
ProtocolVersion.unstable.forgetNE
@ -95,7 +116,7 @@ object ProtocolVersionCompatibility {
sys.error(
s"Please add the supported protocol versions of domain nodes of release version $release to `majorMinorToProtocolVersions` in `ReleaseVersionToProtocolVersions.scala`."
),
) ++ unstable
) ++ beta ++ unstable
}
final case class UnsupportedVersion(version: ProtocolVersion, supported: Seq[ProtocolVersion])
@ -222,6 +243,7 @@ object DomainProtocolVersion {
ProtocolVersionCompatibility
.trySupportedProtocolsDomain(
includeUnstableVersions = true,
includeBetaVersions = true,
release = ReleaseVersion.current,
)
.contains(version),
@ -229,7 +251,8 @@ object DomainProtocolVersion {
UnsupportedVersion(
version,
ProtocolVersionCompatibility.trySupportedProtocolsDomain(
includeUnstableVersions = false,
includeUnstableVersions = true,
includeBetaVersions = true,
release = ReleaseVersion.current,
),
),
@ -261,6 +284,7 @@ object ParticipantProtocolVersion {
ProtocolVersionCompatibility
.supportedProtocolsParticipant(
includeUnstableVersions = true,
includeBetaVersions = true,
release = ReleaseVersion.current,
)
.contains(version),
@ -268,7 +292,8 @@ object ParticipantProtocolVersion {
UnsupportedVersion(
version,
ProtocolVersionCompatibility.supportedProtocolsParticipant(
includeUnstableVersions = false,
includeUnstableVersions = true,
includeBetaVersions = true,
release = ReleaseVersion.current,
),
),

View File

@ -10,46 +10,42 @@ object ReleaseVersionToProtocolVersions {
private val v3 = ProtocolVersion(3)
private val v4 = ProtocolVersion(4)
private val v5 = ProtocolVersion(5)
private val v6 = ProtocolVersion(6)
private val v30 = ProtocolVersion(30)
import ProtocolVersion.*
// For each (major, minor) the list of supported protocol versions
// Don't make this variable private because it's used in `console-reference.canton`
val majorMinorToProtocolVersions: Map[(Int, Int), NonEmpty[List[ProtocolVersion]]] = Map(
ReleaseVersions.v2_0_0 -> List(v2),
ReleaseVersions.v2_1_0 -> List(v2),
ReleaseVersions.v2_2_0 -> List(v2),
ReleaseVersions.v2_3_0 -> List(v2, v3),
ReleaseVersions.v2_4_0 -> List(v2, v3),
ReleaseVersions.v2_5_0 -> List(v2, v3, v4),
ReleaseVersions.v2_6_0 -> List(v3, v4),
ReleaseVersions.v2_7_0 -> List(v3, v4, v5),
ReleaseVersions.v2_8_0 -> List(v3, v4, v5),
ReleaseVersions.v2_9_0 -> List(v3, v4, v5),
ReleaseVersions.v3_0_0 -> List(v30),
ReleaseVersions.v3_1_0_snapshot -> List(v31),
val majorMinorToStableProtocolVersions: Map[(Int, Int), NonEmpty[List[ProtocolVersion]]] =
Map(
ReleaseVersions.v2_0_0 -> List(v2),
ReleaseVersions.v2_1_0 -> List(v2),
ReleaseVersions.v2_2_0 -> List(v2),
ReleaseVersions.v2_3_0 -> List(v2, v3),
ReleaseVersions.v2_4_0 -> List(v2, v3),
ReleaseVersions.v2_5_0 -> List(v2, v3, v4),
ReleaseVersions.v2_6_0 -> List(v3, v4),
ReleaseVersions.v2_7_0 -> List(v3, v4, v5),
ReleaseVersions.v2_8_0 -> List(v3, v4, v5),
ReleaseVersions.v2_9_0 -> List(v5),
ReleaseVersions.v3_0_0 -> List(v30),
ReleaseVersions.v3_1_0_snapshot -> List(v31),
).map { case (release, pvs) => (release.majorMinor, NonEmptyUtil.fromUnsafe(pvs)) }
val majorMinorToBetaProtocolVersions: Map[(Int, Int), NonEmpty[List[ProtocolVersion]]] = Map(
ReleaseVersions.v2_9_0 -> List(v6)
).map { case (release, pvs) => (release.majorMinor, NonEmptyUtil.fromUnsafe(pvs)) }
def get(
releaseVersion: ReleaseVersion,
includeDeletedProtocolVersions: Boolean = false,
): Option[NonEmpty[List[ProtocolVersion]]] = {
val allVersions = majorMinorToProtocolVersions
.get(releaseVersion.majorMinor)
if (includeDeletedProtocolVersions)
allVersions
else
majorMinorToProtocolVersions
.get(releaseVersion.majorMinor)
.map(_.filterNot(_.isDeleted))
.flatMap(NonEmpty.from)
}
def getOrElse(
releaseVersion: ReleaseVersion,
default: => NonEmpty[List[ProtocolVersion]],
): NonEmpty[List[ProtocolVersion]] =
majorMinorToProtocolVersions.getOrElse(releaseVersion.majorMinor, default)
majorMinorToStableProtocolVersions.getOrElse(releaseVersion.majorMinor, default)
def getBetaProtocolVersions(releaseVersion: ReleaseVersion): List[ProtocolVersion] =
majorMinorToBetaProtocolVersions
.get(releaseVersion.majorMinor)
.map(_.forgetNE)
.getOrElse(Nil)
}

View File

@ -19,7 +19,7 @@ class RecipientsTreeTest extends AnyWordSpec with BaseTest {
lazy val p6: Member = ParticipantId("participant6")
lazy val alice = PartyId(UniqueIdentifier.tryFromProtoPrimitive(s"alice::party"))
lazy val bob = PartyId(UniqueIdentifier.tryFromProtoPrimitive(s"alice::bob"))
lazy val bob = PartyId(UniqueIdentifier.tryFromProtoPrimitive(s"bob::party"))
lazy val pop1: ParticipantsOfParty = ParticipantsOfParty(alice)
lazy val pop2: ParticipantsOfParty = ParticipantsOfParty(bob)
@ -51,6 +51,16 @@ class RecipientsTreeTest extends AnyWordSpec with BaseTest {
t5.forMember(p5, Set(pop1)) shouldBe List(t5)
}
}
"allPaths" should {
"give all paths within the tree" in {
t5.allPaths shouldBe Seq(
Seq(Set(rec(p1), pop1), Set(rec(p4), rec(p2), pop2), Set(rec(p1), rec(p5))),
Seq(Set(rec(p1), pop1), Set(rec(p4), rec(p2), pop2), Set(rec(p3))),
Seq(Set(rec(p1), pop1), Set(rec(p2), rec(p6), pop2)),
)
}
}
}
"serialization and deserialization" should {

View File

@ -4,14 +4,17 @@
package com.digitalasset.canton
import com.daml.ledger.javaapi.data.Identifier
import com.digitalasset.daml.lf.data.{FrontStack, ImmArray}
import com.digitalasset.daml.lf.transaction.NodeId
import com.digitalasset.daml.lf.transaction.test.NodeIdTransactionBuilder
import com.digitalasset.daml.lf.transaction.test.TransactionBuilder.Implicits.{toIdentifier, toPackageId}
import com.digitalasset.canton.ComparesLfTransactions.TxTree
import com.digitalasset.canton.logging.pretty.PrettyTestInstances.*
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.protocol.*
import com.digitalasset.daml.lf.data.{FrontStack, ImmArray}
import com.digitalasset.daml.lf.transaction.NodeId
import com.digitalasset.daml.lf.transaction.test.NodeIdTransactionBuilder
import com.digitalasset.daml.lf.transaction.test.TransactionBuilder.Implicits.{
toIdentifier,
toPackageId,
}
import org.scalatest.{Assertion, Suite}
/** Test utility to compare actual and expected lf transactions using a human-readable, hierarchical serialization of lf

View File

@ -4,7 +4,6 @@
package com.digitalasset.canton
import cats.Id
import com.digitalasset.daml.lf.data.{ImmArray, Ref}
import com.digitalasset.canton.data.DeduplicationPeriod.DeduplicationDuration
import com.digitalasset.canton.protocol.{
LfCommittedTransaction,
@ -13,6 +12,7 @@ import com.digitalasset.canton.protocol.{
LfTransactionVersion,
LfVersionedTransaction,
}
import com.digitalasset.daml.lf.data.{ImmArray, Ref}
/** Default values for objects from the Daml repo for unit testing */
object DefaultDamlValues {

View File

@ -3,8 +3,6 @@
package com.digitalasset.canton
import com.digitalasset.daml.lf.transaction.Versioned
import com.digitalasset.daml.lf.value.Value.ValueInt64
import com.digitalasset.canton.crypto.{Hash, HashAlgorithm, TestHash}
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.protocol.{
@ -18,6 +16,8 @@ import com.digitalasset.canton.protocol.{
Unicum,
}
import com.digitalasset.canton.topology.PartyId
import com.digitalasset.daml.lf.transaction.Versioned
import com.digitalasset.daml.lf.value.Value.ValueInt64
import org.scalacheck.{Arbitrary, Gen}
object GeneratorsLf {

View File

@ -3,13 +3,13 @@
package com.digitalasset.canton.data
import com.digitalasset.daml.lf.value.Value
import com.digitalasset.canton.data.ActionDescription.*
import com.digitalasset.canton.protocol.*
import com.digitalasset.canton.util.LfTransactionBuilder
import com.digitalasset.canton.util.LfTransactionBuilder.defaultTemplateId
import com.digitalasset.canton.version.RepresentativeProtocolVersion
import com.digitalasset.canton.{BaseTest, LfPackageName, LfVersioned}
import com.digitalasset.daml.lf.value.Value
import org.scalatest.wordspec.AnyWordSpec
class ActionDescriptionTest extends AnyWordSpec with BaseTest {

View File

@ -3,7 +3,6 @@
package com.digitalasset.canton.data
import com.digitalasset.daml.lf.value.Value.ValueInt64
import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt}
import com.digitalasset.canton.crypto.{GeneratorsCrypto, Salt, TestHash}
import com.digitalasset.canton.data.ActionDescription.{
@ -28,6 +27,7 @@ import com.digitalasset.canton.util.SeqUtil
import com.digitalasset.canton.version.Transfer.{SourceProtocolVersion, TargetProtocolVersion}
import com.digitalasset.canton.version.{ProtocolVersion, RepresentativeProtocolVersion}
import com.digitalasset.canton.{LfInterfaceId, LfPackageId, LfPartyId, LfVersioned}
import com.digitalasset.daml.lf.value.Value.ValueInt64
import magnolify.scalacheck.auto.*
import org.scalacheck.{Arbitrary, Gen}
import org.scalatest.EitherValues.*

View File

@ -53,12 +53,14 @@ final class GeneratorsTrafficData(
extraTrafficLimit <- Arbitrary.arbitrary[NonNegativeLong]
extraTrafficConsumed <- Arbitrary.arbitrary[NonNegativeLong]
baseTrafficRemainder <- Arbitrary.arbitrary[NonNegativeLong]
lastConsumedCost <- Arbitrary.arbitrary[NonNegativeLong]
timestamp <- Arbitrary.arbitrary[CantonTimestamp]
serial <- Arbitrary.arbitrary[Option[PositiveInt]]
} yield TrafficState(
extraTrafficLimit,
extraTrafficConsumed,
baseTrafficRemainder,
lastConsumedCost,
timestamp,
serial,
)

View File

@ -3,9 +3,6 @@
package com.digitalasset.canton.data
import com.digitalasset.daml.lf.transaction.test.TestNodeBuilder.CreateKey
import com.digitalasset.daml.lf.transaction.test.TreeTransactionBuilder.NodeWrapper
import com.digitalasset.daml.lf.transaction.test.{TestIdFactory, TestNodeBuilder, TreeTransactionBuilder}
import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt}
import com.digitalasset.canton.data.TransactionViewDecomposition.*
import com.digitalasset.canton.protocol.RollbackContext.{RollbackScope, RollbackSibling}
@ -20,6 +17,13 @@ import com.digitalasset.canton.{
LfValue,
NeedsNewLfContractIds,
}
import com.digitalasset.daml.lf.transaction.test.TestNodeBuilder.CreateKey
import com.digitalasset.daml.lf.transaction.test.TreeTransactionBuilder.NodeWrapper
import com.digitalasset.daml.lf.transaction.test.{
TestIdFactory,
TestNodeBuilder,
TreeTransactionBuilder,
}
import org.scalatest.wordspec.AnyWordSpec
class TransactionViewDecompositionTest

Some files were not shown because too many files have changed in this diff Show More