Rename com.daml.lf to com.digitalasset.daml.lf (#19431)

* Rename com.daml => com.digitalasset.daml for lf
Simon Maxen 2024-06-25 19:20:04 +01:00 committed by GitHub
parent f0d34ca060
commit 9f1b9886e2
GPG Key ID: B5690EEEBB952194
1001 changed files with 7540 additions and 6274 deletions

View File

@ -60,7 +60,7 @@ using `da_scala_binary` as shown below.
```
da_scala_binary(
name = "daml-script-binary",
main_class = "com.daml.lf.engine.script.ScriptMain",
main_class = "com.digitalasset.daml.lf.engine.script.ScriptMain",
resources = glob(["src/main/resources/**/*"]),
scala_runtime_deps = [
"@maven//:org_apache_pekko_pekko_slf4j",

View File

@ -31,10 +31,8 @@ message TrafficState {
int64 extra_traffic_consumed = 2;
// Amount of base traffic remaining
int64 base_traffic_remainder = 3;
// Cost deducted at `timestamp`, only present when traffic was consumed at `timestamp`, otherwise is set to 0
uint64 last_consumed_cost = 4;
// Timestamp at which the state is valid
int64 timestamp = 5;
int64 timestamp = 4;
// Optional serial of the balance update that updated the extra traffic limit
google.protobuf.UInt32Value serial = 6;
google.protobuf.UInt32Value serial = 5;
}

View File

@ -4,14 +4,6 @@
package com.digitalasset.canton.admin.api.client.commands
import cats.syntax.either.*
import cats.syntax.traverse.*
import com.daml.ledger.api.v2.admin.command_inspection_service.CommandInspectionServiceGrpc.CommandInspectionServiceStub
import com.daml.ledger.api.v2.admin.command_inspection_service.{
CommandInspectionServiceGrpc,
CommandState,
GetCommandStatusRequest,
GetCommandStatusResponse,
}
import com.daml.ledger.api.v2.admin.identity_provider_config_service.IdentityProviderConfigServiceGrpc.IdentityProviderConfigServiceStub
import com.daml.ledger.api.v2.admin.identity_provider_config_service.*
import com.daml.ledger.api.v2.admin.metering_report_service.MeteringReportServiceGrpc.MeteringReportServiceStub
@ -143,7 +135,6 @@ import com.digitalasset.canton.ledger.client.services.admin.IdentityProviderConf
import com.digitalasset.canton.logging.ErrorLoggingContext
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.networking.grpc.ForwardingStreamObserver
import com.digitalasset.canton.platform.apiserver.execution.CommandStatus
import com.digitalasset.canton.protocol.LfContractId
import com.digitalasset.canton.serialization.ProtoConverter
import com.digitalasset.canton.topology.{DomainId, PartyId}
@ -361,34 +352,6 @@ object LedgerApiCommands {
}
}
object CommandInspectionService {
abstract class BaseCommand[Req, Resp, Res] extends GrpcAdminCommand[Req, Resp, Res] {
override type Svc = CommandInspectionServiceStub
override def createService(channel: ManagedChannel): CommandInspectionServiceStub =
CommandInspectionServiceGrpc.stub(channel)
}
final case class GetCommandStatus(commandIdPrefix: String, state: CommandState, limit: Int)
extends BaseCommand[GetCommandStatusRequest, GetCommandStatusResponse, Seq[CommandStatus]] {
override def createRequest(): Either[String, GetCommandStatusRequest] = Right(
GetCommandStatusRequest(commandIdPrefix = commandIdPrefix, state = state, limit = limit)
)
override def submitRequest(
service: CommandInspectionServiceStub,
request: GetCommandStatusRequest,
): Future[GetCommandStatusResponse] = service.getCommandStatus(request)
override def handleResponse(
response: GetCommandStatusResponse
): Either[String, Seq[CommandStatus]] = {
response.commandStatus.traverse(CommandStatus.fromProto).leftMap(_.message)
}
}
}
object ParticipantPruningService {
abstract class BaseCommand[Req, Resp, Res] extends GrpcAdminCommand[Req, Resp, Res] {
override type Svc = ParticipantPruningServiceStub

View File

@ -12,7 +12,7 @@ import com.daml.ledger.api.v2.state_service.{
IncompleteUnassigned,
}
import com.daml.ledger.api.v2.value.{Record, RecordField, Value}
import com.daml.lf.data.Time
import com.digitalasset.daml.lf.data.Time
import com.digitalasset.canton.admin.api.client.data.TemplateId
import com.digitalasset.canton.crypto.Salt
import com.digitalasset.canton.protocol.LfContractId

View File

@ -70,7 +70,6 @@ import com.digitalasset.canton.participant.ParticipantNodeParameters
import com.digitalasset.canton.participant.admin.AdminWorkflowConfig
import com.digitalasset.canton.participant.config.ParticipantInitConfig.ParticipantLedgerApiInitConfig
import com.digitalasset.canton.participant.config.*
import com.digitalasset.canton.participant.sync.CommandProgressTrackerConfig
import com.digitalasset.canton.platform.apiserver.SeedService.Seeding
import com.digitalasset.canton.platform.apiserver.configuration.{
EngineLoggingConfig,
@ -389,7 +388,6 @@ trait CantonConfig {
disableUpgradeValidation = participantParameters.disableUpgradeValidation,
allowForUnauthenticatedContractIds =
participantParameters.allowForUnauthenticatedContractIds,
commandProgressTracking = participantParameters.commandProgressTracker,
)
}
@ -973,12 +971,9 @@ object CantonConfig {
deriveReader[EngineLoggingConfig]
lazy implicit val cantonEngineConfigReader: ConfigReader[CantonEngineConfig] =
deriveReader[CantonEngineConfig]
@nowarn("cat=unused") lazy implicit val participantNodeParameterConfigReader
: ConfigReader[ParticipantNodeParameterConfig] = {
implicit val commandProgressTrackerConfigReader: ConfigReader[CommandProgressTrackerConfig] =
deriveReader[CommandProgressTrackerConfig]
lazy implicit val participantNodeParameterConfigReader
: ConfigReader[ParticipantNodeParameterConfig] =
deriveReader[ParticipantNodeParameterConfig]
}
lazy implicit val timeTrackerConfigReader: ConfigReader[DomainTimeTrackerConfig] =
deriveReader[DomainTimeTrackerConfig]
lazy implicit val timeRequestConfigReader: ConfigReader[TimeProofRequestConfig] =
@ -1390,12 +1385,9 @@ object CantonConfig {
deriveWriter[EngineLoggingConfig]
lazy implicit val cantonEngineConfigWriter: ConfigWriter[CantonEngineConfig] =
deriveWriter[CantonEngineConfig]
@nowarn("cat=unused") lazy implicit val participantNodeParameterConfigWriter
: ConfigWriter[ParticipantNodeParameterConfig] = {
implicit val commandProgressTrackerConfigWriter: ConfigWriter[CommandProgressTrackerConfig] =
deriveWriter[CommandProgressTrackerConfig]
lazy implicit val participantNodeParameterConfigWriter
: ConfigWriter[ParticipantNodeParameterConfig] =
deriveWriter[ParticipantNodeParameterConfig]
}
lazy implicit val timeTrackerConfigWriter: ConfigWriter[DomainTimeTrackerConfig] =
deriveWriter[DomainTimeTrackerConfig]
lazy implicit val timeRequestConfigWriter: ConfigWriter[TimeProofRequestConfig] =
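
The config readers and writers above are built with pureconfig's semi-automatic derivation. As a hedged illustration of that pattern (the config class below is hypothetical and not part of Canton), a reader for a case class is derived the same way as for the config classes in the hunk:

```
import pureconfig.ConfigReader
import pureconfig.generic.semiauto.deriveReader

object ExampleReaders {
  // Hypothetical config class, used only to illustrate deriveReader.
  final case class ExampleTrackerConfig(enabled: Boolean = false, maxCommands: Int = 100)

  // Semi-automatic derivation: pureconfig builds the reader from the case class fields.
  implicit val exampleTrackerConfigReader: ConfigReader[ExampleTrackerConfig] =
    deriveReader[ExampleTrackerConfig]
}
```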

View File

@ -20,7 +20,7 @@ import com.daml.ledger.api.v2.value.{
RecordField,
Value,
}
import com.daml.lf.value.Value.ContractId
import com.digitalasset.daml.lf.value.Value.ContractId
import com.daml.nonempty.NonEmpty
import com.daml.nonempty.NonEmptyReturningOps.*
import com.digitalasset.canton.admin.api.client.commands.LedgerApiTypeWrappers.ContractData

View File

@ -8,7 +8,6 @@ import cats.syntax.functorFilter.*
import cats.syntax.traverse.*
import com.daml.jwt.JwtDecoder
import com.daml.jwt.domain.Jwt
import com.daml.ledger.api.v2.admin.command_inspection_service.CommandState
import com.daml.ledger.api.v2.admin.package_management_service.PackageDetails
import com.daml.ledger.api.v2.admin.party_management_service.PartyDetails as ProtoPartyDetails
import com.daml.ledger.api.v2.checkpoint.Checkpoint
@ -40,7 +39,7 @@ import com.daml.ledger.javaapi.data.{
TransactionTree,
}
import com.daml.ledger.javaapi as javab
import com.daml.lf.data.Ref
import com.digitalasset.daml.lf.data.Ref
import com.daml.metrics.api.MetricsContext
import com.daml.scalautil.Statement.discard
import com.digitalasset.canton.admin.api.client.commands.LedgerApiCommands.CompletionWrapper
@ -84,7 +83,6 @@ import com.digitalasset.canton.ledger.client.services.admin.IdentityProviderConf
import com.digitalasset.canton.logging.NamedLogging
import com.digitalasset.canton.networking.grpc.{GrpcError, RecordingStreamObserver}
import com.digitalasset.canton.participant.ledger.api.client.JavaDecodeUtil
import com.digitalasset.canton.platform.apiserver.execution.CommandStatus
import com.digitalasset.canton.protocol.LfContractId
import com.digitalasset.canton.topology.{DomainId, ParticipantId, PartyId}
import com.digitalasset.canton.tracing.NoTracing
@ -556,36 +554,6 @@ trait BaseLedgerApiAdministration extends NoTracing {
}
}
@Help.Summary("Investigate successful and failed commands", FeatureFlag.Testing)
@Help.Description(
"""Find the status of commands. Note that only recent commands which are kept in memory will be returned."""
)
def status(
commandIdPrefix: String = "",
state: CommandState = CommandState.COMMAND_STATE_UNSPECIFIED,
limit: PositiveInt = PositiveInt.tryCreate(10),
): Seq[CommandStatus] = check(FeatureFlag.Preview) {
consoleEnvironment.run {
ledgerApiCommand(
LedgerApiCommands.CommandInspectionService.GetCommandStatus(
commandIdPrefix = commandIdPrefix,
state = state,
limit = limit.unwrap,
)
)
}
}
@Help.Summary("Investigate failed commands", FeatureFlag.Testing)
@Help.Description(
"""Same as status(..., state = CommandState.Failed)."""
)
def failed(commandId: String = "", limit: PositiveInt = PositiveInt.tryCreate(10)): Seq[
CommandStatus
] = check(FeatureFlag.Preview) {
status(commandId, CommandState.COMMAND_STATE_FAILED, limit)
}
@Help.Summary(
"Submit assign command and wait for the resulting reassignment, returning the reassignment or failing otherwise",
FeatureFlag.Testing,
@ -828,36 +796,6 @@ trait BaseLedgerApiAdministration extends NoTracing {
)
})
@Help.Summary("Investigate successful and failed commands", FeatureFlag.Testing)
@Help.Description(
"""Find the status of commands. Note that only recent commands which are kept in memory will be returned."""
)
def status(
commandIdPrefix: String = "",
state: CommandState = CommandState.COMMAND_STATE_UNSPECIFIED,
limit: PositiveInt = PositiveInt.tryCreate(10),
): Seq[CommandStatus] = check(FeatureFlag.Preview) {
consoleEnvironment.run {
ledgerApiCommand(
LedgerApiCommands.CommandInspectionService.GetCommandStatus(
commandIdPrefix = commandIdPrefix,
state = state,
limit = limit.unwrap,
)
)
}
}
@Help.Summary("Investigate failed commands", FeatureFlag.Testing)
@Help.Description(
"""Same as status(..., state = CommandState.Failed)."""
)
def failed(commandId: String = "", limit: PositiveInt = PositiveInt.tryCreate(10)): Seq[
CommandStatus
] = check(FeatureFlag.Preview) {
status(commandId, CommandState.COMMAND_STATE_FAILED, limit)
}
@Help.Summary("Read active contracts", FeatureFlag.Testing)
@Help.Group("Active Contracts")
object acs extends Helpful {

View File

@ -309,7 +309,7 @@ class ParticipantPartiesAdministrationGroup(
TopologyAdminCommands.Write.Propose(
// TODO(#14048) properly set the serial or introduce auto-detection so we don't
// have to set it on the client side
mapping = PartyToParticipant.create(
mapping = PartyToParticipant(
partyId,
None,
threshold,
@ -326,8 +326,6 @@ class ParticipantPartiesAdministrationGroup(
serial = None,
store = AuthorizedStore.filterName,
mustFullyAuthorize = mustFullyAuthorize,
change = TopologyChangeOp.Replace,
forceChanges = ForceFlags.none,
)
)
}

View File

@ -4,7 +4,7 @@
package com.digitalasset.canton.console.commands
import cats.syntax.either.*
import com.daml.lf.data.Ref.PackageId
import com.digitalasset.daml.lf.data.Ref.PackageId
import com.daml.nameof.NameOf.functionFullName
import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.admin.api.client.commands.{GrpcAdminCommand, TopologyAdminCommands}
@ -1209,29 +1209,6 @@ class TopologyAdministrationGroup(
@Help.Group("Party to participant mappings")
object party_to_participant_mappings extends Helpful {
private def findCurrent(party: PartyId, store: String) = {
TopologyStoreId(store) match {
case TopologyStoreId.DomainStore(domainId, _) =>
expectAtMostOneResult(
list(
domainId,
filterParty = party.filterString,
// fetch both REPLACE and REMOVE to correctly determine the next serial
operation = None,
)
)
case TopologyStoreId.AuthorizedStore =>
expectAtMostOneResult(
list_from_authorized(
filterParty = party.filterString,
// fetch both REPLACE and REMOVE to correctly determine the next serial
operation = None,
)
)
}
}
@Help.Summary("Change party to participant mapping")
@Help.Description("""Change the association of a party to hosting participants.
party: The unique identifier of the party whose set of participants or permission to modify.
@ -1267,7 +1244,27 @@ class TopologyAdministrationGroup(
store: String = AuthorizedStore.filterName,
): SignedTopologyTransaction[TopologyChangeOp, PartyToParticipant] = {
val currentO = findCurrent(party, store)
val currentO = TopologyStoreId(store) match {
case TopologyStoreId.DomainStore(domainId, _) =>
expectAtMostOneResult(
list(
domainId,
filterParty = party.filterString,
// fetch both REPLACE and REMOVE to correctly determine the next serial
operation = None,
)
)
case TopologyStoreId.AuthorizedStore =>
expectAtMostOneResult(
list_from_authorized(
filterParty = party.filterString,
// fetch both REPLACE and REMOVE to correctly determine the next serial
operation = None,
)
)
}
val (existingPermissions, newSerial, threshold, groupAddressing) = currentO match {
case Some(current) if current.context.operation == TopologyChangeOp.Remove =>
(
@ -1364,7 +1361,7 @@ class TopologyAdministrationGroup(
}
val command = TopologyAdminCommands.Write.Propose(
mapping = PartyToParticipant.create(
mapping = PartyToParticipant(
partyId = party,
domainId = domainId,
threshold = threshold,
@ -1376,7 +1373,6 @@ class TopologyAdministrationGroup(
change = op,
mustFullyAuthorize = mustFullyAuthorize,
store = store,
forceChanges = ForceFlags.none,
)
synchronisation.runAdminCommand(synchronize)(command)
@ -1973,16 +1969,13 @@ class TopologyAdministrationGroup(
),
): SignedTopologyTransaction[TopologyChangeOp, AuthorityOf] = {
val authorityOf = AuthorityOf
.create(
val command = TopologyAdminCommands.Write.Propose(
AuthorityOf(
partyId,
domainId,
PositiveInt.tryCreate(threshold),
parties,
)
.valueOr(error => consoleEnvironment.run(GenericCommandError(error)))
val command = TopologyAdminCommands.Write.Propose(
authorityOf,
),
signedBy = signedBy.toList,
serial = serial,
store = store,

View File

@ -82,7 +82,6 @@ trait Environment extends NamedLogging with AutoCloseable with NoTracing {
histogramInventory = histogramInventory,
histogramFilter = baseFilter,
histogramConfigs = config.monitoring.metrics.histograms,
config.monitoring.metrics.cardinality.unwrap,
loggerFactory,
)
}

View File

@ -12,14 +12,13 @@ import com.daml.metrics.api.{MetricQualification, MetricsContext, MetricsInfoFil
import com.daml.metrics.grpc.DamlGrpcServerMetrics
import com.daml.metrics.{HealthMetrics, HistogramDefinition, MetricsFilterConfig}
import com.digitalasset.canton.config.NonNegativeFiniteDuration
import com.digitalasset.canton.config.RequireTypes.{Port, PositiveInt}
import com.digitalasset.canton.config.RequireTypes.Port
import com.digitalasset.canton.discard.Implicits.DiscardOps
import com.digitalasset.canton.domain.metrics.{MediatorMetrics, SequencerMetrics}
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.metrics.MetricsConfig.JvmMetrics
import com.digitalasset.canton.metrics.MetricsReporterConfig.{Csv, Logging, Prometheus}
import com.digitalasset.canton.participant.metrics.ParticipantMetrics
import com.digitalasset.canton.telemetry.OpenTelemetryFactory
import com.typesafe.scalalogging.LazyLogging
import io.opentelemetry.api.OpenTelemetry
import io.opentelemetry.api.metrics.Meter
@ -27,7 +26,6 @@ import io.opentelemetry.exporter.prometheus.PrometheusHttpServer
import io.opentelemetry.instrumentation.runtimemetrics.java8.*
import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder
import io.opentelemetry.sdk.metrics.`export`.{MetricExporter, MetricReader, PeriodicMetricReader}
import io.opentelemetry.sdk.metrics.internal.state.MetricStorage
import java.io.File
import java.util.concurrent.ScheduledExecutorService
@ -45,7 +43,6 @@ final case class MetricsConfig(
reporters: Seq[MetricsReporterConfig] = Seq.empty,
jvmMetrics: Option[JvmMetrics] = None,
histograms: Seq[HistogramDefinition] = Seq.empty,
cardinality: PositiveInt = PositiveInt.tryCreate(MetricStorage.DEFAULT_MAX_CARDINALITY),
qualifiers: Seq[MetricQualification] = Seq[MetricQualification](
MetricQualification.Errors,
MetricQualification.Latency,
@ -270,15 +267,10 @@ object MetricsRegistry extends LazyLogging {
}
.zip(config.reporters)
.foreach { case (reader, readerConfig) =>
OpenTelemetryFactory
.registerMetricsReaderWithCardinality(
sdkMeterProviderBuilder,
FilteringMetricsReader.create(readerConfig.filters, reader),
config.cardinality.unwrap,
)
.foreach { case (reader, config) =>
sdkMeterProviderBuilder
.registerMetricReader(FilteringMetricsReader.create(config.filters, reader))
.discard
}
sdkMeterProviderBuilder
}

View File

@ -52,10 +52,8 @@ message TrafficConsumed {
uint64 extra_traffic_consumed = 2;
// Remaining free base traffic
uint64 base_traffic_remainder = 3;
// Cost deducted at `timestamp`, only present when traffic was consumed at `timestamp`, otherwise is set to 0
uint64 last_consumed_cost = 4;
// Timestamp at which this state is valid - this timestamp is used to compute the base traffic remainder above
int64 sequencing_timestamp = 5; // in microseconds of UTC time since Unix epoch
int64 sequencing_timestamp = 4; // in microseconds of UTC time since Unix epoch
}
// Message representing a traffic purchase made on behalf of a member
@ -79,12 +77,10 @@ message TrafficState {
int64 extra_traffic_consumed = 2;
// Amount of base traffic remaining
int64 base_traffic_remainder = 3;
// Cost deducted at `timestamp`, only present when traffic was consumed at `timestamp`, otherwise is set to 0
uint64 last_consumed_cost = 4;
// Timestamp at which the state is valid
int64 timestamp = 5;
int64 timestamp = 4;
// Optional serial of the balance update that updated the extra traffic limit
google.protobuf.UInt32Value serial = 6;
google.protobuf.UInt32Value serial = 5;
}
message SetTrafficPurchasedMessage {

View File

@ -348,11 +348,11 @@ object CantonRequireTypes {
/** Length limitation for an [[com.digitalasset.canton.protocol.LfTemplateId]].
* A [[com.digitalasset.canton.protocol.LfTemplateId]] consists of
* - The module name ([[com.daml.lf.data.Ref.DottedName]])
* - The template name ([[com.daml.lf.data.Ref.DottedName]])
* - The module name ([[com.digitalasset.daml.lf.data.Ref.DottedName]])
* - The template name ([[com.digitalasset.daml.lf.data.Ref.DottedName]])
* - The package ID
* - Two separating dots
* Each [[com.daml.lf.data.Ref.DottedName]] can have 1000 chars ([[com.daml.lf.data.Ref.DottedName.maxLength]]).
* Each [[com.digitalasset.daml.lf.data.Ref.DottedName]] can have 1000 chars ([[com.digitalasset.daml.lf.data.Ref.DottedName.maxLength]]).
* So a [[com.digitalasset.canton.protocol.LfTemplateId]] serializes to 1000 + 1000 + 64 + 2 = 2066 chars.
*
* 2066 is beyond the string size for Oracle's `NVARCHAR2` column type unless `max_string_size` is set to `extended`.
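
A minimal sketch of the arithmetic behind the 2066-character bound described above (the value names are illustrative, not Canton identifiers):

```
object LfTemplateIdLengthSketch {
  val dottedNameMax = 1000 // Ref.DottedName.maxLength, per the comment above
  val packageIdLength = 64 // length of a package ID
  val separatingDots = 2   // the two dots joining package ID, module name and template name

  // module name + template name + package ID + two dots = 2066
  val templateIdMax: Int = dottedNameMax + dottedNameMax + packageIdLength + separatingDots
}
```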

View File

@ -5,7 +5,7 @@ package com.digitalasset.canton.data
import cats.syntax.either.*
import cats.syntax.traverse.*
import com.daml.lf.value.{Value, ValueCoder, ValueOuterClass}
import com.digitalasset.daml.lf.value.{Value, ValueCoder, ValueOuterClass}
import com.digitalasset.canton.ProtoDeserializationError.{
FieldNotSet,
OtherError,

View File

@ -3,7 +3,7 @@
package com.digitalasset.canton.data
import com.daml.lf.data.Time.Timestamp
import com.digitalasset.daml.lf.data.Time.Timestamp
import com.daml.logging.entries.{LoggingValue, ToLoggingValue}
import java.time.Duration
@ -19,9 +19,9 @@ sealed trait DeduplicationPeriod extends Product with Serializable
object DeduplicationPeriod {
/** Transforms the `period` into a [[com.daml.lf.data.Time.Timestamp]] to be used for deduplication into the future(deduplicateUntil).
/** Transforms the `period` into a [[com.digitalasset.daml.lf.data.Time.Timestamp]] to be used for deduplication into the future(deduplicateUntil).
* Only used for backwards compatibility
* @param time The time to use for calculating the [[com.daml.lf.data.Time.Timestamp]]. It can either be submission time or current time, based on usage
* @param time The time to use for calculating the [[com.digitalasset.daml.lf.data.Time.Timestamp]]. It can either be submission time or current time, based on usage
* @param period The deduplication period
*/
def deduplicateUntil(

View File

@ -3,13 +3,11 @@
package com.digitalasset.canton.data
import com.daml.lf.data.{Bytes, Ref}
import com.digitalasset.daml.lf.data.{Bytes, Ref}
import com.daml.logging.entries.{LoggingValue, ToLoggingValue}
import com.digitalasset.canton.data.Offset.beforeBegin
import com.google.protobuf.ByteString
import java.io.InputStream
import java.nio.{ByteBuffer, ByteOrder}
/** Offsets into streams with hierarchical addressing.
*
@ -33,16 +31,10 @@ final case class Offset(bytes: Bytes) extends Ordered[Offset] {
def toByteArray: Array[Byte] = bytes.toByteArray
def toHexString: Ref.HexString = bytes.toHexString
def toLong: Long =
if (this == beforeBegin) 0L
else ByteBuffer.wrap(bytes.toByteArray).getLong(1)
}
object Offset {
val beforeBegin: Offset = new Offset(Bytes.Empty)
private val longBasedByteLength: Int = 9 // One byte for the version plus 8 bytes for Long
private val versionUpstreamOffsetsAsLong: Byte = 0
def fromByteString(bytes: ByteString) = new Offset(Bytes.fromByteString(bytes))
@ -52,21 +44,6 @@ object Offset {
def fromHexString(s: Ref.HexString) = new Offset(Bytes.fromHexString(s))
def fromLong(l: Long): Offset =
if (l == 0L) beforeBegin
else
Offset(
com.daml.lf.data.Bytes.fromByteString(
ByteString.copyFrom(
ByteBuffer
.allocate(longBasedByteLength)
.order(ByteOrder.BIG_ENDIAN)
.put(0, versionUpstreamOffsetsAsLong)
.putLong(1, l)
)
)
)
implicit val `Offset to LoggingValue`: ToLoggingValue[Offset] = value =>
LoggingValue.OfString(value.toHexString)
}

View File

@ -3,9 +3,9 @@
package com.digitalasset.canton.data
import com.daml.lf.data.{Bytes, Ref, Time}
import com.daml.lf.transaction.{GlobalKeyWithMaintainers, Node, TransactionVersion}
import com.daml.lf.value.Value
import com.digitalasset.daml.lf.data.{Bytes, Ref, Time}
import com.digitalasset.daml.lf.transaction.{GlobalKeyWithMaintainers, Node, TransactionVersion}
import com.digitalasset.daml.lf.value.Value
/** An explicitly-disclosed contract that has been used during command interpretation
* and enriched with additional contract metadata.

View File

@ -5,7 +5,7 @@ package com.digitalasset.canton.data
import cats.syntax.either.*
import cats.syntax.traverse.*
import com.daml.lf.data.Ref
import com.digitalasset.daml.lf.data.Ref
import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.*
import com.digitalasset.canton.crypto.*

View File

@ -158,7 +158,7 @@ final case class TransactionView private (
subviews = Some(subviews.toProtoV30),
)
/** The global key inputs that the [[com.daml.lf.transaction.ContractStateMachine]] computes
/** The global key inputs that the [[com.digitalasset.daml.lf.transaction.ContractStateMachine]] computes
* while interpreting the root action of the view, enriched with the maintainers of the key and the
* [[com.digitalasset.canton.protocol.LfTransactionVersion]] to be used for serializing the key.
*

View File

@ -56,13 +56,13 @@ import com.google.protobuf.ByteString
* For [[com.digitalasset.canton.protocol.WellFormedTransaction]]s, the creation therefore is not rolled
* back either as the archival can only refer to non-rolled back creates.
* @param resolvedKeys
* Specifies how to resolve [[com.daml.lf.engine.ResultNeedKey]] requests from DAMLe (resulting from e.g., fetchByKey,
* Specifies how to resolve [[com.digitalasset.daml.lf.engine.ResultNeedKey]] requests from DAMLe (resulting from e.g., fetchByKey,
* lookupByKey) when interpreting the view. The resolved contract IDs must be in the [[coreInputs]].
* Stores only the resolution difference between this view's global key inputs
* [[com.digitalasset.canton.data.TransactionView.globalKeyInputs]]
* and the aggregated global key inputs from the subviews
* (see [[com.digitalasset.canton.data.TransactionView.globalKeyInputs]] for the aggregation algorithm).
* In [[com.daml.lf.transaction.ContractKeyUniquenessMode.Strict]],
* In [[com.digitalasset.daml.lf.transaction.ContractKeyUniquenessMode.Strict]],
* the [[com.digitalasset.canton.data.FreeKey]] resolutions must be checked during conflict detection.
* @param actionDescription The description of the root action of the view
* @param rollbackContext The rollback context of the root action of the view.
@ -355,7 +355,7 @@ object ViewParticipantData
* and the key is not in [[ViewParticipantData.resolvedKeys]].
* @throws com.digitalasset.canton.serialization.SerializationCheckFailed if this instance cannot be serialized
*/
@throws[SerializationCheckFailed[com.daml.lf.value.ValueCoder.EncodeError]]
@throws[SerializationCheckFailed[com.digitalasset.daml.lf.value.ValueCoder.EncodeError]]
def tryCreate(hashOps: HashOps)(
coreInputs: Map[LfContractId, InputContract],
createdCore: Seq[CreatedContract],

View File

@ -18,7 +18,6 @@ import com.digitalasset.canton.{
DoNotTraverseLikeFuture,
}
import java.util.concurrent.CompletionException
import scala.concurrent.{Awaitable, ExecutionContext, Future}
import scala.util.chaining.*
import scala.util.{Failure, Success, Try}
@ -82,14 +81,8 @@ object FutureUnlessShutdown {
apply(f.transform({
case Success(value) => Success(UnlessShutdown.Outcome(value))
case Failure(AbortedDueToShutdownException(_)) => Success(UnlessShutdown.AbortedDueToShutdown)
case Failure(ce: CompletionException) =>
ce.getCause match {
case AbortedDueToShutdownException(_) => Success(UnlessShutdown.AbortedDueToShutdown)
case _ => Failure(ce)
}
case Failure(other) => Failure(other)
}))
}
/** Monad combination of `Future` and [[UnlessShutdown]]

View File

@ -5,12 +5,12 @@ package com.digitalasset.canton.logging.pretty
import cats.Show.Shown
import com.daml.error.utils.DecodedCantonError
import com.daml.lf.data.Ref
import com.daml.lf.data.Ref.{DottedName, PackageId, QualifiedName}
import com.daml.lf.transaction.ContractStateMachine.ActiveLedgerState
import com.daml.lf.transaction.TransactionErrors.*
import com.daml.lf.transaction.Versioned
import com.daml.lf.value.Value
import com.digitalasset.daml.lf.data.Ref
import com.digitalasset.daml.lf.data.Ref.{DottedName, PackageId, QualifiedName}
import com.digitalasset.daml.lf.transaction.ContractStateMachine.ActiveLedgerState
import com.digitalasset.daml.lf.transaction.TransactionErrors.*
import com.digitalasset.daml.lf.transaction.Versioned
import com.digitalasset.daml.lf.value.Value
import com.daml.nonempty.{NonEmpty, NonEmptyUtil}
import com.digitalasset.canton.config.RequireTypes.{Port, RefinedNumeric}
import com.digitalasset.canton.data.DeduplicationPeriod
@ -189,10 +189,10 @@ trait PrettyInstances {
implicit def prettyLfQualifiedName: Pretty[QualifiedName] =
prettyOfString(qname => show"${qname.module}:${qname.name}")
implicit def prettyLfIdentifier: Pretty[com.daml.lf.data.Ref.Identifier] =
implicit def prettyLfIdentifier: Pretty[com.digitalasset.daml.lf.data.Ref.Identifier] =
prettyOfString(id => show"${id.packageId}:${id.qualifiedName}")
implicit def prettyLfPackageName: Pretty[com.daml.lf.data.Ref.PackageName] =
implicit def prettyLfPackageName: Pretty[com.digitalasset.daml.lf.data.Ref.PackageName] =
prettyOfString(packageName => show"${packageName.toString}")
implicit def prettyLfContractId: Pretty[LfContractId] = prettyOfString {

View File

@ -3,10 +3,10 @@
package com.digitalasset
import com.daml.lf.command.ReplayCommand
import com.daml.lf.data.{IdString, Ref, Time}
import com.daml.lf.transaction.{ContractStateMachine, Versioned}
import com.daml.lf.value.Value
import com.digitalasset.daml.lf.command.ReplayCommand
import com.digitalasset.daml.lf.data.{IdString, Ref, Time}
import com.digitalasset.daml.lf.transaction.{ContractStateMachine, Versioned}
import com.digitalasset.daml.lf.value.Value
import com.digitalasset.canton.data.{Counter, CounterCompanion}
import com.digitalasset.canton.serialization.DeterministicEncoding.encodeLong
import com.google.protobuf.ByteString

View File

@ -4,7 +4,7 @@
package com.digitalasset.canton.protocol
import cats.syntax.either.*
import com.daml.lf.data.Bytes
import com.digitalasset.daml.lf.data.Bytes
import com.digitalasset.canton.checked
import com.digitalasset.canton.config.CantonRequireTypes.String255
import com.digitalasset.canton.config.RequireTypes.NonNegativeInt

View File

@ -293,13 +293,13 @@ object OnboardingRestriction {
* Must be greater than `maxSequencingTime` specified by a participant,
* practically also requires extra slack to allow clock skew between participant and sequencer.
* @param onboardingRestriction current onboarding restrictions for participants
* @param acsCommitmentsCatchUpConfig Optional parameters of type [[com.digitalasset.canton.protocol.AcsCommitmentsCatchUpConfig]].
* Defined starting with protobuf version v2 and protocol version v30.
* If None, the catch-up mode is disabled: the participant does not trigger the
* catch-up mode when lagging behind.
* If not None, it specifies the number of reconciliation intervals that the
* participant skips in catch-up mode, and the number of catch-up intervals
a participant should lag behind in order to enter catch-up mode.
* @param catchUpParameters Optional parameters of type [[com.digitalasset.canton.protocol.AcsCommitmentsCatchUpConfig]].
* Defined starting with protobuf version v2 and protocol version v30.
* If None, the catch-up mode is disabled: the participant does not trigger the
* catch-up mode when lagging behind.
* If not None, it specifies the number of reconciliation intervals that the
* participant skips in catch-up mode, and the number of catch-up intervals
a participant should lag behind in order to enter catch-up mode.
*
* @throws DynamicDomainParameters$.InvalidDynamicDomainParameters
* if `mediatorDeduplicationTimeout` is less than twice of `ledgerTimeRecordTimeTolerance`.

View File

@ -3,7 +3,7 @@
package com.digitalasset.canton.protocol
import com.daml.lf.data.Bytes as LfBytes
import com.digitalasset.daml.lf.data.Bytes as LfBytes
import com.digitalasset.canton.crypto.Salt
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.serialization.ProtoConverter

View File

@ -4,8 +4,8 @@
package com.digitalasset.canton.protocol
import cats.syntax.either.*
import com.daml.lf.data.Ref
import com.daml.lf.value.{ValueCoder, ValueOuterClass}
import com.digitalasset.daml.lf.data.Ref
import com.digitalasset.daml.lf.value.{ValueCoder, ValueOuterClass}
import com.digitalasset.canton.serialization.ProtoConverter
import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.digitalasset.canton.{LfVersioned, ProtoDeserializationError}

View File

@ -4,7 +4,7 @@
package com.digitalasset.canton.protocol
import cats.syntax.either.*
import com.daml.lf.data.Bytes
import com.digitalasset.daml.lf.data.Bytes
import com.digitalasset.canton.ProtoDeserializationError.ValueDeserializationError
import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.google.protobuf.ByteString

View File

@ -4,8 +4,8 @@
package com.digitalasset.canton.protocol
import cats.syntax.either.*
import com.daml.lf.data.Ref
import com.daml.lf.data.Ref.Identifier
import com.digitalasset.daml.lf.data.Ref
import com.digitalasset.daml.lf.data.Ref.Identifier
import com.digitalasset.canton.ProtoDeserializationError.ValueDeserializationError
object RefIdentifierSyntax {

View File

@ -5,7 +5,7 @@ package com.digitalasset.canton.protocol
import cats.implicits.toTraverseOps
import cats.syntax.either.*
import com.daml.lf.value.ValueCoder
import com.digitalasset.daml.lf.value.ValueCoder
import com.digitalasset.canton.ProtoDeserializationError.ValueConversionError
import com.digitalasset.canton.crypto.Salt
import com.digitalasset.canton.data.{CantonTimestamp, ProcessedDisclosedContract}

View File

@ -3,7 +3,7 @@
package com.digitalasset.canton.protocol
import com.daml.lf.data.Bytes as LfBytes
import com.digitalasset.daml.lf.data.Bytes as LfBytes
import com.digitalasset.canton.ProtoDeserializationError
import com.digitalasset.canton.data.{DeduplicationPeriod, Offset}
import com.digitalasset.canton.serialization.ProtoConverter.{DurationConverter, ParsingResult}

View File

@ -4,8 +4,8 @@
package com.digitalasset.canton.protocol
import cats.syntax.either.*
import com.daml.lf.transaction.{TransactionCoder, TransactionOuterClass}
import com.daml.lf.value.ValueCoder
import com.digitalasset.daml.lf.transaction.{TransactionCoder, TransactionOuterClass}
import com.digitalasset.daml.lf.value.ValueCoder
import com.digitalasset.canton.ProtoDeserializationError.ValueConversionError
import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult
import com.digitalasset.canton.serialization.{

View File

@ -3,7 +3,7 @@
package com.digitalasset.canton.protocol
import com.daml.lf.data.Bytes
import com.digitalasset.daml.lf.data.Bytes
import com.digitalasset.canton.crypto.Hash
/** A hash-based identifier for contracts.

View File

@ -12,22 +12,12 @@ import com.digitalasset.canton.topology.client.TopologySnapshot
import com.digitalasset.canton.topology.{ParticipantId, PartyId}
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.ShowUtil.*
import com.digitalasset.canton.util.{Checked, ErrorUtil, SetCover}
import com.digitalasset.canton.util.{Checked, ErrorUtil}
import scala.concurrent.{ExecutionContext, Future}
object RootHashMessageRecipients extends HasLoggerName {
/** Computes the list of recipients for the root hash messages of a confirmation request.
* Each recipient returned is either a participant or a group address
* [[com.digitalasset.canton.sequencing.protocol.ParticipantsOfParty]].
* The group addresses can be overlapping, but a participant member recipient will only be present if it is
* not included in any of the group addresses.
*
* @param informees informees of the confirmation request
* @param ipsSnapshot topology snapshot used at submission time
* @return list of root hash message recipients
*/
def rootHashRecipientsForInformees(
informees: Set[LfPartyId],
ipsSnapshot: TopologySnapshot,
@ -47,10 +37,10 @@ object RootHashMessageRecipients extends HasLoggerName {
)
)
)
participantsOfGroupAddressedInformees <- ipsSnapshot
.activeParticipantsOfPartiesWithGroupAddressing(
informeesList
)
groupAddressedInformees <- ipsSnapshot.partiesWithGroupAddressing(informeesList)
participantsOfGroupAddressedInformees <- ipsSnapshot.activeParticipantsOfParties(
groupAddressedInformees.toList
)
} yield {
// If there are several group-addressed informees with overlapping participants,
// we actually look for a set cover. It doesn't matter which one we pick.
@ -96,45 +86,28 @@ object RootHashMessageRecipients extends HasLoggerName {
} ++ directlyAddressedParticipants.map { participant =>
MemberRecipient(participant) -> Set(participant)
}
SetCover.greedy(sets)
// TODO(#13883) Use a set cover for the recipients instead of all of them
// SetCover.greedy(sets.toMap)
sets.map { case (recipient, _) => recipient }.toSeq
}
}
/** Validate the recipients of root hash messages received by a participant in Phase 3.
*/
def validateRecipientsOnParticipant(recipients: Recipients): Checked[Nothing, String, Unit] = {
// group members must be of size 2, which must be participant and mediator, due to previous checks
val validGroups = recipients.trees.collect {
case RecipientsTree(group, Seq()) if group.sizeCompare(2) == 0 => group
recipients.asSingleGroup match {
case Some(group) if group.sizeCompare(2) == 0 =>
// group members must be participantId and mediator, due to previous checks
Checked.unit
case Some(group) =>
val hasGroupAddressing = group.collect { case ParticipantsOfParty(party) =>
party.toLf
}.nonEmpty
if (hasGroupAddressing) Checked.unit
else Checked.continue(s"The root hash message has an invalid recipient group.\n$recipients")
case _ =>
Checked.continue(s"The root hash message has more than one recipient group.\n$recipients")
}
if (validGroups.size == recipients.trees.size) {
val allUseGroupAddressing = validGroups.forall {
_.exists {
case ParticipantsOfParty(_) => true
case _ => false
}
}
// Due to how rootHashRecipientsForInformees() computes recipients, if there is more than one group,
// they must all address the participant using group addressing.
if (allUseGroupAddressing || validGroups.sizeCompare(1) == 0) Checked.unit
else
Checked.continue(
s"The root hash message has more than one recipient group, not all using group addressing.\n$recipients"
)
} else Checked.continue(s"The root hash message has invalid recipient groups.\n$recipients")
}
/** Validate the recipients of root hash messages received by a mediator in Phase 2.
*
* A recipient is valid if each recipient tree:
* - contains only a single recipient group (no children)
* - the recipient group is of size 2
* - the recipient group contains:
* - the mediator group recipient
* - either a participant member recipient or a ParticipantsOfParty group recipient
*/
def wrongAndCorrectRecipients(
recipientsList: Seq[Recipients],
mediator: MediatorGroupRecipient,
@ -142,14 +115,18 @@ object RootHashMessageRecipients extends HasLoggerName {
val (wrongRecipients, correctRecipients) = recipientsList.flatMap { recipients =>
recipients.trees.toList.map {
case tree @ RecipientsTree(group, Seq()) =>
val hasMediator = group.contains(mediator)
val hasParticipantOrPop = group.exists {
case MemberRecipient(_: ParticipantId) | ParticipantsOfParty(_) => true
val participantCount = group.count {
case MemberRecipient(_: ParticipantId) => true
case _ => false
}
val groupAddressCount = group.count {
case ParticipantsOfParty(_) => true
case _ => false
}
val groupAddressingBeingUsed = groupAddressCount > 0
Either.cond(
group.sizeCompare(2) == 0 && hasMediator && hasParticipantOrPop,
((group.size == 2) || (groupAddressingBeingUsed && group.size >= 2)) &&
group.contains(mediator) && (participantCount + groupAddressCount > 0),
group,
tree,
)
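
The `SetCover.greedy(sets)` call referenced in the hunk above is a standard greedy set-cover heuristic. A hedged, self-contained sketch of that heuristic (assumed behaviour, not Canton's actual `SetCover` utility): at each step, pick the recipient whose participant set covers the most still-uncovered participants.

```
object GreedySetCoverSketch {
  // Given candidate recipients and the participants each one reaches, return a
  // small subset of recipients that together reach every participant.
  def greedy[R, E](sets: Map[R, Set[E]]): Seq[R] = {
    var uncovered = sets.values.flatten.toSet
    var chosen = Vector.empty[R]
    while (uncovered.nonEmpty) {
      // Pick the recipient covering the most participants not yet covered.
      val (best, covered) = sets.maxBy { case (_, s) => (s & uncovered).size }
      chosen :+= best
      uncovered --= covered
    }
    chosen
  }
}
```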

View File

@ -3,10 +3,10 @@
package com.digitalasset.canton
import com.daml.lf.crypto.Hash
import com.daml.lf.data.Ref
import com.daml.lf.transaction.*
import com.daml.lf.value.Value
import com.digitalasset.daml.lf.crypto.Hash
import com.digitalasset.daml.lf.data.Ref
import com.digitalasset.daml.lf.transaction.*
import com.digitalasset.daml.lf.value.Value
import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.data.ViewType
import com.digitalasset.canton.protocol.messages.EncryptedViewMessage

View File

@ -59,16 +59,15 @@ final case class SubmissionRequest private (
@VisibleForTesting
def isConfirmationRequest: Boolean = {
val hasParticipantOrPopRecipient = batch.allRecipients.exists {
case MemberRecipient(_: ParticipantId) => true
case ParticipantsOfParty(_) => true
case _ => false
val hasParticipantRecipient = batch.allMembers.exists {
case _: ParticipantId => true
case _: Member => false
}
val hasMediatorRecipient = batch.allRecipients.exists {
case _: MediatorGroupRecipient => true
case _: Recipient => false
}
hasParticipantOrPopRecipient && hasMediatorRecipient
hasParticipantRecipient && hasMediatorRecipient
}
// Caches the serialized request to be able to do checks on its size without re-serializing

View File

@ -25,35 +25,31 @@ final case class TrafficState(
extraTrafficPurchased: NonNegativeLong,
extraTrafficConsumed: NonNegativeLong,
baseTrafficRemainder: NonNegativeLong,
lastConsumedCost: NonNegativeLong,
timestamp: CantonTimestamp,
serial: Option[PositiveInt],
) extends PrettyPrinting {
def extraTrafficRemainder: Long = extraTrafficPurchased.value - extraTrafficConsumed.value
// Need big decimal here because it could overflow a long especially if extraTrafficPurchased == Long.MAX
lazy val availableTraffic: BigDecimal =
BigDecimal(extraTrafficRemainder) + BigDecimal(baseTrafficRemainder.value)
def availableTraffic: Long = extraTrafficRemainder + baseTrafficRemainder.value
def toProtoV30: v30.TrafficState = v30.TrafficState(
extraTrafficPurchased = extraTrafficPurchased.value,
extraTrafficConsumed = extraTrafficConsumed.value,
baseTrafficRemainder = baseTrafficRemainder.value,
lastConsumedCost = lastConsumedCost.value,
timestamp = timestamp.toProtoPrimitive,
serial = serial.map(_.value),
)
def toTrafficConsumed(member: Member): TrafficConsumed =
TrafficConsumed(
member = member,
sequencingTimestamp = timestamp,
extraTrafficConsumed = extraTrafficConsumed,
baseTrafficRemainder = baseTrafficRemainder,
lastConsumedCost = lastConsumedCost,
)
def toTrafficConsumed(member: Member): TrafficConsumed = TrafficConsumed(
member = member,
sequencingTimestamp = timestamp,
extraTrafficConsumed = extraTrafficConsumed,
baseTrafficRemainder = baseTrafficRemainder,
)
def toTrafficReceipt: TrafficReceipt = TrafficReceipt(
consumedCost = lastConsumedCost,
def toTrafficReceipt(
consumedCost: NonNegativeLong
): TrafficReceipt = TrafficReceipt(
consumedCost = consumedCost,
extraTrafficConsumed = extraTrafficConsumed,
baseTrafficRemainder = baseTrafficRemainder,
)
@ -71,7 +67,6 @@ final case class TrafficState(
param("extraTrafficLimit", _.extraTrafficPurchased),
param("extraTrafficConsumed", _.extraTrafficConsumed),
param("baseTrafficRemainder", _.baseTrafficRemainder),
param("lastConsumedCost", _.lastConsumedCost),
param("timestamp", _.timestamp),
paramIfDefined("serial", _.serial),
)
@ -83,15 +78,13 @@ object TrafficState {
pp >> Some(v.extraTrafficPurchased.value)
pp >> Some(v.extraTrafficConsumed.value)
pp >> Some(v.baseTrafficRemainder.value)
pp >> Some(v.lastConsumedCost.value)
pp >> v.timestamp
pp >> v.serial.map(_.value)
}
implicit val getResultTrafficState: GetResult[Option[TrafficState]] = {
GetResult
.createGetTuple6(
nonNegativeLongOptionGetResult,
.createGetTuple5(
nonNegativeLongOptionGetResult,
nonNegativeLongOptionGetResult,
nonNegativeLongOptionGetResult,
@ -105,7 +98,6 @@ object TrafficState {
NonNegativeLong.zero,
NonNegativeLong.zero,
NonNegativeLong.zero,
NonNegativeLong.zero,
CantonTimestamp.Epoch,
Option.empty,
)
@ -114,7 +106,6 @@ object TrafficState {
NonNegativeLong.zero,
NonNegativeLong.zero,
NonNegativeLong.zero,
NonNegativeLong.zero,
timestamp,
Option.empty,
)
@ -125,14 +116,12 @@ object TrafficState {
extraTrafficLimit <- ProtoConverter.parseNonNegativeLong(trafficStateP.extraTrafficPurchased)
extraTrafficConsumed <- ProtoConverter.parseNonNegativeLong(trafficStateP.extraTrafficConsumed)
baseTrafficRemainder <- ProtoConverter.parseNonNegativeLong(trafficStateP.baseTrafficRemainder)
lastConsumedCost <- ProtoConverter.parseNonNegativeLong(trafficStateP.lastConsumedCost)
timestamp <- CantonTimestamp.fromProtoPrimitive(trafficStateP.timestamp)
serial <- trafficStateP.serial.traverse(ProtoConverter.parsePositiveInt)
} yield TrafficState(
extraTrafficLimit,
extraTrafficConsumed,
baseTrafficRemainder,
lastConsumedCost,
timestamp,
serial,
)

View File

@ -24,18 +24,18 @@ import slick.jdbc.GetResult
* @param sequencingTimestamp sequencing timestamp at which this traffic consumed state is valid
* @param extraTrafficConsumed extra traffic consumed at this sequencing timestamp
* @param baseTrafficRemainder base traffic remaining at this sequencing timestamp
* @param lastConsumedCost last cost deducted from the traffic balance (base and if not enough, extra)
*/
final case class TrafficConsumed(
member: Member,
sequencingTimestamp: CantonTimestamp,
extraTrafficConsumed: NonNegativeLong,
baseTrafficRemainder: NonNegativeLong,
lastConsumedCost: NonNegativeLong,
) extends PrettyPrinting {
def toTrafficReceipt: TrafficReceipt = TrafficReceipt(
consumedCost = lastConsumedCost,
def toTrafficReceipt(
consumedCost: NonNegativeLong
): TrafficReceipt = TrafficReceipt(
consumedCost = consumedCost,
extraTrafficConsumed,
baseTrafficRemainder,
)
@ -48,7 +48,6 @@ final case class TrafficConsumed(
trafficPurchased.map(_.extraTrafficPurchased).getOrElse(NonNegativeLong.zero),
extraTrafficConsumed,
baseTrafficRemainder,
lastConsumedCost,
trafficPurchased
.map(_.sequencingTimestamp.max(sequencingTimestamp))
.getOrElse(sequencingTimestamp),
@ -106,7 +105,6 @@ final case class TrafficConsumed(
copy(
baseTrafficRemainder = baseTrafficRemainderAtCurrentTime,
sequencingTimestamp = timestamp,
lastConsumedCost = NonNegativeLong.zero,
)
}
@ -129,7 +127,6 @@ final case class TrafficConsumed(
baseTrafficRemainder = baseTrafficRemainderAfterConsume,
extraTrafficConsumed = this.extraTrafficConsumed + extraTrafficConsumed,
sequencingTimestamp = sequencingTimestamp,
lastConsumedCost = cost,
)
}
@ -160,7 +157,6 @@ final case class TrafficConsumed(
param("member", _.member),
param("extraTrafficConsumed", _.extraTrafficConsumed),
param("baseTrafficRemainder", _.baseTrafficRemainder),
param("lastConsumedCost", _.lastConsumedCost),
param("sequencingTimestamp", _.sequencingTimestamp),
)
@ -170,7 +166,6 @@ final case class TrafficConsumed(
extraTrafficConsumed = extraTrafficConsumed.value,
baseTrafficRemainder = baseTrafficRemainder.value,
sequencingTimestamp = sequencingTimestamp.toProtoPrimitive,
lastConsumedCost = lastConsumedCost.value,
)
}
}
@ -182,13 +177,7 @@ object TrafficConsumed {
/** TrafficConsumed object for members the first time they submit a submission request
*/
def init(member: Member): TrafficConsumed =
TrafficConsumed(
member,
CantonTimestamp.MinValue,
NonNegativeLong.zero,
NonNegativeLong.zero,
NonNegativeLong.zero,
)
TrafficConsumed(member, CantonTimestamp.MinValue, NonNegativeLong.zero, NonNegativeLong.zero)
def empty(
member: Member,
@ -199,18 +188,16 @@ object TrafficConsumed {
timestamp,
NonNegativeLong.zero,
baseTraffic,
NonNegativeLong.zero,
)
implicit val trafficConsumedOrdering: Ordering[TrafficConsumed] =
Ordering.by(_.sequencingTimestamp)
implicit val trafficConsumedGetResult: GetResult[TrafficConsumed] =
GetResult
.createGetTuple5[Member, CantonTimestamp, NonNegativeLong, NonNegativeLong, NonNegativeLong]
.andThen { case (member, ts, trafficConsumed, baseTraffic, lastConsumedCost) =>
TrafficConsumed(member, ts, trafficConsumed, baseTraffic, lastConsumedCost)
}
GetResult.createGetTuple4[Member, CantonTimestamp, NonNegativeLong, NonNegativeLong].andThen {
case (member, ts, trafficConsumed, baseTraffic) =>
TrafficConsumed(member, ts, trafficConsumed, baseTraffic)
}
def fromProtoV30(trafficConsumedP: TrafficConsumedP): ParsingResult[TrafficConsumed] =
for {
@ -224,14 +211,10 @@ object TrafficConsumed {
sequencingTimestamp <- CantonTimestamp.fromProtoPrimitive(
trafficConsumedP.sequencingTimestamp
)
lastConsumedCost <- ProtoConverter.parseNonNegativeLong(
trafficConsumedP.lastConsumedCost
)
} yield TrafficConsumed(
member = member,
extraTrafficConsumed = extraTrafficConsumed,
baseTrafficRemainder = baseTrafficRemainder,
sequencingTimestamp = sequencingTimestamp,
lastConsumedCost = lastConsumedCost,
)
}
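
As a hedged illustration of the deduction order mentioned in the parameter docs above ("base and if not enough, extra"), and not Canton's actual `consume` implementation, a cost can be split into the part served from base traffic and the remainder charged against extra traffic:

```
object TrafficDeductionSketch {
  // Returns (new base traffic remainder, extra traffic consumed by this event).
  def deduct(cost: Long, baseTrafficRemainder: Long): (Long, Long) = {
    val fromBase = math.min(cost, baseTrafficRemainder)
    val fromExtra = cost - fromBase
    (baseTrafficRemainder - fromBase, fromExtra)
  }
}
```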

View File

@ -43,7 +43,6 @@ class TrafficConsumedManager(
current.copy(
extraTrafficConsumed = trafficReceipt.extraTrafficConsumed,
baseTrafficRemainder = trafficReceipt.baseTrafficRemainder,
lastConsumedCost = trafficReceipt.consumedCost,
sequencingTimestamp = timestamp,
)
case current => current
@ -102,7 +101,7 @@ class TrafficConsumedManager(
}.discard
Left(value)
case Right(_) =>
val newState = updateAndGet {
val newState = trafficConsumed.updateAndGet {
_.consume(timestamp, params, eventCost, logger)
}
logger.debug(s"Consumed ${eventCost.value} for $member at $timestamp: new state $newState")

View File

@ -5,7 +5,7 @@ package com.digitalasset.canton.serialization
import cats.syntax.either.*
import cats.syntax.traverse.*
import com.daml.lf.data.Ref
import com.digitalasset.daml.lf.data.Ref
import com.daml.nonempty.NonEmpty
import com.daml.nonempty.catsinstances.*
import com.digitalasset.canton.ProtoDeserializationError.{

View File

@ -435,6 +435,23 @@ object TopologyManagerError extends TopologyManagerErrorGroup {
with TopologyManagerError
}
@Explanation(
"This error indicates that a threshold in the submitted transaction was higher than the number of members that would have to satisfy that threshold."
)
@Resolution(
"""Submit the topology transaction with a lower threshold.
|The metadata details of this error contain the expected maximum in the field ``expectedMaximum``."""
)
object InvalidThreshold
extends ErrorCode(id = "INVALID_THRESHOLD", ErrorCategory.InvalidIndependentOfSystemState) {
final case class ThresholdTooHigh(actual: Int, expectedMaximum: Int)(implicit
override val loggingContext: ErrorLoggingContext
) extends CantonError.Impl(
cause = s"Threshold must not be higher than $expectedMaximum, but was $actual."
)
with TopologyManagerError
}
@Explanation(
"This error indicates that members referenced in a topology transaction have not declared at least one signing key or at least 1 encryption key or both."
)
@ -456,20 +473,6 @@ object TopologyManagerError extends TopologyManagerErrorGroup {
with TopologyManagerError
}
object PartyExceedsHostingLimit
extends ErrorCode(
id = "PARTY_EXCEEDS_HOSTING_LIMIT",
ErrorCategory.InvalidIndependentOfSystemState,
) {
final case class Reject(party: PartyId, limit: Int, numParticipants: Int)(implicit
override val loggingContext: ErrorLoggingContext
) extends CantonError.Impl(
cause =
s"Party $party exceeds hosting limit of $limit with desired number of $numParticipants hosting participant."
)
with TopologyManagerError
}
@Explanation(
"This error indicates that the topology transaction references members that are currently unknown."
)
@ -569,7 +572,7 @@ object TopologyManagerError extends TopologyManagerErrorGroup {
object InvalidTopologyMapping
extends ErrorCode(
id = "INVALID_TOPOLOGY_MAPPING",
ErrorCategory.InvalidIndependentOfSystemState,
ErrorCategory.InvalidGivenCurrentSystemStateOther,
) {
final case class Reject(
description: String
@ -602,36 +605,7 @@ object TopologyManagerError extends TopologyManagerErrorGroup {
}
)
with TopologyManagerError
final case class MissingDomainParameters(effectiveTime: EffectiveTime)(implicit
override val loggingContext: ErrorLoggingContext
) extends CantonError.Impl(
cause = s"Missing domain parameters at $effectiveTime"
)
with TopologyManagerError
}
@Explanation(
"""This error indicates that the namespace is already used by another entity."""
)
@Resolution(
"""Change the namespace used in the submitted topology transaction."""
)
object NamespaceAlreadyInUse
extends ErrorCode(
id = "NAMESPACE_ALREADY_IN_USE",
ErrorCategory.InvalidGivenCurrentSystemStateResourceExists,
) {
final case class Reject(
namespace: Namespace
)(implicit
override val loggingContext: ErrorLoggingContext
) extends CantonError.Impl(
cause = s"The namespace $namespace is already in use by another entity."
)
with TopologyManagerError
}
abstract class DomainErrorGroup extends ErrorGroup()
abstract class ParticipantErrorGroup extends ErrorGroup()
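
A hedged sketch of the kind of check behind the `INVALID_THRESHOLD` error described above (illustrative only, not the actual Canton validation): a threshold is rejected when it exceeds the number of members that could satisfy it.

```
object ThresholdCheckSketch {
  // Returns an error message when the threshold cannot possibly be met.
  def checkThreshold(threshold: Int, numberOfMembers: Int): Either[String, Unit] =
    Either.cond(
      threshold <= numberOfMembers,
      (),
      s"Threshold must not be higher than $numberOfMembers, but was $threshold."
    )
}
```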

View File

@ -167,7 +167,6 @@ class TopologyStateProcessor(
s"${enqueuingOrStoring} topology transaction ${idx + 1}/$ln ${tx.operation} ${tx.mapping} with ts=$effective (epsilon=${epsilon} ms)"
)
case (ValidatedTopologyTransaction(tx, Some(r), _), idx) =>
// TODO(i19737): we need to emit a security alert, if the rejection is due to a malicious broadcast
logger.info(
s"Rejected transaction ${idx + 1}/$ln ${tx.operation} ${tx.mapping} at ts=$effective (epsilon=${epsilon} ms) due to $r"
)
@ -297,13 +296,18 @@ class TopologyStateProcessor(
authValidator
.validateAndUpdateHeadAuthState(
effective.value,
toValidate,
inStore,
Seq(toValidate),
inStore.map(tx => tx.mapping.uniqueKey -> tx).toList.toMap,
expectFullAuthorization,
)
)
.subflatMap { case (_, tx) =>
tx.rejectionReason.toLeft(tx.transaction)
.subflatMap { case (_, txs) =>
// TODO(#12390) proper error
txs.headOption
.toRight[TopologyTransactionRejection](
TopologyTransactionRejection.Other("expected validation result doesn't exist")
)
.flatMap(tx => tx.rejectionReason.toLeft(tx.transaction))
}
}

View File

@ -4,7 +4,7 @@
package com.digitalasset.canton.topology.client
import cats.data.EitherT
import com.daml.lf.data.Ref.PackageId
import com.digitalasset.daml.lf.data.Ref.PackageId
import com.digitalasset.canton.concurrent.FutureSupervisor
import com.digitalasset.canton.config.{BatchingConfig, CachingConfigs, ProcessingTimeout}
import com.digitalasset.canton.crypto.SigningPublicKey

View File

@ -8,7 +8,7 @@ import cats.data.EitherT
import cats.syntax.functor.*
import cats.syntax.functorFilter.*
import cats.syntax.parallel.*
import com.daml.lf.data.Ref.PackageId
import com.digitalasset.daml.lf.data.Ref.PackageId
import com.digitalasset.canton.concurrent.HasFutureSupervision
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.crypto.{EncryptionPublicKey, SigningPublicKey}
@ -299,10 +299,6 @@ trait PartyTopologySnapshotClient {
parties: Seq[LfPartyId]
)(implicit traceContext: TraceContext): Future[Set[LfPartyId]]
def activeParticipantsOfPartiesWithGroupAddressing(
parties: Seq[LfPartyId]
)(implicit traceContext: TraceContext): Future[Map[LfPartyId, Set[ParticipantId]]]
/** Returns a list of all known parties on this domain */
def inspectKnownParties(
filterParty: String,
@ -845,11 +841,6 @@ private[client] trait PartyTopologySnapshotLoader
): Future[Set[LfPartyId]] =
loadAndMapPartyInfos(parties, identity, _.groupAddressing).map(_.keySet)
final override def activeParticipantsOfPartiesWithGroupAddressing(
parties: Seq[LfPartyId]
)(implicit traceContext: TraceContext): Future[Map[LfPartyId, Set[ParticipantId]]] =
loadAndMapPartyInfos(parties, _.participants.keySet, _.groupAddressing)
final override def consortiumThresholds(
parties: Set[LfPartyId]
)(implicit traceContext: TraceContext): Future[Map[LfPartyId, PositiveInt]] =

View File

@ -5,7 +5,7 @@ package com.digitalasset.canton.topology.client
import cats.data.EitherT
import cats.syntax.functor.*
import com.daml.lf.data.Ref.PackageId
import com.digitalasset.daml.lf.data.Ref.PackageId
import com.daml.nameof.NameOf.functionFullName
import com.digitalasset.canton.SequencerCounter
import com.digitalasset.canton.concurrent.FutureSupervisor

View File

@ -5,7 +5,7 @@ package com.digitalasset.canton.topology.client
import cats.data.EitherT
import cats.syntax.functorFilter.*
import com.daml.lf.data.Ref.PackageId
import com.digitalasset.daml.lf.data.Ref.PackageId
import com.digitalasset.canton.LfPartyId
import com.digitalasset.canton.crypto.{KeyPurpose, SigningPublicKey}
import com.digitalasset.canton.data.CantonTimestamp

View File

@ -8,12 +8,12 @@ import com.digitalasset.canton.crypto.{Fingerprint, SigningPublicKey}
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.topology.Namespace
import com.digitalasset.canton.topology.processing.AuthorizedTopologyTransaction.AuthorizedNamespaceDelegation
import com.digitalasset.canton.topology.transaction.TopologyChangeOp.{Remove, Replace}
import com.digitalasset.canton.topology.transaction.*
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.ErrorUtil
import com.digitalasset.canton.util.ShowUtil.*
import scala.annotation.tailrec
import scala.collection.concurrent.TrieMap
import scala.math.Ordering.Implicits.*
@ -35,8 +35,8 @@ object AuthorizedTopologyTransaction {
/** Returns true if the namespace delegation is a root certificate
*
* A root certificate is defined by a namespace delegation that authorizes the
* key f to act on the namespace spanned by f, authorized by f.
* A root certificate is defined by the namespace delegation that authorizes the
* key f to act on namespace spanned by f, authorized by f.
*/
def isRootCertificate(namespaceDelegation: AuthorizedNamespaceDelegation): Boolean = {
NamespaceDelegation.isRootCertificate(namespaceDelegation.transaction)
@ -44,7 +44,11 @@ object AuthorizedTopologyTransaction {
/** Returns true if the namespace delegation is a root certificate or a root delegation
*
* A root delegation is a namespace delegation whose target key may be used to authorize other namespace delegations.
* A root certificate is defined by the namespace delegation that authorizes the
* key f to act on namespace spanned by f, authorized by f.
*
* A root delegation is defined by the namespace delegation that authorizes the
* key g to act on namespace spanned by f.
*/
def isRootDelegation(namespaceDelegation: AuthorizedNamespaceDelegation): Boolean = {
NamespaceDelegation.isRootDelegation(namespaceDelegation.transaction)
@ -52,45 +56,49 @@ object AuthorizedTopologyTransaction {
}
/** Stores a set of namespace delegations, tracks dependencies and
* determines which keys are authorized to sign on behalf of a namespace.
/** maintain a dependency graph for the namespace delegations
*
* Namespace delegations are a bit tricky as there can be an arbitrary number of delegations between the namespace key
* and the key that will be used for authorizations. Think of it as a certificate chain where we get a
* namespace delegations are a bit tricky as there can be an arbitrary number of delegations before we reach
* the actual key that will be used for authorizations. think of it as a certificate chain where we get a
* series of certificates and we need to figure out a path from one certificate to the root certificate.
*
* NOTE: this class is not thread-safe
*
* Properties of the graph:
* - Each node corresponds to a target key
* - The node with key fingerprint of the namespace is the root node
* - The edges between nodes are namespace delegations.
* If key A signs a namespace delegation with target key B, then key A authorizes key B to act on the namespace.
* In this case, the edge is outgoing from node A and incoming into node B.
* - The graph may have cycles. The implementation does not get confused by this.
* properties of the graph:
* - the nodes are the target key fingerprints
* - the node with fingerprint of the namespace is the root node
* - the edges between the nodes are the authorizations where key A authorizes key B to act on the namespace
* in this case, the authorization is outgoing from A and incoming to B.
* - the graph SHOULD be a directed acyclic graph, but we MIGHT have cycles (i.e. key A authorizing B, B authorizing A).
* we don't need to make a fuss about cycles in the graph. we just ignore / report them assuming it was an admin
* mistake, but we don't get confused.
* - root certificates are edges pointing to the node itself. they are separate such that they don't show up
* in the list of incoming / outgoing.
* - we track for each node the set of outgoing edges and incoming edges. an outgoing edge is a delegation where
* the source node is authorizing a target node. obviously every outgoing edge is also an incoming edge.
*
* Computation task:
* The graph maintains a set of nodes that are connected to the root node. Those correspond to the keys that are
* authorized to sign on behalf of the namespace.
* computation task:
* - once we've modified the graph, we compute the nodes that are somehow connected to the root node.
*
* Limitation: clients need to ensure that the namespace delegations added have valid signatures.
* If delegations with invalid signatures are added, authorization will break.
* purpose:
* - once we know which target keys are actually authorized to act on this particular namespace, we can then use
* this information to find out which resulting mapping is properly authorized and which one is not.
*
* @param extraDebugInfo whether to log the authorization graph at debug level on every recomputation
* authorization checks:
* - when adding "single transactions", we do check that the transaction is properly authorized. otherwise we
* "ignore" it (returning false). this is used during processing.
* - when adding "batch transactions", we don't check that all of them are properly authorized, as we do allow
* temporarily "nodes" to be unauthorized (so that errors can be fixed by adding a replacement certificate)
* - when removing transactions, we do check that the authorizing key is authorized. But note that the authorizing
* key of an edge REMOVAL doesn't need to match the key used to authorize the ADD.
*/
class AuthorizationGraph(
val namespace: Namespace,
extraDebugInfo: Boolean,
override protected val loggerFactory: NamedLoggerFactory,
val loggerFactory: NamedLoggerFactory,
) extends AuthorizationCheck
with NamedLogging {
/** @param root the last active root certificate for `target`
* @param outgoing all active namespace delegations (excluding root certificates) authorized by `target`
* @param incoming all active namespace delegations for the namespace `target`
*
* All namespace delegations are for namespace `this.namespace`.
*/
private case class GraphNode(
target: Fingerprint,
root: Option[AuthorizedNamespaceDelegation] = None,
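As a rough illustration of the class comment above, the following sketch (with hypothetical types, not the real GraphNode/fingerprint machinery) treats keys as plain strings and computes the authorized keys as the set of keys reachable from the namespace's root key; cycles are harmless because visited nodes are tracked:
```
// Simplified sketch: edges are delegations "from authorizes to".
final case class Edge(from: String, to: String)

final class SimpleAuthGraph(rootKey: String) {
  private var edges: Set[Edge] = Set.empty

  def add(edge: Edge): Unit = edges += edge
  def remove(edge: Edge): Unit = edges -= edge

  /** Keys reachable from the root key, i.e. keys authorized to sign for the namespace. */
  def authorizedKeys: Set[String] = {
    @annotation.tailrec
    def go(frontier: Set[String], seen: Set[String]): Set[String] =
      if (frontier.isEmpty) seen
      else {
        val next = edges.collect { case Edge(f, t) if frontier(f) && !seen(t) => t }
        go(next, seen ++ next)
      }
    go(Set(rootKey), Set(rootKey))
  }
}
```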
@ -105,9 +113,9 @@ class AuthorizationGraph(
private abstract class AuthLevel(val isAuth: Boolean, val isRoot: Boolean)
private object AuthLevel {
private object NotAuthorized extends AuthLevel(false, false)
private object Standard extends AuthLevel(true, false)
private object RootDelegation extends AuthLevel(true, true)
object NotAuthorized extends AuthLevel(false, false)
object Standard extends AuthLevel(true, false)
object RootDelegation extends AuthLevel(true, true)
implicit val orderingAuthLevel: Ordering[AuthLevel] =
Ordering.by[AuthLevel, Int](authl => Seq(authl.isAuth, authl.isRoot).count(identity))
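The ordering above simply ranks levels by how many capability flags they carry; a self-contained sketch of the same idea:
```
// NotAuthorized < Standard < RootDelegation, because each level carries one more flag.
sealed abstract class Level(val isAuth: Boolean, val isRoot: Boolean)
case object NotAuthorized extends Level(false, false)
case object Standard extends Level(true, false)
case object RootDelegation extends Level(true, true)

implicit val levelOrdering: Ordering[Level] =
  Ordering.by(level => Seq(level.isAuth, level.isRoot).count(identity))

// e.g. List[Level](Standard, RootDelegation, NotAuthorized).max == RootDelegation
```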
@ -121,30 +129,23 @@ class AuthorizationGraph(
}
/** GraphNodes by GraphNode.target */
private val nodes = new TrieMap[Fingerprint, GraphNode]()
/** Authorized namespace delegations for namespace `this.namespace`, grouped by target */
private val cache =
new TrieMap[Fingerprint, AuthorizedNamespaceDelegation]()
/** Check if `item` is authorized and, if so, add its mapping to this graph.
/** temporary cache for the current graph authorization check results
*
* @throws java.lang.IllegalArgumentException if `item` does not refer to `namespace` or the operation is not REPLACE.
* if the entry for a fingerprint is None, then we haven't yet computed the answer
*/
private val cache =
new TrieMap[Fingerprint, Option[AuthorizedNamespaceDelegation]]()
def add(item: AuthorizedNamespaceDelegation)(implicit traceContext: TraceContext): Boolean = {
ErrorUtil.requireArgument(
item.mapping.namespace == namespace,
s"unable to add namespace delegation for ${item.mapping.namespace} to graph for $namespace",
s"added namespace ${item.mapping.namespace} to $namespace",
)
ErrorUtil.requireArgument(
item.operation == Replace,
s"unable to add namespace delegation with operation ${item.operation} to graph for $namespace",
)
if (
AuthorizedTopologyTransaction.isRootCertificate(item) ||
this.existsAuthorizedKeyIn(item.signingKeys, requireRoot = true)
this.areValidAuthorizationKeys(item.signingKeys, requireRoot = true)
) {
doAdd(item)
recompute()
@ -152,12 +153,6 @@ class AuthorizationGraph(
} else false
}
/** Add the mappings in `items` to this graph, regardless if they are authorized or not.
* If an unauthorized namespace delegation is added to the graph, the graph will contain nodes that are not connected to the root.
* The target key of the unauthorized delegation will still be considered unauthorized.
*
* @throws java.lang.IllegalArgumentException if `item` does not refer to `namespace` or the operation is not REPLACE.
*/
def unauthorizedAdd(
items: Seq[AuthorizedNamespaceDelegation]
)(implicit traceContext: TraceContext): Unit = {
@ -168,15 +163,6 @@ class AuthorizationGraph(
private def doAdd(
item: AuthorizedNamespaceDelegation
)(implicit traceContext: TraceContext): Unit = {
ErrorUtil.requireArgument(
item.mapping.namespace == namespace,
s"unable to add namespace delegation for ${item.mapping.namespace} to graph for $namespace",
)
ErrorUtil.requireArgument(
item.operation == Replace,
s"unable to add namespace delegation with operation ${item.operation} to graph for $namespace",
)
val targetKey = item.mapping.target.fingerprint
val curTarget = nodes.getOrElse(targetKey, GraphNode(targetKey))
// if this is a root certificate, remember it separately
@ -195,38 +181,32 @@ class AuthorizationGraph(
}
}
/** Check if `item` is authorized and, if so, remove its mapping from this graph.
* Note that addition and removal of a namespace delegation can be authorized by different keys.
*
* @throws java.lang.IllegalArgumentException if `item` does not refer to `namespace` or the operation is not REMOVE.
*/
def remove(item: AuthorizedNamespaceDelegation)(implicit traceContext: TraceContext): Boolean = {
ErrorUtil.requireArgument(
item.mapping.namespace == namespace,
s"unable to remove namespace delegation for ${item.mapping.namespace} from graph for $namespace",
)
ErrorUtil.requireArgument(
item.operation == Remove,
s"unable to remove namespace delegation with operation ${item.operation} from graph for $namespace",
)
if (existsAuthorizedKeyIn(item.signingKeys, requireRoot = true)) {
def remove(item: AuthorizedNamespaceDelegation)(implicit traceContext: TraceContext): Boolean =
if (areValidAuthorizationKeys(item.signingKeys, requireRoot = true)) {
doRemove(item)
true
} else false
def unauthorizedRemove(
items: Seq[AuthorizedNamespaceDelegation]
)(implicit traceContext: TraceContext): Unit = {
items.foreach(doRemove)
}
/** remove a namespace delegation
*
* The implementation is a bit tricky as the removal might have been authorized
* by a different key than the addition. This complicates the book-keeping,
* Note that this one is a bit tricky as the removal might have been authorized
* by a different key than the addition. This is fine, but it complicates the book-keeping,
* as we need to track for each target key what the "incoming authorizations" were solely for the
* purpose of being able to clean them up.
* purpose of being able to clean them up
*/
private def doRemove(
item: AuthorizedNamespaceDelegation
)(implicit traceContext: TraceContext): Unit = {
ErrorUtil.requireArgument(
item.mapping.namespace == namespace,
s"removing namespace ${item.mapping.namespace} from $namespace",
)
def myFilter(existing: AuthorizedNamespaceDelegation): Boolean = {
// the auth key doesn't need to match on removals
existing.mapping != item.mapping
@ -268,9 +248,10 @@ class AuthorizationGraph(
updateRemove(targetKey, curTarget.copy(incoming = curTarget.incoming.filter(myFilter)))
}
recompute()
case None => logger.warn(s"Superfluous removal of namespace delegation $item")
case None =>
logger.warn(s"Superfluous removal of namespace delegation $item")
}
}
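A small sketch (hypothetical types) of the bookkeeping described above: incoming delegations are tracked per target key and removed by matching on the mapping only, so the key that authorizes the removal need not be the key that authorized the addition:
```
final case class Mapping(namespace: String, targetKey: String)
final case class Authorized(mapping: Mapping, signedBy: Set[String])

def removeIncoming(
    incoming: Map[String, Set[Authorized]], // keyed by target key fingerprint
    item: Authorized,
): Map[String, Set[Authorized]] =
  incoming.updatedWith(item.mapping.targetKey) {
    // drop every delegation with the same mapping, regardless of who signed it
    case Some(existing) => Some(existing.filterNot(_.mapping == item.mapping)).filter(_.nonEmpty)
    case None => None
  }
```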
protected def recompute()(implicit traceContext: TraceContext): Unit = {
@ -288,12 +269,12 @@ class AuthorizationGraph(
fingerprint: Fingerprint,
incoming: AuthorizedNamespaceDelegation,
): Unit = {
val current = cache.get(fingerprint)
val current = cache.getOrElseUpdate(fingerprint, None)
val currentLevel = AuthLevel.fromDelegationO(current)
val incomingLevel = AuthLevel.fromDelegationO(Some(incoming))
// this inherited level is higher than current, propagate it
if (incomingLevel > currentLevel) {
cache.update(fingerprint, incoming)
cache.update(fingerprint, Some(incoming))
// get the graph node of this fingerprint
nodes.get(fingerprint).foreach { graphNode =>
// iterate through all edges that depart from this node
@ -329,7 +310,7 @@ class AuthorizationGraph(
}
if (extraDebugInfo && logger.underlying.isDebugEnabled) {
val str =
cache.values
authorizedDelegations()
.map(aud =>
show"auth=${aud.signingKeys}, target=${aud.mapping.target.fingerprint}, root=${AuthorizedTopologyTransaction
.isRootCertificate(aud)}"
@ -339,99 +320,144 @@ class AuthorizationGraph(
}
} else
logger.debug(
s"Namespace $namespace has no root certificate, making all ${nodes.size} un-authorized"
s"Namespace ${namespace} has no root certificate, making all ${nodes.size} un-authorized"
)
override def existsAuthorizedKeyIn(
override def areValidAuthorizationKeys(
authKeys: Set[Fingerprint],
requireRoot: Boolean,
): Boolean = authKeys.exists(getAuthorizedKey(_, requireRoot).nonEmpty)
): Boolean = {
authKeys.exists { authKey =>
val authLevel = AuthLevel.fromDelegationO(cache.getOrElse(authKey, None))
authLevel.isRoot || (authLevel.isAuth && !requireRoot)
}
}
private def getAuthorizedKey(
authKey: Fingerprint,
override def getValidAuthorizationKeys(
authKeys: Set[Fingerprint],
requireRoot: Boolean,
): Option[SigningPublicKey] =
): Set[SigningPublicKey] = authKeys.flatMap(authKey =>
cache
.get(authKey)
.filter { delegation =>
val authLevel = AuthLevel.fromDelegationO(Some(delegation))
authLevel.isRoot || (authLevel.isAuth && !requireRoot)
}
.getOrElse(authKey, None)
.map(_.mapping.target)
.filter(_ => areValidAuthorizationKeys(Set(authKey), requireRoot))
)
override def keysSupportingAuthorization(
authKeys: Set[Fingerprint],
def authorizationChain(
startAuthKey: Fingerprint,
requireRoot: Boolean,
): Set[SigningPublicKey] = authKeys.flatMap(getAuthorizedKey(_, requireRoot))
): Option[AuthorizationChain] = {
@tailrec
def go(
authKey: Fingerprint,
requireRoot: Boolean,
acc: List[AuthorizedNamespaceDelegation],
): List[AuthorizedNamespaceDelegation] = {
cache.getOrElse(authKey, None) match {
// we've terminated with the root certificate
case Some(delegation) if AuthorizedTopologyTransaction.isRootCertificate(delegation) =>
delegation :: acc
// cert is valid, append it
case Some(delegation) if delegation.mapping.isRootDelegation || !requireRoot =>
go(delegation.signingKeys.head1, delegation.mapping.isRootDelegation, delegation :: acc)
// return empty to indicate failure
case _ => List.empty
}
}
go(startAuthKey, requireRoot, List.empty) match {
case Nil => None
case rest =>
Some(
AuthorizationChain(
identifierDelegation = Seq.empty,
namespaceDelegations = rest,
Seq.empty,
)
)
}
}
def authorizedDelegations(): Seq[AuthorizedNamespaceDelegation] =
cache.values.flatMap(_.toList).toSeq
override def toString: String = s"AuthorizationGraph($namespace)"
def debugInfo() = s"$namespace => ${nodes.mkString("\n")}"
}
trait AuthorizationCheck {
def areValidAuthorizationKeys(authKeys: Set[Fingerprint], requireRoot: Boolean): Boolean
/** Determines if a subset of the given keys is authorized to sign on behalf of the (possibly decentralized) namespace.
*
* @param requireRoot whether the authorization must be suitable to authorize namespace delegations
*/
def existsAuthorizedKeyIn(authKeys: Set[Fingerprint], requireRoot: Boolean): Boolean
/** Returns those keys that are useful for signing on behalf of the (possibly decentralized) namespace.
* Only keys with fingerprint in `authKeys` will be returned.
* The returned keys are not necessarily sufficient to authorize a transaction on behalf of the namespace;
* in case of a decentralized namespace, additional signatures may be required.
*/
def keysSupportingAuthorization(
def getValidAuthorizationKeys(
authKeys: Set[Fingerprint],
requireRoot: Boolean,
): Set[SigningPublicKey]
def authorizationChain(
startAuthKey: Fingerprint,
requireRoot: Boolean,
): Option[AuthorizationChain]
def authorizedDelegations(): Seq[AuthorizedNamespaceDelegation]
}
object AuthorizationCheck {
val empty: AuthorizationCheck = new AuthorizationCheck {
override def existsAuthorizedKeyIn(
val empty = new AuthorizationCheck {
override def areValidAuthorizationKeys(
authKeys: Set[Fingerprint],
requireRoot: Boolean,
): Boolean = false
override def keysSupportingAuthorization(
override def authorizationChain(
startAuthKey: Fingerprint,
requireRoot: Boolean,
): Option[AuthorizationChain] = None
override def getValidAuthorizationKeys(
authKeys: Set[Fingerprint],
requireRoot: Boolean,
): Set[SigningPublicKey] = Set.empty
override def authorizedDelegations(): Seq[AuthorizedNamespaceDelegation] = Seq.empty
override def toString: String = "AuthorizationCheck.empty"
}
}
/** Authorization graph for a decentralized namespace.
*
* @throws java.lang.IllegalArgumentException if `dnd` and `direct` refer to different namespaces.
*/
final case class DecentralizedNamespaceAuthorizationGraph(
dnd: DecentralizedNamespaceDefinition,
direct: AuthorizationGraph,
ownerGraphs: Seq[AuthorizationGraph],
) extends AuthorizationCheck {
require(
dnd.namespace == direct.namespace,
s"The direct graph refers to the wrong namespace (expected: ${dnd.namespace}, actual: ${direct.namespace}).",
)
override def existsAuthorizedKeyIn(
override def areValidAuthorizationKeys(
authKeys: Set[Fingerprint],
requireRoot: Boolean,
): Boolean = {
val viaNamespaceDelegation = direct.existsAuthorizedKeyIn(authKeys, requireRoot)
val viaNamespaceDelegation = direct.areValidAuthorizationKeys(authKeys, requireRoot)
val viaCollective =
ownerGraphs.count(_.existsAuthorizedKeyIn(authKeys, requireRoot)) >= dnd.threshold.value
ownerGraphs.count(_.areValidAuthorizationKeys(authKeys, requireRoot)) >= dnd.threshold.value
viaNamespaceDelegation || viaCollective
}
override def keysSupportingAuthorization(
import cats.syntax.foldable.*
override def getValidAuthorizationKeys(
authKeys: Set[Fingerprint],
requireRoot: Boolean,
): Set[SigningPublicKey] = {
(direct +: ownerGraphs)
.flatMap(_.keysSupportingAuthorization(authKeys, requireRoot))
.flatMap(_.getValidAuthorizationKeys(authKeys, requireRoot))
.toSet
}
override def authorizationChain(
startAuthKey: Fingerprint,
requireRoot: Boolean,
): Option[AuthorizationChain] =
direct
.authorizationChain(startAuthKey, requireRoot)
.orElse(ownerGraphs.map(_.authorizationChain(startAuthKey, requireRoot)).combineAll)
override def authorizedDelegations(): Seq[AuthorizedNamespaceDelegation] =
direct.authorizedDelegations() ++ ownerGraphs.flatMap(_.authorizedDelegations())
}
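A hedged sketch of the decision implemented above, using plain sets instead of the real AuthorizationGraph: the signing keys authorize the decentralized namespace either through a delegation on the namespace itself, or when at least `threshold` owner namespaces accept one of the keys:
```
final case class Check(authorizedKeys: Set[String]) // stand-in for an AuthorizationGraph

def decentralizedNamespaceAuthorized(
    direct: Check,
    owners: Seq[Check],
    threshold: Int,
    signingKeys: Set[String],
): Boolean = {
  val viaDirectDelegation = signingKeys.exists(direct.authorizedKeys)
  val viaOwners = owners.count(owner => signingKeys.exists(owner.authorizedKeys)) >= threshold
  viaDirectDelegation || viaOwners
}
```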

View File

@ -5,8 +5,7 @@ package com.digitalasset.canton.topology.processing
import cats.Monoid
import cats.data.EitherT
import cats.syntax.bifunctor.*
import cats.syntax.foldable.*
import cats.syntax.parallel.*
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.crypto.CryptoPureApi
import com.digitalasset.canton.data.CantonTimestamp
@ -21,10 +20,14 @@ import com.digitalasset.canton.topology.processing.AuthorizedTopologyTransaction
import com.digitalasset.canton.topology.store.ValidatedTopologyTransaction.GenericValidatedTopologyTransaction
import com.digitalasset.canton.topology.store.*
import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction
import com.digitalasset.canton.topology.transaction.TopologyMapping.RequiredAuthAuthorizations
import com.digitalasset.canton.topology.transaction.TopologyMapping.{
MappingHash,
RequiredAuthAuthorizations,
}
import com.digitalasset.canton.topology.transaction.TopologyTransaction.GenericTopologyTransaction
import com.digitalasset.canton.topology.transaction.*
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.FutureInstances.*
import scala.concurrent.{ExecutionContext, Future}
@ -127,14 +130,17 @@ class IncomingTopologyTransactionAuthorizationValidator(
*/
def validateAndUpdateHeadAuthState(
timestamp: CantonTimestamp,
toValidate: GenericSignedTopologyTransaction,
inStore: Option[GenericSignedTopologyTransaction],
transactionsToValidate: Seq[GenericSignedTopologyTransaction],
transactionsInStore: Map[MappingHash, GenericSignedTopologyTransaction],
expectFullAuthorization: Boolean,
)(implicit
traceContext: TraceContext
): Future[(UpdateAggregation, GenericValidatedTopologyTransaction)] = {
): Future[(UpdateAggregation, Seq[GenericValidatedTopologyTransaction])] = {
for {
authCheckResult <- determineRelevantUidsAndNamespaces(toValidate, inStore.map(_.transaction))
authCheckResult <- determineRelevantUidsAndNamespaces(
transactionsToValidate,
transactionsInStore.view.mapValues(_.transaction).toMap,
)
(updateAggregation, targetDomainVerified) = authCheckResult
loadGraphsF = loadAuthorizationGraphs(timestamp, updateAggregation.authNamespaces)
loadUidsF = loadIdentifierDelegationsCascading(
@ -147,11 +153,11 @@ class IncomingTopologyTransactionAuthorizationValidator(
} yield {
logger.debug(s"Update aggregation yielded ${updateAggregation}")
val validated = targetDomainVerified match {
val validated = targetDomainVerified.map {
case ValidatedTopologyTransaction(tx, None, _) =>
processTransaction(
tx,
inStore,
transactionsInStore.get(tx.mapping.uniqueKey),
expectFullAuthorization,
)
case v => v
@ -167,124 +173,101 @@ class IncomingTopologyTransactionAuthorizationValidator(
}
}
/** Validates a topology transaction as follows:
* <ol>
* <li>check that the transaction has valid signatures and is sufficiently authorized. if not, reject.</li>
* <li>if there are no missing authorizers, as is the case for proposals, we update internal caches for NSD, IDD, and DND</li>
* <li>if this validation is run to determine a final verdict, as is the case for processing topology transactions coming from the domain,
* automatically clear the proposal flag for transactions with sufficent authorizing signatures.</li>
* </ol>
*/
private def processTransaction(
toValidate: GenericSignedTopologyTransaction,
inStore: Option[GenericSignedTopologyTransaction],
expectFullAuthorization: Boolean,
)(implicit traceContext: TraceContext): GenericValidatedTopologyTransaction = {
// See validateRootCertificate why we need to check the removal of a root certificate explicitly here.
val signatureCheckResult = validateRootCertificate(toValidate)
.getOrElse(validateSignaturesAndDetermineMissingAuthorizers(toValidate, inStore))
val processedNs = toValidate.selectMapping[NamespaceDelegation].forall { sigTx =>
processNamespaceDelegation(
toValidate.operation,
AuthorizedTopologyTransaction(sigTx),
)
}
signatureCheckResult match {
val processedIdent = toValidate.selectMapping[IdentifierDelegation].forall { sigTx =>
processIdentifierDelegation(
toValidate.operation,
AuthorizedTopologyTransaction(sigTx),
)
}
val resultDns = toValidate.selectMapping[DecentralizedNamespaceDefinition].map { sigTx =>
processDecentralizedNamespaceDefinition(
sigTx.operation,
AuthorizedTopologyTransaction(sigTx),
)
}
val processedDns = resultDns.forall(_._1)
val mappingSpecificCheck = processedNs && processedIdent && processedDns
// the transaction is fully authorized if either
// 1. it's a root certificate, or
// 2. there is no authorization error and there are no missing authorizers
// We need to check explicitly for the root certificate here, because a REMOVE operation
// removes itself from the authorization graph, and therefore `isCurrentlyAuthorized` cannot validate it.
val authorizationResult =
if (NamespaceDelegation.isRootCertificate(toValidate))
Right(
(
toValidate,
RequiredAuthAuthorizations.empty, // no missing authorizers
)
)
else isCurrentlyAuthorized(toValidate, inStore)
authorizationResult match {
// propagate the rejection reason
case Left(rejectionReason) => ValidatedTopologyTransaction(toValidate, Some(rejectionReason))
// if a transaction wasn't outright rejected, run some additional checks
case Right((validatedTx, missingAuthorizers)) =>
handleSuccessfulSignatureChecks(
validatedTx,
missingAuthorizers,
expectFullAuthorization,
)
}
}
// The mappingSpecificCheck is a necessary condition for having sufficient authorizers.
val isFullyAuthorized =
mappingSpecificCheck && missingAuthorizers.isEmpty
private def handleSuccessfulSignatureChecks(
toValidate: GenericSignedTopologyTransaction,
missingAuthorizers: RequiredAuthAuthorizations,
expectFullAuthorization: Boolean,
)(implicit
traceContext: TraceContext
): ValidatedTopologyTransaction[TopologyChangeOp, TopologyMapping] = {
// if there are no missing authorizers, we can update the internal caches
val isFullyAuthorized = if (missingAuthorizers.isEmpty) {
val processedNSD = toValidate
.selectMapping[NamespaceDelegation]
.forall { sigTx => processNamespaceDelegation(AuthorizedTopologyTransaction(sigTx)) }
val processedIDD = toValidate.selectMapping[IdentifierDelegation].forall { sigTx =>
processIdentifierDelegation(AuthorizedTopologyTransaction(sigTx))
}
val processedDND =
toValidate.selectMapping[DecentralizedNamespaceDefinition].forall { sigTx =>
processDecentralizedNamespaceDefinition(AuthorizedTopologyTransaction(sigTx))
// If a decentralizedNamespace transaction is fully authorized, reflect so in the decentralizedNamespace cache.
// Note: It seems a bit unsafe to update the caches on the assumption that the update will also be eventually
// persisted by the caller (a few levels up the call chain in TopologyStateProcessor.validateAndApplyAuthorization
// as the caller performs additional checks such as the numeric value of the serial number).
// But at least this is safer than where the check was previously (inside processDecentralizedNamespaceDefinition before even
// `isCurrentlyAuthorized` above had finished all checks).
if (isFullyAuthorized) {
resultDns.foreach { case (_, updateDecentralizedNamespaceCache) =>
updateDecentralizedNamespaceCache()
}
}
val mappingSpecificCheck = processedNSD && processedIDD && processedDND
if (!mappingSpecificCheck) {
logger.debug(s"Mapping specific check failed")
}
mappingSpecificCheck
} else { false }
val acceptMissingAuthorizers =
toValidate.isProposal && !expectFullAuthorization
val acceptMissingAuthorizers =
validatedTx.isProposal && !expectFullAuthorization
// if the result of this validation is final (when processing transactions for the authorized store
// or sequenced transactions from the domain) we set the proposal flag according to whether the transaction
// is fully authorized or not.
// This must not be done when preliminarily validating transactions via the DomainTopologyManager, because
// the validation outcome might change when validating the transaction again after it has been sequenced.
val finalTransaction =
if (validationIsFinal) toValidate.copy(isProposal = !isFullyAuthorized)
else toValidate
// if the result of this validation is final (when processing transactions for the authorized store
// or sequenced transactions from the domain) we set the proposal flag according to whether the transaction
// is fully authorized or not.
// This must not be done when preliminarily validating transactions via the DomainTopologyManager, because
// the validation outcome might change when validating the transaction again after it has been sequenced.
val finalTransaction =
if (validationIsFinal) validatedTx.copy(isProposal = !isFullyAuthorized)
else validatedTx
// Either the transaction is fully authorized or the request allows partial authorization
if (isFullyAuthorized || acceptMissingAuthorizers) {
ValidatedTopologyTransaction(finalTransaction, None)
} else {
if (!missingAuthorizers.isEmpty) {
logger.debug(s"Missing authorizers: $missingAuthorizers")
}
ValidatedTopologyTransaction(
toValidate,
Some(TopologyTransactionRejection.NotAuthorized),
)
// Either the transaction is fully authorized or the request allows partial authorization
if (isFullyAuthorized || acceptMissingAuthorizers) {
ValidatedTopologyTransaction(finalTransaction, None)
} else {
if (!missingAuthorizers.isEmpty) {
logger.debug(s"Missing authorizers: $missingAuthorizers")
}
if (!mappingSpecificCheck) {
logger.debug(s"Mapping specific check failed")
}
ValidatedTopologyTransaction(
toValidate,
Some(TopologyTransactionRejection.NotAuthorized),
)
}
}
}
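The acceptance rule implemented above boils down to two small decisions; a minimal sketch with hypothetical helper functions, not the real API:
```
// Accept if fully authorized, or if it is a proposal and partial authorization is allowed.
def accept(isFullyAuthorized: Boolean, isProposal: Boolean, expectFullAuthorization: Boolean): Boolean =
  isFullyAuthorized || (isProposal && !expectFullAuthorization)

// When the validation outcome is final, the proposal flag is derived from the authorization
// result; otherwise the original flag is kept.
def finalProposalFlag(validationIsFinal: Boolean, isProposal: Boolean, isFullyAuthorized: Boolean): Boolean =
  if (validationIsFinal) !isFullyAuthorized else isProposal
```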
/** Validates the signature of the removal of a root certificate.
* This check is done separately from the mechanism used for other topology transactions (ie isCurrentlyAuthorized),
* because removing a root certificate removes it from the authorization graph and therefore
* isCurrentlyAuthorized would not find the key to validate it.
*/
private def validateRootCertificate(
toValidate: GenericSignedTopologyTransaction
): Option[Either[
TopologyTransactionRejection,
(GenericSignedTopologyTransaction, RequiredAuthAuthorizations),
]] = {
toValidate
.selectMapping[NamespaceDelegation]
.filter(NamespaceDelegation.isRootCertificate)
.map { rootCert =>
val result = rootCert.signatures.toSeq.forgetNE
.traverse_(
pureCrypto
.verifySignature(
rootCert.hash.hash,
rootCert.mapping.target,
_,
)
)
.bimap(
TopologyTransactionRejection.SignatureCheckFailed,
_ => (toValidate, RequiredAuthAuthorizations.empty /* no missing authorizers */ ),
)
result
}
}
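Because a removed root certificate no longer appears in the authorization graph, its signature is checked directly against the certificate's own target key. A simplified sketch with stand-in types and a hypothetical `verify` function:
```
final case class RootCert(namespace: String, targetKey: String, signatures: Seq[String])

def validateRootCert(
    cert: RootCert,
    verify: (String, String) => Boolean, // (signature, key) => valid? (hypothetical)
): Either[String, Unit] =
  Either.cond(
    cert.signatures.nonEmpty && cert.signatures.forall(sig => verify(sig, cert.targetKey)),
    (),
    s"Signature check failed for root certificate of namespace ${cert.namespace}",
  )
```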
/** loads all identifier delegations into the identifier delegation cache
*
* This function has two "modes". On a cascading update affecting namespaces, we have
@ -308,15 +291,16 @@ class IncomingTopologyTransactionAuthorizationValidator(
}
private def processIdentifierDelegation(
tx: AuthorizedIdentifierDelegation
op: TopologyChangeOp,
tx: AuthorizedIdentifierDelegation,
): Boolean = {
// check authorization
val check = getAuthorizationCheckForNamespace(tx.mapping.identifier.namespace)
val keysAreValid = check.existsAuthorizedKeyIn(tx.signingKeys, requireRoot = false)
val keysAreValid = check.areValidAuthorizationKeys(tx.signingKeys, requireRoot = false)
// update identifier delegation cache if necessary
if (keysAreValid) {
val updateOp: Set[AuthorizedIdentifierDelegation] => Set[AuthorizedIdentifierDelegation] =
tx.operation match {
op match {
case TopologyChangeOp.Replace =>
x => x + tx
case TopologyChangeOp.Remove =>
@ -329,11 +313,12 @@ class IncomingTopologyTransactionAuthorizationValidator(
}
private def processNamespaceDelegation(
tx: AuthorizedNamespaceDelegation
op: TopologyChangeOp,
tx: AuthorizedNamespaceDelegation,
)(implicit traceContext: TraceContext): Boolean = {
val graph = getAuthorizationGraphForNamespace(tx.mapping.namespace)
// add or remove including authorization check
tx.operation match {
op match {
case TopologyChangeOp.Replace => graph.add(tx)
case TopologyChangeOp.Remove => graph.remove(tx)
}
@ -345,8 +330,9 @@ class IncomingTopologyTransactionAuthorizationValidator(
* by the caller once the mapping is to be committed.
*/
private def processDecentralizedNamespaceDefinition(
tx: AuthorizedDecentralizedNamespaceDefinition
)(implicit traceContext: TraceContext): Boolean = {
op: TopologyChangeOp,
tx: AuthorizedDecentralizedNamespaceDefinition,
)(implicit traceContext: TraceContext): (Boolean, () => Unit) = {
val decentralizedNamespace = tx.mapping.namespace
val dnsGraph = decentralizedNamespaceCache
.get(decentralizedNamespace)
@ -374,30 +360,26 @@ class IncomingTopologyTransactionAuthorizationValidator(
)
newDecentralizedNamespaceGraph
}
val isAuthorized = dnsGraph.existsAuthorizedKeyIn(tx.signingKeys, requireRoot = false)
val isAuthorized = dnsGraph.areValidAuthorizationKeys(tx.signingKeys, false)
if (isAuthorized) {
tx.operation match {
case TopologyChangeOp.Remove =>
decentralizedNamespaceCache.remove(decentralizedNamespace).discard
case TopologyChangeOp.Replace =>
val ownerGraphs = tx.mapping.owners.forgetNE.toSeq.map(getAuthorizationGraphForNamespace)
decentralizedNamespaceCache
.put(
decentralizedNamespace,
(tx.mapping, dnsGraph.copy(dnd = tx.mapping, ownerGraphs = ownerGraphs)),
)
.discard
}
}
isAuthorized
(
isAuthorized,
() => {
val ownerGraphs = tx.mapping.owners.forgetNE.toSeq.map(getAuthorizationGraphForNamespace)
decentralizedNamespaceCache
.put(
decentralizedNamespace,
(tx.mapping, dnsGraph.copy(dnd = tx.mapping, ownerGraphs = ownerGraphs)),
)
.discard
},
)
}
private def determineRelevantUidsAndNamespaces(
toValidate: GenericSignedTopologyTransaction,
inStore: Option[GenericTopologyTransaction],
): Future[(UpdateAggregation, GenericValidatedTopologyTransaction)] = {
transactionsToValidate: Seq[GenericSignedTopologyTransaction],
transactionsInStore: Map[MappingHash, GenericTopologyTransaction],
): Future[(UpdateAggregation, Seq[GenericValidatedTopologyTransaction])] = {
def verifyDomain(
tx: GenericSignedTopologyTransaction
): Either[TopologyTransactionRejection, Unit] =
@ -413,19 +395,22 @@ class IncomingTopologyTransactionAuthorizationValidator(
// we need to figure out for which namespaces and uids we need to load the validation checks
// and for which uids and namespaces we'll have to perform a cascading update
EitherT
.fromEither[Future](verifyDomain(toValidate))
.fold(
rejection =>
(UpdateAggregation(), ValidatedTopologyTransaction(toValidate, Some(rejection))),
_ =>
(
UpdateAggregation().add(
toValidate.mapping,
inStore,
import UpdateAggregation.monoid
transactionsToValidate.parFoldMapA { toValidate =>
EitherT
.fromEither[Future](verifyDomain(toValidate))
.fold(
rejection =>
(UpdateAggregation(), Seq(ValidatedTopologyTransaction(toValidate, Some(rejection)))),
_ =>
(
UpdateAggregation().add(
toValidate.mapping,
transactionsInStore.get(toValidate.mapping.uniqueKey),
),
Seq(ValidatedTopologyTransaction(toValidate, None)),
),
ValidatedTopologyTransaction(toValidate, None),
),
)
)
}
}
}

View File

@ -43,7 +43,7 @@ trait TransactionAuthorizationValidator {
protected def pureCrypto: CryptoPureApi
def validateSignaturesAndDetermineMissingAuthorizers(
def isCurrentlyAuthorized(
toValidate: GenericSignedTopologyTransaction,
inStore: Option[GenericSignedTopologyTransaction],
)(implicit
@ -72,41 +72,41 @@ trait TransactionAuthorizationValidator {
val namespaceWithRootAuthorizations =
required.namespacesWithRoot.map { ns =>
val check = getAuthorizationCheckForNamespace(ns)
val keysUsed = check.keysSupportingAuthorization(
val keysWithDelegation = check.getValidAuthorizationKeys(
signingKeys,
requireRoot = true,
)
val keysAuthorizeNamespace =
check.existsAuthorizedKeyIn(signingKeys, requireRoot = true)
(ns -> (keysAuthorizeNamespace, keysUsed))
check.areValidAuthorizationKeys(signingKeys, requireRoot = true)
(ns -> (keysAuthorizeNamespace, keysWithDelegation))
}.toMap
// Now let's determine which namespaces and uids actually delegated to any of the keys
val namespaceAuthorizations = required.namespaces.map { ns =>
val check = getAuthorizationCheckForNamespace(ns)
val keysUsed = check.keysSupportingAuthorization(
val keysWithDelegation = check.getValidAuthorizationKeys(
signingKeys,
requireRoot = false,
)
val keysAuthorizeNamespace = check.existsAuthorizedKeyIn(signingKeys, requireRoot = false)
(ns -> (keysAuthorizeNamespace, keysUsed))
val keysAuthorizeNamespace = check.areValidAuthorizationKeys(signingKeys, requireRoot = false)
(ns -> (keysAuthorizeNamespace, keysWithDelegation))
}.toMap
val uidAuthorizations =
required.uids.map { uid =>
val check = getAuthorizationCheckForNamespace(uid.namespace)
val keysUsed = check.keysSupportingAuthorization(
val keysWithDelegation = check.getValidAuthorizationKeys(
signingKeys,
requireRoot = false,
)
val keysAuthorizeNamespace =
check.existsAuthorizedKeyIn(signingKeys, requireRoot = false)
check.areValidAuthorizationKeys(signingKeys, requireRoot = false)
val keyForUid =
getAuthorizedIdentifierDelegation(check, uid, toValidate.signatures.map(_.signedBy))
.map(_.mapping.target)
(uid -> (keysAuthorizeNamespace || keyForUid.nonEmpty, keysUsed ++ keyForUid))
(uid -> (keysAuthorizeNamespace || keyForUid.nonEmpty, keysWithDelegation ++ keyForUid))
}.toMap
val extraKeyAuthorizations = {
@ -132,7 +132,7 @@ trait TransactionAuthorizationValidator {
.toMap
}
val allKeysUsedForAuthorization =
val allAuthorizingKeys =
(namespaceWithRootAuthorizations.values ++
namespaceAuthorizations.values ++
uidAuthorizations.values ++
@ -145,9 +145,9 @@ trait TransactionAuthorizationValidator {
logAuthorizations("Authorizations for UIDs", uidAuthorizations)
logAuthorizations("Authorizations for extraKeys", extraKeyAuthorizations)
logger.debug(s"All keys used for authorization: ${allKeysUsedForAuthorization.keySet}")
logger.debug(s"All authorizing keys: ${allAuthorizingKeys.keySet}")
val superfluousKeys = signingKeys -- allKeysUsedForAuthorization.keys
val superfluousKeys = signingKeys -- allAuthorizingKeys.keys
for {
_ <- Either.cond[TopologyTransactionRejection, Unit](
// there must be at least 1 key used for the signatures for one of the delegation mechanisms
@ -160,7 +160,7 @@ trait TransactionAuthorizationValidator {
},
)
txWithSignaturesToVerify <- toValidate
txWithValidSignatures <- toValidate
.removeSignatures(superfluousKeys)
.toRight({
logger.info(
@ -169,9 +169,9 @@ trait TransactionAuthorizationValidator {
TopologyTransactionRejection.NoDelegationFoundForKeys(superfluousKeys)
})
_ <- txWithSignaturesToVerify.signatures.forgetNE.toList
_ <- txWithValidSignatures.signatures.forgetNE.toList
.traverse_(sig =>
allKeysUsedForAuthorization
allAuthorizingKeys
.get(sig.signedBy)
.toRight({
val msg =
@ -182,7 +182,7 @@ trait TransactionAuthorizationValidator {
.flatMap(key =>
pureCrypto
.verifySignature(
txWithSignaturesToVerify.hash.hash,
txWithValidSignatures.hash.hash,
key,
sig,
)
@ -202,7 +202,7 @@ trait TransactionAuthorizationValidator {
extraKeys = onlyFullyAuthorized(extraKeyAuthorizations),
)
(
txWithSignaturesToVerify,
txWithValidSignatures,
requiredAuth
.satisfiedByActualAuthorizers(actual)
.fold(identity, _ => RequiredAuthAuthorizations.empty),
@ -236,7 +236,7 @@ trait TransactionAuthorizationValidator {
): Option[AuthorizedIdentifierDelegation] = {
getIdentifierDelegationsForUid(uid)
.find(aid =>
authKeys(aid.mapping.target.id) && graph.existsAuthorizedKeyIn(
authKeys(aid.mapping.target.id) && graph.areValidAuthorizationKeys(
aid.signingKeys,
requireRoot = false,
)
@ -254,7 +254,9 @@ trait TransactionAuthorizationValidator {
namespace: Namespace
): AuthorizationCheck = {
val decentralizedNamespaceCheck = decentralizedNamespaceCache.get(namespace).map(_._2)
val namespaceCheck = namespaceCache.get(namespace)
val namespaceCheck = namespaceCache.get(
namespace
)
decentralizedNamespaceCheck
.orElse(namespaceCheck)
.getOrElse(AuthorizationCheck.empty)
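The hunks above also strip "superfluous" signatures, i.e. signatures by keys that do not authorize any required namespace, uid, or extra key for this transaction, before verifying the remaining ones. A minimal sketch of that split:
```
// signingKeys: keys that signed the transaction;
// authorizingKeys: keys that actually authorize something required by the transaction.
def splitSignatures(signingKeys: Set[String], authorizingKeys: Set[String]): (Set[String], Set[String]) = {
  val superfluous = signingKeys -- authorizingKeys
  val toVerify = signingKeys -- superfluous
  (toVerify, superfluous)
}
```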

View File

@ -5,7 +5,7 @@ package com.digitalasset.canton.topology.store
import cats.data.EitherT
import cats.syntax.traverse.*
import com.daml.lf.data.Ref.PackageId
import com.digitalasset.daml.lf.data.Ref.PackageId
import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.ProtoDeserializationError
import com.digitalasset.canton.config.CantonRequireTypes.{LengthLimitedString, String255}

View File

@ -10,9 +10,14 @@ import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.logging.ErrorLoggingContext
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
import com.digitalasset.canton.protocol.OnboardingRestriction
import com.digitalasset.canton.topology.*
import com.digitalasset.canton.topology.processing.EffectiveTime
import com.digitalasset.canton.topology.transaction.TopologyMapping
import com.digitalasset.canton.topology.{
DomainId,
Member,
ParticipantId,
PartyId,
TopologyManagerError,
}
sealed trait TopologyTransactionRejection extends PrettyPrinting with Product with Serializable {
def asString: String
@ -40,12 +45,25 @@ object TopologyTransactionRejection {
TopologyManagerError.UnauthorizedTransaction.Failure(asString)
}
final case class ThresholdTooHigh(actual: Int, mustBeAtMost: Int)
extends TopologyTransactionRejection {
override def asString: String =
s"Threshold must not be higher than $mustBeAtMost, but was $actual."
override def pretty: Pretty[ThresholdTooHigh] = prettyOfString(_ => asString)
override def toTopologyManagerError(implicit elc: ErrorLoggingContext) = {
TopologyManagerError.InvalidThreshold.ThresholdTooHigh(actual, mustBeAtMost)
}
}
final case class UnknownParties(parties: Seq[PartyId]) extends TopologyTransactionRejection {
override def asString: String = s"Parties ${parties.sorted.mkString(", ")} are unknown."
override def pretty: Pretty[UnknownParties.this.type] = prettyOfString(_ => asString)
override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError =
TopologyManagerError.UnknownParties.Failure(parties)
}
final case class OnboardingRestrictionInPlace(
@ -174,25 +192,6 @@ object TopologyTransactionRejection {
)
}
final case class PartyExceedsHostingLimit(
partyId: PartyId,
limit: Int,
numParticipants: Int,
) extends TopologyTransactionRejection {
override def asString: String =
s"Party $partyId exceeds hosting limit of $limit with desired number of $numParticipants hosting participants."
override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError =
TopologyManagerError.PartyExceedsHostingLimit.Reject(partyId, limit, numParticipants)
override def pretty: Pretty[PartyExceedsHostingLimit.this.type] =
prettyOfClass(
param("partyId", _.partyId),
param("limit", _.limit),
param("number of hosting participants", _.numParticipants),
)
}
final case class MissingMappings(missing: Map[Member, Seq[TopologyMapping.Code]])
extends TopologyTransactionRejection {
override def asString: String = {
@ -210,24 +209,4 @@ object TopologyTransactionRejection {
override def pretty: Pretty[MissingMappings.this.type] = prettyOfString(_ => asString)
}
final case class MissingDomainParameters(effective: EffectiveTime)
extends TopologyTransactionRejection {
override def asString: String = s"Missing domain parameters at $effective"
override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError =
TopologyManagerError.MissingTopologyMapping.MissingDomainParameters(effective)
override def pretty: Pretty[MissingDomainParameters.this.type] = prettyOfString(_ => asString)
}
final case class NamespaceAlreadyInUse(namespace: Namespace)
extends TopologyTransactionRejection {
override def asString: String = s"The namespace $namespace is already used by another entity."
override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError =
TopologyManagerError.NamespaceAlreadyInUse.Reject(namespace)
override def pretty: Pretty[NamespaceAlreadyInUse.this.type] = prettyOfString(_ => asString)
}
}
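The rejections above are plain data; a check producing one of them is typically just an `Either.cond`. A hedged sketch of a threshold validation in the spirit of `ThresholdTooHigh`:
```
final case class ThresholdTooHigh(actual: Int, mustBeAtMost: Int) {
  def asString: String = s"Threshold must not be higher than $mustBeAtMost, but was $actual."
}

def checkThreshold(actual: Int, mustBeAtMost: Int): Either[ThresholdTooHigh, Unit] =
  Either.cond(actual <= mustBeAtMost, (), ThresholdTooHigh(actual, mustBeAtMost))
```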

View File

@ -401,14 +401,14 @@ object NamespaceDelegation {
target: SigningPublicKey,
isRootDelegation: Boolean,
): NamespaceDelegation =
create(namespace, target, isRootDelegation).valueOr(err =>
throw new IllegalArgumentException((err))
)
create(namespace, target, isRootDelegation).fold(err => sys.error(err), identity)
def code: TopologyMapping.Code = Code.NamespaceDelegation
/** Returns true if the given transaction is a self-signed root certificate */
def isRootCertificate(sit: GenericSignedTopologyTransaction): Boolean = {
((sit.operation == TopologyChangeOp.Replace && sit.serial == PositiveInt.one) ||
(sit.operation == TopologyChangeOp.Remove && sit.serial != PositiveInt.one)) &&
sit.mapping
.select[transaction.NamespaceDelegation]
.exists(ns =>
@ -944,8 +944,8 @@ final case class PartyHostingLimits(
override def code: Code = Code.PartyHostingLimits
override def namespace: Namespace = partyId.namespace
override def maybeUid: Option[UniqueIdentifier] = Some(partyId.uid)
override def namespace: Namespace = domainId.namespace
override def maybeUid: Option[UniqueIdentifier] = Some(domainId.uid)
override def restrictedToDomain: Option[DomainId] = Some(domainId)
@ -1057,7 +1057,7 @@ object HostingParticipant {
} yield HostingParticipant(participantId, permission)
}
final case class PartyToParticipant private (
final case class PartyToParticipant(
partyId: PartyId,
domainId: Option[DomainId],
threshold: PositiveInt,
@ -1135,51 +1135,6 @@ final case class PartyToParticipant private (
object PartyToParticipant {
def create(
partyId: PartyId,
domainId: Option[DomainId],
threshold: PositiveInt,
participants: Seq[HostingParticipant],
groupAddressing: Boolean,
): Either[String, PartyToParticipant] = {
val noDuplicatePParticipants = {
val duplicatePermissions =
participants.groupBy(_.participantId).values.filter(_.size > 1).toList
Either.cond(
duplicatePermissions.isEmpty,
(),
s"Participants may only be assigned one permission: $duplicatePermissions",
)
}
val thresholdCanBeMet = {
val numConfirmingParticipants =
participants.count(_.permission >= ParticipantPermission.Confirmation)
Either
.cond(
// we allow to not meet the threshold criteria if there are only observing participants.
// but as soon as there is 1 confirming participant, the threshold must theoretically be satisfiable,
// otherwise the party can never confirm a transaction.
numConfirmingParticipants == 0 || threshold.value <= numConfirmingParticipants,
(),
s"Party $partyId cannot meet threshold of $threshold confirming participants with participants $participants",
)
.map(_ => PartyToParticipant(partyId, domainId, threshold, participants, groupAddressing))
}
noDuplicatePParticipants.flatMap(_ => thresholdCanBeMet)
}
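A self-contained sketch of the two validations performed by `create` above, with simplified stand-in types: hosting entries must be unique per participant, and the confirmation threshold must be satisfiable unless every participant is observing-only:
```
final case class Hosting(participantId: String, canConfirm: Boolean)

def validateHosting(partyId: String, threshold: Int, participants: Seq[Hosting]): Either[String, Unit] = {
  val duplicates = participants.groupBy(_.participantId).collect { case (id, hs) if hs.sizeIs > 1 => id }
  val confirming = participants.count(_.canConfirm)
  for {
    _ <- Either.cond(
      duplicates.isEmpty,
      (),
      s"Participants may only be assigned one permission: ${duplicates.toSeq.sorted}",
    )
    _ <- Either.cond(
      confirming == 0 || threshold <= confirming,
      (),
      s"Party $partyId cannot meet threshold of $threshold with only $confirming confirming participants",
    )
  } yield ()
}
```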
def tryCreate(
partyId: PartyId,
domainId: Option[DomainId],
threshold: PositiveInt,
participants: Seq[HostingParticipant],
groupAddressing: Boolean,
): PartyToParticipant =
create(partyId, domainId, threshold, participants, groupAddressing).valueOr(err =>
throw new IllegalArgumentException(err)
)
def uniqueKey(partyId: PartyId, domainId: Option[DomainId]): MappingHash =
TopologyMapping.buildUniqueKey(code)(
_.add(partyId.toProtoPrimitive).add(domainId.fold("")(_.toProtoPrimitive))
@ -1203,7 +1158,7 @@ object PartyToParticipant {
}
// AuthorityOf
final case class AuthorityOf private (
final case class AuthorityOf(
partyId: PartyId,
domainId: Option[DomainId],
threshold: PositiveInt,
@ -1244,21 +1199,6 @@ final case class AuthorityOf private (
object AuthorityOf {
def create(
partyId: PartyId,
domainId: Option[DomainId],
threshold: PositiveInt,
parties: Seq[PartyId],
): Either[String, AuthorityOf] = {
Either
.cond(
threshold.value <= parties.size,
(),
s"Invalid threshold $threshold for $partyId with authorizers $parties",
)
.map(_ => AuthorityOf(partyId, domainId, threshold, parties))
}
def uniqueKey(partyId: PartyId, domainId: Option[DomainId]): MappingHash =
TopologyMapping.buildUniqueKey(code)(
_.add(partyId.toProtoPrimitive).add(domainId.fold("")(_.toProtoPrimitive))
@ -1277,9 +1217,7 @@ object AuthorityOf {
if (value.domain.nonEmpty)
DomainId.fromProtoPrimitive(value.domain, "domain").map(_.some)
else Right(None)
authorityOf <- create(partyId, domainId, threshold, parties)
.leftMap(ProtoDeserializationError.OtherError)
} yield authorityOf
} yield AuthorityOf(partyId, domainId, threshold, parties)
}
/** Dynamic domain parameter settings for the domain

View File

@ -5,19 +5,14 @@ package com.digitalasset.canton.topology.transaction
import cats.data.EitherT
import cats.instances.future.*
import cats.instances.order.*
import cats.syntax.semigroup.*
import com.digitalasset.canton.crypto.KeyPurpose
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.protocol.{DynamicDomainParameters, OnboardingRestriction}
import com.digitalasset.canton.protocol.OnboardingRestriction
import com.digitalasset.canton.topology.*
import com.digitalasset.canton.topology.processing.EffectiveTime
import com.digitalasset.canton.topology.store.StoredTopologyTransactions.PositiveStoredTopologyTransactions
import com.digitalasset.canton.topology.store.TopologyTransactionRejection.{
InvalidTopologyMapping,
NamespaceAlreadyInUse,
}
import com.digitalasset.canton.topology.store.{
TopologyStore,
TopologyStoreId,
@ -29,6 +24,7 @@ import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.EitherTUtil
import scala.concurrent.{ExecutionContext, Future}
import scala.math.Ordered.*
trait TopologyMappingChecks {
def checkTransaction(
@ -131,27 +127,6 @@ class ValidatingTopologyMappingChecks(
.select[TopologyChangeOp.Replace, AuthorityOf]
.map(checkAuthorityOf(effective, _))
case (
Code.DecentralizedNamespaceDefinition,
None | Some(Code.DecentralizedNamespaceDefinition),
) =>
toValidate
.select[TopologyChangeOp.Replace, DecentralizedNamespaceDefinition]
.map(
checkDecentralizedNamespaceDefinitionReplace(
_,
inStore.flatMap(_.select[TopologyChangeOp, DecentralizedNamespaceDefinition]),
)
)
case (
Code.NamespaceDelegation,
None | Some(Code.NamespaceDelegation),
) =>
toValidate
.select[TopologyChangeOp.Replace, NamespaceDelegation]
.map(checkNamespaceDelegationReplace)
case otherwise => None
}
@ -215,33 +190,6 @@ class ValidatingTopologyMappingChecks(
ensureParticipantDoesNotHostParties(effective, toValidate.mapping.participantId)
}
private def loadDomainParameters(
effective: EffectiveTime
)(implicit
traceContext: TraceContext
): EitherT[Future, TopologyTransactionRejection, DynamicDomainParameters] = {
loadFromStore(effective, DomainParametersState.code).subflatMap { domainParamCandidates =>
val params = domainParamCandidates.result.view
.flatMap(_.selectMapping[DomainParametersState])
.map(_.mapping.parameters)
.toList
params match {
case Nil =>
logger.error(
"Can not determine domain parameters."
)
Left(TopologyTransactionRejection.MissingDomainParameters(effective))
case param :: Nil => Right(param)
case param :: rest =>
logger.error(
s"Multiple domain parameters at ${effective} ${rest.size + 1}. Using first one: $param."
)
Right(param)
}
}
}
private def checkDomainTrustCertificateReplace(
effective: EffectiveTime,
toValidate: SignedTopologyTransaction[TopologyChangeOp, DomainTrustCertificate],
@ -251,7 +199,25 @@ class ValidatingTopologyMappingChecks(
def loadOnboardingRestriction()
: EitherT[Future, TopologyTransactionRejection, OnboardingRestriction] = {
loadDomainParameters(effective).map(_.onboardingRestriction)
loadFromStore(effective, DomainParametersState.code).map { domainParamCandidates =>
val restrictions = domainParamCandidates.result.view
.flatMap(_.selectMapping[DomainParametersState])
.map(_.mapping.parameters.onboardingRestriction)
.toList
restrictions match {
case Nil =>
logger.error(
"Can not determine the onboarding restriction. Assuming the domain is locked."
)
OnboardingRestriction.RestrictedLocked
case param :: Nil => param
case param :: rest =>
logger.error(
s"Multiple domain parameters at ${effective} ${rest.size + 1}. Using first one with restriction ${param}."
)
param
}
}
}
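The lookup above falls back to the most restrictive setting when no domain parameters are found, and picks the first candidate when several are found. A simplified sketch of that fallback rule:
```
sealed trait Restriction
case object RestrictedLocked extends Restriction
case object UnrestrictedOpen extends Restriction

def pickOnboardingRestriction(candidates: List[Restriction]): Restriction = candidates match {
  case Nil => RestrictedLocked // cannot tell, so assume the domain is locked
  case only :: Nil => only
  case first :: _ => first // the real code logs a warning and uses the first one
}
```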
def checkDomainIsNotLocked(restriction: OnboardingRestriction) = {
@ -345,97 +311,65 @@ class ValidatingTopologyMappingChecks(
traceContext: TraceContext
): EitherT[Future, TopologyTransactionRejection, Unit] = {
import toValidate.mapping
def checkParticipants() = {
val newParticipants = mapping.participants.map(_.participantId).toSet --
inStore.toList.flatMap(_.mapping.participants.map(_.participantId))
for {
participantTransactions <- EitherT.right[TopologyTransactionRejection](
store
.findPositiveTransactions(
CantonTimestamp.MaxValue,
asOfInclusive = false,
isProposal = false,
types = Seq(DomainTrustCertificate.code, OwnerToKeyMapping.code),
filterUid = Some(newParticipants.toSeq.map(_.uid)),
filterNamespace = None,
)
)
// check that all participants are known on the domain
missingParticipantCertificates = newParticipants -- participantTransactions
.collectOfMapping[DomainTrustCertificate]
.result
.map(_.mapping.participantId)
_ <- EitherTUtil.condUnitET[Future][TopologyTransactionRejection](
missingParticipantCertificates.isEmpty,
TopologyTransactionRejection.UnknownMembers(missingParticipantCertificates.toSeq),
)
// check that all known participants have keys registered
participantsWithInsufficientKeys =
newParticipants -- participantTransactions
.collectOfMapping[OwnerToKeyMapping]
.result
.view
.filter { tx =>
val keyPurposes = tx.mapping.keys.map(_.purpose).toSet
requiredKeyPurposes.forall(keyPurposes)
}
.map(_.mapping.member)
.collect { case pid: ParticipantId => pid }
.toSeq
_ <- EitherTUtil.condUnitET[Future][TopologyTransactionRejection](
participantsWithInsufficientKeys.isEmpty,
TopologyTransactionRejection.InsufficientKeys(participantsWithInsufficientKeys.toSeq),
)
} yield {
()
}
}
def checkHostingLimits(effective: EffectiveTime) = for {
hostingLimitsCandidates <- loadFromStore(
effective,
code = PartyHostingLimits.code,
filterUid = Some(Seq(toValidate.mapping.partyId.uid)),
)
hostingLimits = hostingLimitsCandidates.result.view
.flatMap(_.selectMapping[PartyHostingLimits])
.map(_.mapping.quota)
.toList
partyHostingLimit = hostingLimits match {
case Nil => // No hosting limits found. This is expected if no restrictions are in place
None
case quota :: Nil => Some(quota)
case multiple @ (quota :: _) =>
logger.error(
s"Multiple PartyHostingLimits at ${effective} ${multiple.size}. Using first one with quota $quota."
)
Some(quota)
}
// TODO(#14050) load default party hosting limits from dynamic domain parameters in case the party
// doesn't have a specific PartyHostingLimits mapping issued by the domain.
_ <- partyHostingLimit match {
case Some(limit) =>
EitherTUtil.condUnitET[Future][TopologyTransactionRejection](
toValidate.mapping.participants.size <= limit,
TopologyTransactionRejection.PartyExceedsHostingLimit(
toValidate.mapping.partyId,
limit,
toValidate.mapping.participants.size,
),
)
case None => EitherTUtil.unit[TopologyTransactionRejection]
}
} yield ()
val numConfirmingParticipants =
mapping.participants.count(_.permission >= ParticipantPermission.Confirmation)
for {
_ <- checkParticipants()
_ <- checkHostingLimits(EffectiveTime.MaxValue)
} yield ()
// check the threshold
_ <- EitherTUtil.condUnitET[Future][TopologyTransactionRejection](
mapping.threshold.value <= numConfirmingParticipants,
TopologyTransactionRejection.ThresholdTooHigh(
mapping.threshold.value,
numConfirmingParticipants,
),
)
newParticipants = mapping.participants.map(_.participantId).toSet --
inStore.toList.flatMap(_.mapping.participants.map(_.participantId))
participantTransactions <- EitherT.right[TopologyTransactionRejection](
store
.findPositiveTransactions(
CantonTimestamp.MaxValue,
asOfInclusive = false,
isProposal = false,
types = Seq(DomainTrustCertificate.code, OwnerToKeyMapping.code),
filterUid = Some(newParticipants.toSeq.map(_.uid)),
filterNamespace = None,
)
)
// check that all participants are known on the domain
missingParticipantCertificates = newParticipants -- participantTransactions
.collectOfMapping[DomainTrustCertificate]
.result
.map(_.mapping.participantId)
_ <- EitherTUtil.condUnitET[Future][TopologyTransactionRejection](
missingParticipantCertificates.isEmpty,
TopologyTransactionRejection.UnknownMembers(missingParticipantCertificates.toSeq),
)
// check that all known participants have keys registered
participantsWithInsufficientKeys =
newParticipants -- participantTransactions
.collectOfMapping[OwnerToKeyMapping]
.result
.view
.filter { tx =>
val keyPurposes = tx.mapping.keys.map(_.purpose).toSet
requiredKeyPurposes.forall(keyPurposes)
}
.map(_.mapping.member)
.collect { case pid: ParticipantId => pid }
.toSeq
_ <- EitherTUtil.condUnitET[Future][TopologyTransactionRejection](
participantsWithInsufficientKeys.isEmpty,
TopologyTransactionRejection.InsufficientKeys(participantsWithInsufficientKeys.toSeq),
)
} yield {
()
}
}
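Stripped of the store plumbing, the participant checks above reduce to two set differences: newly added participants must have a domain trust certificate, and must have the required keys registered. A minimal sketch:
```
def checkNewParticipants(
    newParticipants: Set[String],
    withTrustCertificate: Set[String],
    withRequiredKeys: Set[String],
): Either[String, Unit] = {
  val unknown = newParticipants -- withTrustCertificate
  val insufficientKeys = newParticipants -- withRequiredKeys
  for {
    _ <- Either.cond(unknown.isEmpty, (), s"Unknown members: ${unknown.toSeq.sorted}")
    _ <- Either.cond(insufficientKeys.isEmpty, (), s"Insufficient keys: ${insufficientKeys.toSeq.sorted}")
  } yield ()
}
```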
private def checkOwnerToKeyMappingReplace(
@ -531,7 +465,15 @@ class ValidatingTopologyMappingChecks(
val newMediators = (toValidate.mapping.allMediatorsInGroup.toSet -- inStore.toList.flatMap(
_.mapping.allMediatorsInGroup
)).map(identity[Member])
checkMissingNsdAndOtkMappings(effectiveTime, newMediators)
val thresholdCheck = EitherTUtil.condUnitET(
toValidate.mapping.threshold.value <= toValidate.mapping.active.size,
TopologyTransactionRejection.ThresholdTooHigh(
toValidate.mapping.threshold.value,
toValidate.mapping.active.size,
),
)
thresholdCheck.flatMap(_ => checkMissingNsdAndOtkMappings(effectiveTime, newMediators))
}
private def checkSequencerDomainStateReplace(
@ -543,7 +485,14 @@ class ValidatingTopologyMappingChecks(
_.mapping.allSequencers
)).map(identity[Member])
checkMissingNsdAndOtkMappings(effectiveTime, newSequencers)
val thresholdCheck = EitherTUtil.condUnitET(
toValidate.mapping.threshold.value <= toValidate.mapping.active.size,
TopologyTransactionRejection.ThresholdTooHigh(
toValidate.mapping.threshold.value,
toValidate.mapping.active.size,
),
)
thresholdCheck.flatMap(_ => checkMissingNsdAndOtkMappings(effectiveTime, newSequencers))
}
private def checkAuthorityOf(
@ -572,85 +521,15 @@ class ValidatingTopologyMappingChecks(
}
}
checkPartiesAreKnown()
}
private def checkDecentralizedNamespaceDefinitionReplace(
toValidate: SignedTopologyTransaction[
TopologyChangeOp.Replace,
DecentralizedNamespaceDefinition,
],
inStore: Option[SignedTopologyTransaction[
TopologyChangeOp,
DecentralizedNamespaceDefinition,
]],
)(implicit traceContext: TraceContext): EitherT[Future, TopologyTransactionRejection, Unit] = {
def checkDecentralizedNamespaceDerivedFromOwners()
: EitherT[Future, TopologyTransactionRejection, Unit] =
if (inStore.isEmpty) {
// The very first decentralized namespace definition must have namespace computed from the owners
EitherTUtil.condUnitET(
toValidate.mapping.namespace == DecentralizedNamespaceDefinition
.computeNamespace(toValidate.mapping.owners),
InvalidTopologyMapping(
s"The decentralized namespace ${toValidate.mapping.namespace} is not derived from the owners ${toValidate.mapping.owners.toSeq.sorted}"
),
)
} else {
EitherTUtil.unit
}
def checkNoClashWithRootCertificates()(implicit
traceContext: TraceContext
): EitherT[Future, TopologyTransactionRejection, Unit] = {
loadFromStore(
EffectiveTime.MaxValue,
Code.NamespaceDelegation,
filterUid = None,
filterNamespace = Some(Seq(toValidate.mapping.namespace)),
).flatMap { namespaceDelegations =>
val foundRootCertWithSameNamespace = namespaceDelegations.result.exists(stored =>
NamespaceDelegation.isRootCertificate(stored.transaction)
)
EitherTUtil.condUnitET(
!foundRootCertWithSameNamespace,
NamespaceAlreadyInUse(toValidate.mapping.namespace),
)
}
val checkThreshold = {
val actual = toValidate.mapping.threshold.value
val mustBeAtMost = toValidate.mapping.parties.size
EitherTUtil.condUnitET(
actual <= mustBeAtMost,
TopologyTransactionRejection.ThresholdTooHigh(actual, mustBeAtMost),
)
}
for {
_ <- checkDecentralizedNamespaceDerivedFromOwners()
_ <- checkNoClashWithRootCertificates()
} yield ()
}
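The first check above requires the initial decentralized namespace to be computed from its owners. Canton derives it with its own fingerprinting; the sketch below only illustrates the shape of such a derivation by hashing the sorted owner namespaces (an assumption for illustration, not the actual algorithm):
```
import java.security.MessageDigest

def computeNamespace(owners: Set[String]): String = {
  val digest = MessageDigest.getInstance("SHA-256")
  owners.toSeq.sorted.foreach(owner => digest.update(owner.getBytes("UTF-8")))
  digest.digest().map("%02x".format(_)).mkString
}

def checkDerivedFromOwners(declared: String, owners: Set[String]): Either[String, Unit] =
  Either.cond(
    declared == computeNamespace(owners),
    (),
    s"The decentralized namespace $declared is not derived from the owners ${owners.toSeq.sorted}",
  )
```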
private def checkNamespaceDelegationReplace(
toValidate: SignedTopologyTransaction[
TopologyChangeOp.Replace,
NamespaceDelegation,
]
)(implicit traceContext: TraceContext): EitherT[Future, TopologyTransactionRejection, Unit] = {
def checkNoClashWithDecentralizedNamespaces()(implicit
traceContext: TraceContext
): EitherT[Future, TopologyTransactionRejection, Unit] = {
EitherTUtil.ifThenET(NamespaceDelegation.isRootCertificate(toValidate)) {
loadFromStore(
EffectiveTime.MaxValue,
Code.DecentralizedNamespaceDefinition,
filterUid = None,
filterNamespace = Some(Seq(toValidate.mapping.namespace)),
).flatMap { dns =>
val foundDecentralizedNamespaceWithSameNamespace = dns.result.nonEmpty
EitherTUtil.condUnitET(
!foundDecentralizedNamespaceWithSameNamespace,
NamespaceAlreadyInUse(toValidate.mapping.namespace),
)
}
}
}
checkNoClashWithDecentralizedNamespaces()
checkThreshold.flatMap(_ => checkPartiesAreKnown())
}
}
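
The two hunks above add the same guard to the mediator group and sequencer group checks: a replace is rejected with `ThresholdTooHigh` whenever the configured threshold exceeds the number of active members, since such a threshold could never be met. Below is a minimal, self-contained sketch of that invariant; `ThresholdGuardSketch` and its members are illustrative stand-ins, not Canton's `EitherTUtil.condUnitET` / `TopologyTransactionRejection` API.

```
// Sketch only: a threshold is acceptable iff the active members can meet it.
object ThresholdGuardSketch {
  final case class ThresholdTooHigh(threshold: Int, activeMembers: Int)

  def checkThreshold(threshold: Int, active: Set[String]): Either[ThresholdTooHigh, Unit] =
    Either.cond(threshold <= active.size, (), ThresholdTooHigh(threshold, active.size))

  def main(args: Array[String]): Unit = {
    // two active mediators can satisfy a threshold of two ...
    assert(checkThreshold(2, Set("MED::med1", "MED::med2")).isRight)
    // ... but not a threshold of three, so that replace would be rejected
    assert(checkThreshold(3, Set("MED::med1", "MED::med2")) == Left(ThresholdTooHigh(3, 2)))
  }
}
```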

View File

@ -6,6 +6,7 @@ package com.digitalasset.canton.tracing
import com.daml.scalautil.Statement.discard
import com.digitalasset.canton.concurrent.DirectExecutionContext
import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
import com.digitalasset.canton.lifecycle.FutureUnlessShutdownImpl.AbortedDueToShutdownException
import com.digitalasset.canton.logging.TracedLogger
import com.github.blemale.scaffeine.{AsyncLoadingCache, Scaffeine}
@ -74,10 +75,8 @@ class TracedAsyncLoadingCache[K, V](
)(tracedLogger: TracedLogger) {
implicit private[this] val ec: ExecutionContext = DirectExecutionContext(tracedLogger)
/*
* See com.github.blemale.scaffeine.AsyncLoadingCache.get
* If shutting down, the returned future will be failed with an AbortedDueToShutdownException
*/
/** @see com.github.blemale.scaffeine.AsyncLoadingCache.get
*/
def get(key: K)(implicit traceContext: TraceContext): Future[V] =
underlying.get(TracedKey(key)(traceContext))
@ -86,14 +85,12 @@ class TracedAsyncLoadingCache[K, V](
discard(underlying.synchronous().asMap().filterInPlace((t, v) => !filter(t.key, v)))
}
def getUS(key: K)(implicit traceContext: TraceContext): FutureUnlessShutdown[V] =
def getUS(key: K)(implicit traceContext: TraceContext): FutureUnlessShutdown[V] = {
FutureUnlessShutdown.transformAbortedF(get(key))
}
/*
* See com.github.blemale.scaffeine.AsyncLoadingCache.getAll
* If shutting down, the returned future will be failed with an AbortedDueToShutdownException wrapped inside
* a java.util.concurrent.CompletionException
*/
/** @see com.github.blemale.scaffeine.AsyncLoadingCache.getAll
*/
def getAll(keys: Iterable[K])(implicit traceContext: TraceContext): Future[Map[K, V]] =
underlying
.getAll(keys.map(TracedKey(_)(traceContext)))
@ -101,9 +98,16 @@ class TracedAsyncLoadingCache[K, V](
def getAllUS(
keys: Iterable[K]
)(implicit traceContext: TraceContext): FutureUnlessShutdown[Map[K, V]] = {
FutureUnlessShutdown.transformAbortedF(getAll(keys))
}
)(implicit traceContext: TraceContext): FutureUnlessShutdown[Map[K, V]] =
try
FutureUnlessShutdown.outcomeF(
underlying
.getAll(keys.map(TracedKey(_)(traceContext)))
.map(_.map { case (tracedKey, value) => tracedKey.key -> value })(ec)
)
catch {
case _: AbortedDueToShutdownException => FutureUnlessShutdown.abortedDueToShutdown
}
override def toString = s"TracedAsyncLoadingCache($underlying)"
}
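
Both `getUS` and `getAllUS` above address the same concern: while the node is shutting down, the underlying Scaffeine loader fails the returned future with an `AbortedDueToShutdownException`, and the cache converts that failure into an explicit aborted-due-to-shutdown result instead of surfacing a failed future. The toy conversion below illustrates the idea; `UnlessShutdown` and `transformAborted` are stand-ins for Canton's `FutureUnlessShutdown` machinery, not its actual API.

```
// Sketch only: convert a future failed with the shutdown marker into an explicit outcome.
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}

object ShutdownAwareGetSketch {
  final class AbortedDueToShutdownException extends RuntimeException("aborted due to shutdown")

  sealed trait UnlessShutdown[+A]
  final case class Outcome[A](value: A) extends UnlessShutdown[A]
  case object AbortedDueToShutdown extends UnlessShutdown[Nothing]

  def transformAborted[A](f: Future[A])(implicit ec: ExecutionContext): Future[UnlessShutdown[A]] =
    f.transform {
      case Success(value)                            => Success(Outcome(value))
      case Failure(_: AbortedDueToShutdownException) => Success(AbortedDueToShutdown)
      case Failure(other)                            => Failure(other)
    }
}
```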

View File

@ -4,9 +4,9 @@
package com.digitalasset.canton.util
import cats.{Monad, Order}
import com.daml.lf.data.*
import com.daml.lf.transaction.TransactionVersion
import com.daml.lf.value.Value
import com.digitalasset.daml.lf.data.*
import com.digitalasset.daml.lf.transaction.TransactionVersion
import com.digitalasset.daml.lf.value.Value
import com.digitalasset.canton.LfPartyId
import com.digitalasset.canton.protocol.*
@ -61,7 +61,7 @@ object LfTransactionUtil {
case n: LfNodeLookupByKey => n.result
}
/** All contract IDs referenced with a Daml `com.daml.lf.value.Value` */
/** All contract IDs referenced with a Daml `com.digitalasset.daml.lf.value.Value` */
def referencedContractIds(value: Value): Set[LfContractId] = value.cids
/** Whether or not a node has a random seed */
@ -106,7 +106,7 @@ object LfTransactionUtil {
/** Monadic visit to all nodes of the transaction in execution order.
* Exercise nodes are visited twice: when execution reaches them and when execution leaves their body.
* Crashes on malformed transactions (see `com.daml.lf.transaction.GenTransaction.isWellFormed`)
* Crashes on malformed transactions (see `com.digitalasset.daml.lf.transaction.GenTransaction.isWellFormed`)
*/
@nowarn("msg=match may not be exhaustive")
def foldExecutionOrderM[F[_], A](tx: LfTransaction, initial: A)(
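
The doc comment on `foldExecutionOrderM` carries the key behavioural point: exercise nodes are visited once when execution reaches them and again when execution leaves their body, while other nodes are visited once. The toy fold below demonstrates that visiting order on a hand-rolled tree; `ToyNode` and `foldExecOrder` are illustrative only and do not mirror the actual `LfTransactionUtil` signature.

```
// Sketch only: leaves are visited once, "exercise" nodes on entry and on exit.
object ExecutionOrderSketch {
  sealed trait ToyNode
  final case class Leaf(label: String) extends ToyNode
  final case class Exercise(label: String, children: List[ToyNode]) extends ToyNode

  def foldExecOrder[A](node: ToyNode, acc: A)(
      onEnter: (A, String) => A,
      onLeaf: (A, String) => A,
      onExit: (A, String) => A,
  ): A = node match {
    case Leaf(label) => onLeaf(acc, label)
    case Exercise(label, children) =>
      val entered = onEnter(acc, label)
      val visited =
        children.foldLeft(entered)((a, child) => foldExecOrder(child, a)(onEnter, onLeaf, onExit))
      onExit(visited, label)
  }

  def main(args: Array[String]): Unit = {
    val tx = Exercise("ex-0", List(Leaf("create-1"), Exercise("ex-1", List(Leaf("fetch-1")))))
    val visits = foldExecOrder(tx, Vector.empty[String])(
      (acc, l) => acc :+ s"enter $l",
      (acc, l) => acc :+ l,
      (acc, l) => acc :+ s"leave $l",
    )
    assert(visits == Vector("enter ex-0", "create-1", "enter ex-1", "fetch-1", "leave ex-1", "leave ex-0"))
  }
}
```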

View File

@ -1,4 +1,4 @@
sdk-version: 3.1.0-snapshot.20240620.13140.0.v996a1164
sdk-version: 3.1.0-snapshot.20240613.13124.0.v24e0f5e8
build-options:
- --target=2.1
name: CantonExamples

View File

@ -650,9 +650,9 @@ create table sequencer_lower_bound (
create table sequencer_events (
ts bigint primary key,
node_index smallint not null,
-- single char to indicate the event type: D for deliver event, E for deliver error, R for deliver receipt
-- single char to indicate the event type: D for deliver event, E for deliver error
event_type char(1) not null
constraint event_type_enum check (event_type IN ('D', 'E', 'R')),
constraint event_type_enum check (event_type = 'D' or event_type = 'E'),
message_id varchar null,
sender integer null,
-- null if event goes to everyone, otherwise specify member ids of recipients
@ -921,8 +921,6 @@ create table seq_traffic_control_consumed_journal (
extra_traffic_consumed bigint not null,
-- base traffic remainder at sequencing_timestamp
base_traffic_remainder bigint not null,
-- the last cost consumed at sequencing_timestamp
last_consumed_cost bigint not null,
-- traffic entries have a unique sequencing_timestamp per member
primary key (member, sequencing_timestamp)
);

View File

@ -1 +1 @@
1923effb9fa5d583e6c188f401e708a5e9c03b725ed988d0928a0b61660854a2
8347bf5092167e6a3df9d8f3cf1d0054a779e272589f7c0f3aad50cca8f8736a

View File

@ -81,8 +81,7 @@ CREATE TABLE lapi_command_completions (
trace_context BINARY LARGE OBJECT
);
CREATE INDEX lapi_command_completions_application_id_offset_idx ON lapi_command_completions USING btree (application_id, completion_offset);
CREATE INDEX lapi_command_completions_offset_idx ON lapi_command_completions USING btree (completion_offset);
CREATE INDEX lapi__command_completions_application_id_offset_idx ON lapi_command_completions USING btree (application_id, completion_offset);
---------------------------------------------------------------------------------------------------
-- Events: create

View File

@ -673,9 +673,9 @@ create table sequencer_lower_bound (
create table sequencer_events (
ts bigint primary key,
node_index smallint not null,
-- single char to indicate the event type: D for deliver event, E for deliver error, R for deliver receipt
-- single char to indicate the event type: D for deliver event, E for deliver error
event_type char(1) not null
constraint event_type_enum check (event_type IN ('D', 'E', 'R')),
constraint event_type_enum check (event_type = 'D' or event_type = 'E'),
message_id varchar(300) collate "C" null,
sender integer null,
-- null if event goes to everyone, otherwise specify member ids of recipients
@ -935,8 +935,6 @@ create table seq_traffic_control_consumed_journal (
extra_traffic_consumed bigint not null,
-- base traffic remainder at sequencing_timestamp
base_traffic_remainder bigint not null,
-- the last cost consumed at sequencing_timestamp
last_consumed_cost bigint not null,
-- traffic entries have a unique sequencing_timestamp per member
primary key (member, sequencing_timestamp)
);

View File

@ -1 +1 @@
1f50894cad8a5ce3e65f5e6b0a48484d2cf0cd7cc354fc6b0aa9cdda97d9e6d3
22559de6824376d64006305601db270b57afafb1eccc05e041e55bf3cb858e30

View File

@ -669,8 +669,7 @@ create or replace view debug.seq_traffic_control_consumed_journal as
member,
debug.canton_timestamp(sequencing_timestamp) as sequencing_timestamp,
extra_traffic_consumed,
base_traffic_remainder,
last_consumed_cost
base_traffic_remainder
from seq_traffic_control_consumed_journal;
create or replace view debug.seq_traffic_control_initial_timestamp as

View File

@ -1 +1 @@
d1c0b524698a1e1249785b0fe973f21f5542020215b49c4012bd774e310fb82e
f4d58cc709e08a2081d761637ea8d27393decb4ed1a6f4ee8ecf4843a838eab0

View File

@ -100,7 +100,6 @@ CREATE TABLE lapi_command_completions (
);
CREATE INDEX lapi_command_completions_application_id_offset_idx ON lapi_command_completions USING btree (application_id, completion_offset);
CREATE INDEX lapi_command_completions_offset_idx ON lapi_command_completions USING btree (completion_offset);
---------------------------------------------------------------------------------------------------
-- Events: Assign

View File

@ -1,20 +1,20 @@
// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package com.daml.lf
package com.digitalasset.daml.lf
import com.daml.lf.data.ImmArray
import com.daml.lf.data.Ref.PackageId
import com.daml.lf.language.{Ast, LanguageMajorVersion}
import com.daml.lf.speedy.Compiler
import com.daml.lf.transaction.{
import com.digitalasset.daml.lf.data.ImmArray
import com.digitalasset.daml.lf.data.Ref.PackageId
import com.digitalasset.daml.lf.language.{Ast, LanguageMajorVersion}
import com.digitalasset.daml.lf.speedy.Compiler
import com.digitalasset.daml.lf.transaction.{
Node,
NodeId,
TransactionCoder,
TransactionOuterClass,
TransactionVersion,
}
import com.daml.lf.value.ValueCoder
import com.digitalasset.daml.lf.value.ValueCoder
import com.digitalasset.canton.protocol.{
LfNode,
LfNodeId,

View File

@ -6,7 +6,7 @@ package com.digitalasset.canton.data
import cats.data.Chain
import cats.syntax.functor.*
import cats.syntax.functorFilter.*
import com.daml.lf.transaction.NodeId
import com.digitalasset.daml.lf.transaction.NodeId
import com.digitalasset.canton.LfPartyId
import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt}
import com.digitalasset.canton.data.TransactionViewDecomposition.{NewView, SameView}

View File

@ -3,7 +3,7 @@
package com.digitalasset.canton.protocol
import com.daml.lf.data.Ref.PackageId
import com.digitalasset.daml.lf.data.Ref.PackageId
import com.digitalasset.canton.LfPackageId
import com.digitalasset.canton.config.CantonRequireTypes.String255
import com.digitalasset.canton.data.CantonTimestamp

View File

@ -3,10 +3,10 @@
package com.digitalasset.canton.protocol
import com.daml.lf.crypto
import com.daml.lf.data.{ImmArray, Time}
import com.daml.lf.transaction.NodeId
import com.daml.lf.transaction.Transaction.Metadata
import com.digitalasset.daml.lf.crypto
import com.digitalasset.daml.lf.data.{ImmArray, Time}
import com.digitalasset.daml.lf.transaction.NodeId
import com.digitalasset.daml.lf.transaction.Transaction.Metadata
import com.digitalasset.canton.data.CantonTimestamp
/** Collects the metadata of a LF transaction to the extent that is needed in Canton

View File

@ -7,7 +7,7 @@ import cats.data.{NonEmptyChain, Validated}
import cats.syntax.either.*
import cats.syntax.foldable.*
import cats.syntax.functor.*
import com.daml.lf.data.ImmArray
import com.digitalasset.daml.lf.data.ImmArray
import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.data.ActionDescription
import com.digitalasset.canton.protocol.RollbackContext.{RollbackScope, RollbackSibling}
@ -26,7 +26,7 @@ import scala.collection.mutable
* every non-root node has exactly one parent and is reachable from a root node. (No cycles, no sharing, no orphaned node).</li>
* <li>All node Ids are non-negative.</li>
* <li>The type parameter `S` determines whether all create nodes have suffixed IDs or none.</li>
* <li>Create nodes have unique contract ids of shape `com.daml.lf.value.Value.ContractId.V1`.
* <li>Create nodes have unique contract ids of shape `com.digitalasset.daml.lf.value.Value.ContractId.V1`.
* The contract id of a create node is not referenced before the node.
* The contract id of a rolled back create node is not referenced outside its rollback scope.</li>
* <li>The discriminators of create nodes without suffixed contract ids are unique among all discriminators that appear in the transaction.</li>

View File

@ -12,6 +12,7 @@ import com.digitalasset.canton.sequencing.protocol.{
Batch,
MediatorGroupRecipient,
OpenEnvelope,
ParticipantsOfParty,
Recipients,
}
import com.digitalasset.canton.topology.client.TopologySnapshot
@ -56,13 +57,32 @@ final case class TransactionConfirmationRequest(
val rootHashMessageEnvelopes =
NonEmpty.from(recipientsOfRootHashMessage) match {
case Some(recipientsNE) =>
val groupsWithMediator = recipientsNE.map(NonEmpty(Set, _, mediator))
val rootHashMessageEnvelope = OpenEnvelope(
rootHashMessage(ipsSnapshot.timestamp),
Recipients.recipientGroups(groupsWithMediator),
)(protocolVersion)
List(rootHashMessageEnvelope)
// TODO(#13883) Use BCC also for group addresses
// val groupsWithMediator =
// recipientsOfRootHashMessage.map(recipient => NonEmpty(Set, recipient, mediatorRecipient))
// val rootHashMessageEnvelope = OpenEnvelope(
// rootHashMessage,
// Recipients.recipientGroups(NonEmptyUtil.fromUnsafe(groupsWithMediator)),
// )(protocolVersion)
val groupAddressing = recipientsOfRootHashMessage.exists {
case ParticipantsOfParty(_) => true
case _ => false
}
// if using group addressing, we place all recipients in one group instead of in separate groups as before (they were separate for legacy reasons)
val rootHashMessageRecipients =
if (groupAddressing)
Recipients.recipientGroups(
NonEmpty.mk(Seq, recipientsNE.toSet ++ Seq(mediator))
)
else
Recipients.recipientGroups(
recipientsNE.map(NonEmpty.mk(Set, _, mediator))
)
List(
OpenEnvelope(rootHashMessage(ipsSnapshot.timestamp), rootHashMessageRecipients)(
protocolVersion
)
)
case None =>
loggingContext.warn("Confirmation request without root hash message recipients")
List.empty

View File

@ -117,7 +117,7 @@ class QueueBasedDomainOutbox(
private def hasUnsentTransactions: Boolean = domainOutboxQueue.numUnsentTransactions > 0
def newTransactionsAdded(
def newTransactionsAddedToAuthorizedStore(
asOf: CantonTimestamp,
num: Int,
): FutureUnlessShutdown[Unit] = {

View File

@ -162,7 +162,7 @@ class StoreBasedDomainOutbox(
final def queueSize: Int = watermarks.get().queuedApprox
final def newTransactionsAdded(
final def newTransactionsAddedToAuthorizedStore(
asOf: CantonTimestamp,
num: Int,
): FutureUnlessShutdown[Unit] = {
@ -375,7 +375,7 @@ abstract class DomainOutbox extends DomainOutboxHandle {
def targetClient: DomainTopologyClientWithInit
def newTransactionsAdded(
def newTransactionsAddedToAuthorizedStore(
asOf: CantonTimestamp,
num: Int,
): FutureUnlessShutdown[Unit]
@ -396,7 +396,7 @@ class DomainOutboxDynamicObserver(val loggerFactory: NamedLoggerFactory)
transactions: Seq[SignedTopologyTransaction[TopologyChangeOp, TopologyMapping]],
)(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = {
outboxRef.get.fold(FutureUnlessShutdown.unit)(
_.newTransactionsAdded(timestamp, transactions.size)
_.newTransactionsAddedToAuthorizedStore(timestamp, transactions.size)
)
}

View File

@ -4,9 +4,9 @@
package com.digitalasset.canton.util
import cats.implicits.*
import com.daml.lf.archive.{DarDecoder, DarReader}
import com.daml.lf.data.Ref
import com.daml.lf.language.Ast
import com.digitalasset.daml.lf.archive.{DarDecoder, DarReader}
import com.digitalasset.daml.lf.data.Ref
import com.digitalasset.daml.lf.language.Ast
import com.google.protobuf.ByteString
import java.io.{ByteArrayInputStream, File, InputStream}

View File

@ -3,8 +3,8 @@
package com.digitalasset.canton.version
import com.daml.lf.transaction.TransactionVersion
import com.daml.lf.transaction.TransactionVersion.*
import com.digitalasset.daml.lf.transaction.TransactionVersion
import com.digitalasset.daml.lf.transaction.TransactionVersion.*
import scala.collection.immutable.SortedMap
import scala.math.Ordered.orderingToOrdered

View File

@ -19,7 +19,7 @@ class RecipientsTreeTest extends AnyWordSpec with BaseTest {
lazy val p6: Member = ParticipantId("participant6")
lazy val alice = PartyId(UniqueIdentifier.tryFromProtoPrimitive(s"alice::party"))
lazy val bob = PartyId(UniqueIdentifier.tryFromProtoPrimitive(s"bob::party"))
lazy val bob = PartyId(UniqueIdentifier.tryFromProtoPrimitive(s"alice::bob"))
lazy val pop1: ParticipantsOfParty = ParticipantsOfParty(alice)
lazy val pop2: ParticipantsOfParty = ParticipantsOfParty(bob)
@ -51,16 +51,6 @@ class RecipientsTreeTest extends AnyWordSpec with BaseTest {
t5.forMember(p5, Set(pop1)) shouldBe List(t5)
}
}
"allPaths" should {
"give all paths within the tree" in {
t5.allPaths shouldBe Seq(
Seq(Set(rec(p1), pop1), Set(rec(p4), rec(p2), pop2), Set(rec(p1), rec(p5))),
Seq(Set(rec(p1), pop1), Set(rec(p4), rec(p2), pop2), Set(rec(p3))),
Seq(Set(rec(p1), pop1), Set(rec(p2), rec(p6), pop2)),
)
}
}
}
"serialization and deserialization" should {

View File

@ -4,10 +4,10 @@
package com.digitalasset.canton
import com.daml.ledger.javaapi.data.Identifier
import com.daml.lf.data.{FrontStack, ImmArray}
import com.daml.lf.transaction.NodeId
import com.daml.lf.transaction.test.NodeIdTransactionBuilder
import com.daml.lf.transaction.test.TransactionBuilder.Implicits.{toIdentifier, toPackageId}
import com.digitalasset.daml.lf.data.{FrontStack, ImmArray}
import com.digitalasset.daml.lf.transaction.NodeId
import com.digitalasset.daml.lf.transaction.test.NodeIdTransactionBuilder
import com.digitalasset.daml.lf.transaction.test.TransactionBuilder.Implicits.{toIdentifier, toPackageId}
import com.digitalasset.canton.ComparesLfTransactions.TxTree
import com.digitalasset.canton.logging.pretty.PrettyTestInstances.*
import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}

View File

@ -4,7 +4,7 @@
package com.digitalasset.canton
import cats.Id
import com.daml.lf.data.{ImmArray, Ref}
import com.digitalasset.daml.lf.data.{ImmArray, Ref}
import com.digitalasset.canton.data.DeduplicationPeriod.DeduplicationDuration
import com.digitalasset.canton.protocol.{
LfCommittedTransaction,

View File

@ -3,8 +3,8 @@
package com.digitalasset.canton
import com.daml.lf.transaction.Versioned
import com.daml.lf.value.Value.ValueInt64
import com.digitalasset.daml.lf.transaction.Versioned
import com.digitalasset.daml.lf.value.Value.ValueInt64
import com.digitalasset.canton.crypto.{Hash, HashAlgorithm, TestHash}
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.protocol.{

View File

@ -3,7 +3,7 @@
package com.digitalasset.canton.data
import com.daml.lf.value.Value
import com.digitalasset.daml.lf.value.Value
import com.digitalasset.canton.data.ActionDescription.*
import com.digitalasset.canton.protocol.*
import com.digitalasset.canton.util.LfTransactionBuilder

View File

@ -3,7 +3,7 @@
package com.digitalasset.canton.data
import com.daml.lf.data.Time.Timestamp
import com.digitalasset.daml.lf.data.Time.Timestamp
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec

View File

@ -3,7 +3,7 @@
package com.digitalasset.canton.data
import com.daml.lf.value.Value.ValueInt64
import com.digitalasset.daml.lf.value.Value.ValueInt64
import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt}
import com.digitalasset.canton.crypto.{GeneratorsCrypto, Salt, TestHash}
import com.digitalasset.canton.data.ActionDescription.{

View File

@ -53,14 +53,12 @@ final class GeneratorsTrafficData(
extraTrafficLimit <- Arbitrary.arbitrary[NonNegativeLong]
extraTrafficConsumed <- Arbitrary.arbitrary[NonNegativeLong]
baseTrafficRemainder <- Arbitrary.arbitrary[NonNegativeLong]
lastConsumedCost <- Arbitrary.arbitrary[NonNegativeLong]
timestamp <- Arbitrary.arbitrary[CantonTimestamp]
serial <- Arbitrary.arbitrary[Option[PositiveInt]]
} yield TrafficState(
extraTrafficLimit,
extraTrafficConsumed,
baseTrafficRemainder,
lastConsumedCost,
timestamp,
serial,
)

View File

@ -3,9 +3,9 @@
package com.digitalasset.canton.data
import com.daml.lf.transaction.test.TestNodeBuilder.CreateKey
import com.daml.lf.transaction.test.TreeTransactionBuilder.NodeWrapper
import com.daml.lf.transaction.test.{TestIdFactory, TestNodeBuilder, TreeTransactionBuilder}
import com.digitalasset.daml.lf.transaction.test.TestNodeBuilder.CreateKey
import com.digitalasset.daml.lf.transaction.test.TreeTransactionBuilder.NodeWrapper
import com.digitalasset.daml.lf.transaction.test.{TestIdFactory, TestNodeBuilder, TreeTransactionBuilder}
import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt}
import com.digitalasset.canton.data.TransactionViewDecomposition.*
import com.digitalasset.canton.protocol.RollbackContext.{RollbackScope, RollbackSibling}
@ -115,7 +115,7 @@ class TransactionViewDecompositionTest
"a transaction with nested rollbacks" can {
import RollbackDecomposition.*
import com.daml.lf.transaction.test.TreeTransactionBuilder.*
import com.digitalasset.daml.lf.transaction.test.TreeTransactionBuilder.*
object tif extends TestIdFactory

View File

@ -4,7 +4,7 @@
package com.digitalasset.canton.data
import cats.syntax.either.*
import com.daml.lf.value.Value
import com.digitalasset.daml.lf.value.Value
import com.digitalasset.canton.crypto.{HashOps, Salt, TestSalt}
import com.digitalasset.canton.data.ViewParticipantData.InvalidViewParticipantData
import com.digitalasset.canton.protocol.*

View File

@ -3,7 +3,7 @@
package com.digitalasset.canton.ledger.offset
import com.daml.lf.data.Ref
import com.digitalasset.daml.lf.data.Ref
import com.digitalasset.canton.data.Offset
import org.scalacheck.{Arbitrary, Gen}

View File

@ -6,11 +6,11 @@ package com.digitalasset.canton.protocol
import cats.syntax.functor.*
import cats.syntax.functorFilter.*
import cats.syntax.option.*
import com.daml.lf.data.Ref.PackageId
import com.daml.lf.data.{Bytes, ImmArray}
import com.daml.lf.transaction.Versioned
import com.daml.lf.value.Value
import com.daml.lf.value.Value.{
import com.digitalasset.daml.lf.data.Ref.PackageId
import com.digitalasset.daml.lf.data.{Bytes, ImmArray}
import com.digitalasset.daml.lf.transaction.Versioned
import com.digitalasset.daml.lf.value.Value
import com.digitalasset.daml.lf.value.Value.{
ValueContractId,
ValueOptional,
ValueRecord,

View File

@ -3,7 +3,7 @@
package com.digitalasset.canton.protocol
import com.daml.lf.transaction.Versioned
import com.digitalasset.daml.lf.transaction.Versioned
import com.digitalasset.canton.LfPartyId
import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt}
import com.digitalasset.canton.crypto.*

View File

@ -3,8 +3,8 @@
package com.digitalasset.canton.protocol
import com.daml.lf.data.{Bytes, Ref}
import com.daml.lf.value.Value
import com.digitalasset.daml.lf.data.{Bytes, Ref}
import com.digitalasset.daml.lf.value.Value
import com.digitalasset.canton.crypto.{Hash, HashAlgorithm, TestHash, TestSalt}
import com.digitalasset.canton.data.{CantonTimestamp, ProcessedDisclosedContract}
import com.digitalasset.canton.protocol.SerializableContract.LedgerCreateTime

View File

@ -3,7 +3,7 @@
package com.digitalasset.canton.protocol
import com.daml.lf.value.ValueCoder
import com.digitalasset.daml.lf.value.ValueCoder
import com.digitalasset.canton.BaseTest
import com.digitalasset.canton.serialization.HasCryptographicEvidenceTest
import com.google.protobuf.ByteString

View File

@ -4,8 +4,8 @@
package com.digitalasset.canton.protocol
import com.daml.ledger.javaapi.data.Identifier
import com.daml.lf.transaction.test.TestNodeBuilder.CreateKey
import com.daml.lf.transaction.test.{TestNodeBuilder, TransactionBuilder}
import com.digitalasset.daml.lf.transaction.test.TestNodeBuilder.CreateKey
import com.digitalasset.daml.lf.transaction.test.{TestNodeBuilder, TransactionBuilder}
import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.ComparesLfTransactions.{TxTree, buildLfTransaction}
import com.digitalasset.canton.config.RequireTypes.PositiveInt
@ -70,7 +70,7 @@ class WellFormedTransactionMergeTest
arg = args(
LfValue.ValueParty(alice.toLf),
LfValue.ValueParty(alice.toLf),
args(LfValue.ValueNumeric(com.daml.lf.data.Numeric.assertFromString("0.0"))),
args(LfValue.ValueNumeric(com.digitalasset.daml.lf.data.Numeric.assertFromString("0.0"))),
),
)
),
@ -324,7 +324,7 @@ class WellFormedTransactionMergeTest
args(
LfValue.ValueParty(lfPayer),
LfValue.ValueParty(lfOwner),
args(LfValue.ValueNumeric(com.daml.lf.data.Numeric.assertFromString("0.0"))),
args(LfValue.ValueNumeric(com.digitalasset.daml.lf.data.Numeric.assertFromString("0.0"))),
valueList(lfObservers.map(LfValue.ValueParty)),
)
case _ => arg

View File

@ -3,8 +3,8 @@
package com.digitalasset.canton.protocol
import com.daml.lf.data.ImmArray
import com.daml.lf.value.Value
import com.digitalasset.daml.lf.data.ImmArray
import com.digitalasset.daml.lf.value.Value
import com.digitalasset.canton.protocol.ExampleTransactionFactory.*
import com.digitalasset.canton.protocol.WellFormedTransaction.{State, WithSuffixes, WithoutSuffixes}
import com.digitalasset.canton.{BaseTest, HasExecutionContext, LfPackageName, LfPartyId}

View File

@ -91,7 +91,7 @@ class TopologyTransactionTest extends AnyWordSpec with BaseTest with HasCryptogr
"party to participant" should {
val p1 =
mk(
PartyToParticipant.tryCreate(
PartyToParticipant(
PartyId(uid),
None,
PositiveInt.one,
@ -102,12 +102,12 @@ class TopologyTransactionTest extends AnyWordSpec with BaseTest with HasCryptogr
val p2 =
mk(
PartyToParticipant.tryCreate(
PartyToParticipant(
PartyId(uid),
Some(domainId),
PositiveInt.two,
Seq(
HostingParticipant(ParticipantId(uid2), ParticipantPermission.Confirmation),
HostingParticipant(ParticipantId(uid2), ParticipantPermission.Observation),
HostingParticipant(ParticipantId(uid), ParticipantPermission.Submission),
),
groupAddressing = true,

View File

@ -789,7 +789,6 @@ class SequencerClientTest
CantonTimestamp.MinValue.immediateSuccessor,
trafficReceipt.extraTrafficConsumed,
trafficReceipt.baseTrafficRemainder,
trafficReceipt.consumedCost,
)
}
@ -839,7 +838,6 @@ class SequencerClientTest
CantonTimestamp.MinValue.immediateSuccessor,
trafficReceipt.extraTrafficConsumed,
trafficReceipt.baseTrafficRemainder,
trafficReceipt.consumedCost,
)
}
@ -1154,7 +1152,6 @@ class SequencerClientTest
extraTrafficConsumed =
NonNegativeLong.tryCreate(Math.abs(request.timestamp.toProtoPrimitive)),
baseTrafficRemainder = NonNegativeLong.zero,
lastConsumedCost = NonNegativeLong.zero,
timestamp = request.timestamp,
serial = None,
)

View File

@ -76,11 +76,6 @@ class PartyTopologySnapshotClientTest extends AsyncWordSpec with BaseTest {
): Future[Set[LfPartyId]] =
???
override def activeParticipantsOfPartiesWithGroupAddressing(
parties: Seq[LfPartyId]
)(implicit traceContext: TraceContext): Future[Map[LfPartyId, Set[ParticipantId]]] =
???
override def consortiumThresholds(
parties: Set[LfPartyId]
)(implicit traceContext: TraceContext): Future[Map[LfPartyId, PositiveInt]] = ???

View File

@ -43,7 +43,7 @@ trait StoreBasedTopologySnapshotTest extends AsyncWordSpec with BaseTest with Ha
import factory.TestingTransactions.*
lazy val party1participant1 = mkAdd(
PartyToParticipant.tryCreate(
PartyToParticipant(
party1,
None,
PositiveInt.one,
@ -52,7 +52,7 @@ trait StoreBasedTopologySnapshotTest extends AsyncWordSpec with BaseTest with Ha
)
)
lazy val party2participant1_2 = mkAdd(
PartyToParticipant.tryCreate(
PartyToParticipant(
party2,
None,
PositiveInt.one,

View File

@ -4,7 +4,6 @@
package com.digitalasset.canton.topology.processing
import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.crypto.SigningPublicKey
import com.digitalasset.canton.topology.transaction.{NamespaceDelegation, TopologyMapping}
import com.digitalasset.canton.topology.{Namespace, TestingOwnerWithKeys}
@ -26,7 +25,7 @@ class AuthorizationGraphTest
def mkGraph = new AuthorizationGraph(namespace, extraDebugInfo = true, loggerFactory)
def mkAdd(
def mkAuth(
nsd: NamespaceDelegation,
key: SigningPublicKey,
): AuthorizedTopologyTransaction[NamespaceDelegation] = {
@ -34,27 +33,16 @@ class AuthorizationGraphTest
AuthorizedTopologyTransaction(tx)
}
def mkRemove(
nsd: NamespaceDelegation,
key: SigningPublicKey,
): AuthorizedTopologyTransaction[NamespaceDelegation] = {
val tx = factory.mkRemove(nsd, NonEmpty(Set, key), PositiveInt.two)
AuthorizedTopologyTransaction(tx)
}
def mkNs(namespace: Namespace, key: SigningPublicKey, isRootDelegation: Boolean) =
NamespaceDelegation.tryCreate(namespace, key, isRootDelegation)
val nsk1k1 = mkAdd(mkNs(namespace, key1, isRootDelegation = true), key1)
val nsk1k1_remove = mkRemove(mkNs(namespace, key1, isRootDelegation = true), key1)
val nsk2k1 = mkAdd(mkNs(namespace, key2, isRootDelegation = true), key1)
val nsk2k1_remove = mkRemove(mkNs(namespace, key2, isRootDelegation = true), key1)
val nsk3k2 = mkAdd(mkNs(namespace, key3, isRootDelegation = true), key2)
val nsk3k2_remove = mkRemove(mkNs(namespace, key3, isRootDelegation = true), key2)
val nsk1k1 = mkAuth(mkNs(namespace, key1, isRootDelegation = true), key1)
val nsk2k1 = mkAuth(mkNs(namespace, key2, isRootDelegation = true), key1)
val nsk2k1p = mkAuth(mkNs(namespace, key2, isRootDelegation = true), key1)
val nsk3k2 = mkAuth(mkNs(namespace, key3, isRootDelegation = true), key2)
val nsk1k2 =
mkAdd(mkNs(namespace, key1, isRootDelegation = true), key2) // cycle
val nsk3k1_nonRoot = mkAdd(mkNs(namespace, key3, isRootDelegation = false), key1)
val nsk3k1_nonRoot_remove = mkRemove(mkNs(namespace, key3, isRootDelegation = false), key1)
mkAuth(mkNs(namespace, key1, isRootDelegation = true), key2) // cycle
val nsk3k1_nonRoot = mkAuth(mkNs(namespace, key3, isRootDelegation = false), key1)
def replaceSignature[T <: TopologyMapping](
authTx: AuthorizedTopologyTransaction[T],
@ -77,7 +65,7 @@ class AuthorizationGraphTest
requireRoot: Boolean,
valid: Boolean,
) = {
graph.existsAuthorizedKeyIn(Set(key.fingerprint), requireRoot = requireRoot) shouldBe valid
graph.areValidAuthorizationKeys(Set(key.fingerprint), requireRoot = requireRoot) shouldBe valid
}
"authorization graph" when {
@ -105,7 +93,7 @@ class AuthorizationGraphTest
val graph = mkGraph
graph.add(nsk1k1)
graph.add(nsk2k1)
graph.remove(nsk2k1_remove)
graph.remove(nsk2k1)
check(graph, key2, requireRoot = false, valid = false)
check(graph, key1, requireRoot = false, valid = true)
}
@ -116,13 +104,10 @@ class AuthorizationGraphTest
graph.add(nsk3k2)
check(graph, key2, requireRoot = false, valid = true)
check(graph, key3, requireRoot = false, valid = true)
loggerFactory.assertLogs(
graph.remove(nsk2k1_remove),
_.warningMessage should include("dangling"),
)
loggerFactory.assertLogs(graph.remove(nsk2k1), _.warningMessage should include("dangling"))
check(graph, key2, requireRoot = false, valid = false)
check(graph, key3, requireRoot = false, valid = false)
graph.add(nsk2k1)
graph.add(nsk2k1p)
check(graph, key3, requireRoot = false, valid = true)
}
"support several chains" in {
@ -133,7 +118,7 @@ class AuthorizationGraphTest
check(graph, key3, requireRoot = false, valid = true)
graph.add(nsk3k1_nonRoot)
check(graph, key3, requireRoot = false, valid = true)
graph.remove(nsk3k1_nonRoot_remove)
graph.remove(nsk3k1_nonRoot)
check(graph, key3, requireRoot = false, valid = true)
}
@ -151,7 +136,7 @@ class AuthorizationGraphTest
graph.add(nsk1k1)
graph.add(nsk2k1)
graph.add(nsk3k2)
graph.remove(nsk1k1_remove)
graph.remove(nsk1k1)
check(graph, key1, requireRoot = false, valid = false)
check(graph, key2, requireRoot = false, valid = false)
check(graph, key3, requireRoot = false, valid = false)
@ -174,17 +159,17 @@ class AuthorizationGraphTest
// test that random key is not authorized
check(graph, key3, requireRoot = false, valid = false)
// remove first certificate
graph.remove(nsk2k1_remove)
graph.remove(nsk2k1)
check(graph, key2, requireRoot = true, valid = false)
// add other certificate (we don't remember removes, so we can do that in this test)
graph.add(nsk2k1)
graph.add(nsk2k1p)
check(graph, key2, requireRoot = true, valid = true)
}
"reject delegations with a wrong namespace" in {
val graph = mkGraph
val fakeNs = Namespace(key8.fingerprint)
val nsk1k1 = mkAdd(mkNs(fakeNs, key1, isRootDelegation = true), key1)
val nsk1k1 = mkAuth(mkNs(fakeNs, key1, isRootDelegation = true), key1)
loggerFactory.assertThrowsAndLogs[IllegalArgumentException](
graph.add(nsk1k1),
_.errorMessage should include("internal error"),
@ -199,7 +184,7 @@ class AuthorizationGraphTest
graph.add(nsk3k2)
check(graph, key3, requireRoot = true, valid = true)
graph.remove(replaceSignature(nsk3k2_remove, key1))
graph.remove(replaceSignature(nsk3k2, key1))
check(graph, key3, requireRoot = true, valid = false)
}
}
@ -217,10 +202,10 @@ class AuthorizationGraphTest
graph.add(nsk1k1)
graph.add(nsk2k1)
check(graph, key2, requireRoot = false, valid = true)
val fakeRemove = replaceSignature(nsk2k1_remove, key6)
val fakeRemove = replaceSignature(nsk2k1, key6)
graph.remove(fakeRemove) shouldBe false
check(graph, key2, requireRoot = false, valid = true)
graph.remove(nsk2k1_remove)
graph.remove(nsk2k1)
check(graph, key2, requireRoot = false, valid = false)
}
"prevent a non-root authorization to authorize a root authorization" in {
@ -228,7 +213,7 @@ class AuthorizationGraphTest
graph.add(nsk1k1)
graph.add(nsk3k1_nonRoot)
check(graph, key3, requireRoot = false, valid = true)
val nsk4k3 = mkAdd(mkNs(namespace, key4, isRootDelegation = true), key3)
val nsk4k3 = mkAuth(mkNs(namespace, key4, isRootDelegation = true), key3)
graph.add(nsk4k3) shouldBe false
check(graph, key4, requireRoot = false, valid = false)
}
@ -240,14 +225,14 @@ class AuthorizationGraphTest
graph.add(nsk2k1)
check(graph, key3, requireRoot = false, valid = true)
check(graph, key2, requireRoot = true, valid = true)
graph.remove(replaceSignature(nsk2k1_remove, key3)) shouldBe false
graph.remove(replaceSignature(nsk2k1, key3)) shouldBe false
check(graph, key2, requireRoot = true, valid = true)
}
"ensure once a delegation is revoked, all depending authorizations will become unauthorized" in {
val graph = mkGraph
val nsk4k3 = mkAdd(mkNs(namespace, key4, isRootDelegation = true), key3)
val nsk5k2 = mkAdd(mkNs(namespace, key5, isRootDelegation = true), key3)
val nsk4k3 = mkAuth(mkNs(namespace, key4, isRootDelegation = true), key3)
val nsk5k2 = mkAuth(mkNs(namespace, key5, isRootDelegation = true), key3)
graph.add(nsk1k1)
graph.add(nsk2k1)
graph.add(nsk3k2)
@ -256,7 +241,7 @@ class AuthorizationGraphTest
Seq(key3, key4, key5).foreach(check(graph, _, requireRoot = false, valid = true))
loggerFactory.assertLogs(
{
graph.remove(nsk2k1_remove)
graph.remove(nsk2k1)
Seq(key3, key4, key5).foreach(check(graph, _, requireRoot = false, valid = false))
},
_.warningMessage should include("The following target keys"),

View File

@ -70,7 +70,7 @@ class DecentralizedNamespaceAuthorizationGraphTest
}
def mkAdd(
def mkAuth(
nsd: NamespaceDelegation,
key: SigningPublicKey,
): AuthorizedTopologyTransaction[NamespaceDelegation] = {
@ -78,30 +78,20 @@ class DecentralizedNamespaceAuthorizationGraphTest
AuthorizedTopologyTransaction(tx)
}
def mkRemove(
nsd: NamespaceDelegation,
key: SigningPublicKey,
): AuthorizedTopologyTransaction[NamespaceDelegation] = {
val tx = factory.mkRemove(nsd, NonEmpty(Set, key), PositiveInt.two)
AuthorizedTopologyTransaction(tx)
}
def mkNs(namespace: Namespace, key: SigningPublicKey, isRootDelegation: Boolean) =
NamespaceDelegation.tryCreate(namespace, key, isRootDelegation)
val ns1k1k1 = mkAdd(mkNs(ns1, key1, isRootDelegation = true), key1)
val ns1k1k1 = mkAuth(mkNs(ns1, key1, isRootDelegation = true), key1)
val ns1k4k1 = mkAuth(mkNs(ns1, key4, isRootDelegation = true), key1)
val ns2k2k2 = mkAdd(mkNs(ns2, key2, isRootDelegation = true), key2)
val ns2k2k2_remove = mkRemove(mkNs(ns2, key2, isRootDelegation = true), key2)
val ns2k5k2 = mkAdd(mkNs(ns2, key5, isRootDelegation = true), key2)
val ns2k5k2_remove = mkRemove(mkNs(ns2, key5, isRootDelegation = true), key2)
val ns2k2k5 = mkAdd(mkNs(ns2, key2, isRootDelegation = true), key5)
val ns2k8k5 = mkAdd(mkNs(ns2, key8, isRootDelegation = true), key5)
val ns2k8k5_remove = mkRemove(mkNs(ns2, key8, isRootDelegation = true), key5)
val ns2k8k2_nonRoot = mkAdd(mkNs(ns2, key8, isRootDelegation = false), key2)
val ns2k8k2_nonRoot_remove = mkRemove(mkNs(ns2, key8, isRootDelegation = false), key2)
val ns2k2k2 = mkAuth(mkNs(ns2, key2, isRootDelegation = true), key2)
val ns2k5k2 = mkAuth(mkNs(ns2, key5, isRootDelegation = true), key2)
val ns2k2k5 = mkAuth(mkNs(ns2, key5, isRootDelegation = true), key2)
val ns2k8k5 = mkAuth(mkNs(ns2, key8, isRootDelegation = true), key5)
val ns2k8k2_nonRoot = mkAuth(mkNs(ns2, key8, isRootDelegation = false), key2)
val ns3k3k3 = mkAdd(mkNs(ns3, key3, isRootDelegation = true), key3)
val ns3k3k3 = mkAuth(mkNs(ns3, key3, isRootDelegation = true), key3)
val ns3k6k3 = mkAuth(mkNs(ns3, key6, isRootDelegation = true), key3)
def replaceSignature[T <: TopologyMapping](
authTx: AuthorizedTopologyTransaction[T],
@ -124,7 +114,7 @@ class DecentralizedNamespaceAuthorizationGraphTest
requireRoot: Boolean,
valid: Boolean,
)(keys: SigningPublicKey*) = {
graph.existsAuthorizedKeyIn(
graph.areValidAuthorizationKeys(
keys.map(_.fingerprint).toSet,
requireRoot = requireRoot,
) shouldBe valid
@ -174,7 +164,7 @@ class DecentralizedNamespaceAuthorizationGraphTest
graph.addAuth(ns2k2k2)
graph.addAuth(ns3k3k3)
graph.removeAuth(ns2k2k2_remove)
graph.removeAuth(ns2k2k2)
check(graph, requireRoot = false, valid = false)(key1, key2)
check(graph, requireRoot = false, valid = true)(key1, key3)
}
@ -190,7 +180,7 @@ class DecentralizedNamespaceAuthorizationGraphTest
check(graph, requireRoot = false, valid = true)(key1, key5)
check(graph, requireRoot = false, valid = true)(key1, key8)
loggerFactory.assertLogs(
graph.removeAuth(ns2k5k2_remove),
graph.removeAuth(ns2k5k2),
_.warningMessage should include("dangling"),
)
check(graph, requireRoot = false, valid = false)(key1, key5)
@ -210,7 +200,7 @@ class DecentralizedNamespaceAuthorizationGraphTest
check(graph, requireRoot = false, valid = true)(key1, key8)
graph.addAuth(ns2k8k2_nonRoot)
check(graph, requireRoot = false, valid = true)(key1, key8)
graph.removeAuth(ns2k8k2_nonRoot_remove)
graph.removeAuth(ns2k8k2_nonRoot)
check(graph, requireRoot = false, valid = true)(key1, key8)
}
@ -232,7 +222,7 @@ class DecentralizedNamespaceAuthorizationGraphTest
graph.addAuth(ns2k5k2)
graph.addAuth(ns2k8k5)
graph.removeAuth(ns2k2k2_remove)
graph.removeAuth(ns2k2k2)
check(graph, requireRoot = false, valid = false)(key1, key2)
check(graph, requireRoot = false, valid = false)(key1, key5)
check(graph, requireRoot = false, valid = false)(key1, key8)
@ -257,7 +247,7 @@ class DecentralizedNamespaceAuthorizationGraphTest
// test that random key is not authorized
check(graph, requireRoot = false, valid = false)(key1, key3)
// remove first certificate
graph.removeAuth(ns2k5k2_remove)
graph.removeAuth(ns2k5k2)
check(graph, requireRoot = true, valid = false)(key1, key5)
// add other certificate (we don't remember removes, so we can do that in this test)
graph.addAuth(ns2k5k2)
@ -274,7 +264,7 @@ class DecentralizedNamespaceAuthorizationGraphTest
graph.addAuth(ns2k8k5)
check(graph, requireRoot = true, valid = true)(key1, key8)
graph.removeAuth(replaceSignature(ns2k8k5_remove, key2))
graph.removeAuth(replaceSignature(ns2k8k5, key2))
check(graph, requireRoot = true, valid = false)(key1, key8)
}
}

View File

@ -5,11 +5,9 @@ package com.digitalasset.canton.topology.processing
import cats.Apply
import cats.instances.list.*
import cats.syntax.foldable.*
import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.crypto.SignatureCheckError.InvalidSignature
import com.digitalasset.canton.crypto.{Signature, SigningPublicKey}
import com.digitalasset.canton.crypto.SigningPublicKey
import com.digitalasset.canton.data.CantonTimestamp
import com.digitalasset.canton.topology.*
import com.digitalasset.canton.topology.store.TopologyStoreId.DomainStore
@ -23,13 +21,10 @@ import com.digitalasset.canton.topology.store.{
TopologyTransactionRejection,
ValidatedTopologyTransaction,
}
import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction
import com.digitalasset.canton.topology.transaction.TopologyMapping.MappingHash
import com.digitalasset.canton.topology.transaction.*
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.MonadUtil
import com.digitalasset.canton.{BaseTest, HasExecutionContext, ProtocolVersionChecksAsyncWordSpec}
import com.google.protobuf.ByteString
import org.scalatest.wordspec.AsyncWordSpec
class IncomingTopologyTransactionAuthorizationValidatorTest
@ -74,35 +69,12 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
succeed
}
def validate(
validator: IncomingTopologyTransactionAuthorizationValidator,
timestamp: CantonTimestamp,
toValidate: Seq[GenericSignedTopologyTransaction],
inStore: Map[MappingHash, GenericSignedTopologyTransaction],
expectFullAuthorization: Boolean,
)(implicit traceContext: TraceContext) = {
MonadUtil
.sequentialTraverse(toValidate)(tx =>
validator.validateAndUpdateHeadAuthState(
timestamp,
tx,
inStore.get(tx.mapping.uniqueKey),
expectFullAuthorization,
)
)
.map { results =>
val (aggregations, transactions) = results.unzip
(aggregations.combineAll, transactions)
}
}
"receiving transactions with signatures" should {
"succeed to add if the signature is valid" in {
val validator = mk()
import Factory.*
for {
res <- validate(
validator,
res <- validator.validateAndUpdateHeadAuthState(
ts(0),
List(ns1k1_k1, ns1k2_k1),
Map.empty,
@ -117,8 +89,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
import Factory.*
val invalid = ns1k2_k1.copy(signatures = ns1k1_k1.signatures)
for {
(_, validatedTopologyTransactions) <- validate(
validator,
(_, validatedTopologyTransactions) <- validator.validateAndUpdateHeadAuthState(
ts(0),
List(ns1k1_k1, invalid),
Map.empty,
@ -145,8 +116,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
val okmS1k7_k1_missing_k7 =
okmS1k7_k1.removeSignatures(Set(SigningKeys.key7.fingerprint)).value
for {
(_, validatedTopologyTransactions) <- validate(
validator,
(_, validatedTopologyTransactions) <- validator.validateAndUpdateHeadAuthState(
ts(0),
List(ns1k1_k1, okmS1k7_k1_missing_k7),
Map.empty,
@ -163,41 +133,37 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
}
}
"reject if the transaction is for the wrong domain" in {
val validator = mk()
import Factory.*
val wrongDomain = DomainId(UniqueIdentifier.tryCreate("wrong", ns1.fingerprint.unwrap))
val pid = ParticipantId(UniqueIdentifier.tryCreate("correct", ns1.fingerprint.unwrap))
val wrong = mkAdd(
DomainTrustCertificate(
pid,
wrongDomain,
false,
Seq.empty,
),
Factory.SigningKeys.key1,
)
for {
res <- validate(
validator,
ts(0),
List(ns1k1_k1, wrong),
Map.empty,
expectFullAuthorization = false,
)
} yield {
check(
res._2,
Seq(
None,
Some({
case TopologyTransactionRejection.WrongDomain(_) => true
case _ => false
}),
),
)
}
}
// TODO(#12390) resuscitate
// "reject if the transaction is for the wrong domain" in {
// val validator = mk()
// import Factory.*
// val wrongDomain = DomainId(UniqueIdentifier.tryCreate("wrong", ns1.fingerprint.unwrap))
// val pid = ParticipantId(UniqueIdentifier.tryCreate("correct", ns1.fingerprint.unwrap))
// val wrong = mkAdd(
// ParticipantState(
// RequestSide.Both,
// wrongDomain,
// pid,
// ParticipantPermission.Submission,
// TrustLevel.Ordinary,
// ),
// Factory.SigningKeys.key1,
// )
// for {
// res <- validator.validateAndUpdateHeadAuthState(ts(0), List(ns1k1_k1, wrong))
// } yield {
// check(
// res._2,
// Seq(
// None,
// Some({
// case TopologyTransactionRejection.WrongDomain(_) => true
// case _ => false
// }),
// ),
// )
// }
// }
}
"observing namespace delegations" should {
@ -205,8 +171,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
val validator = mk()
import Factory.*
for {
res <- validate(
validator,
res <- validator.validateAndUpdateHeadAuthState(
ts(0),
List(ns1k1_k1, ns1k2_k1, ns1k3_k2),
Map.empty,
@ -216,46 +181,11 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
check(res._2, Seq(None, None, None))
}
}
"fail if the signature of a root certificate is not valid" in {
val validator = mk()
import Factory.*
val sig_k1_emptySignature = Signature
.fromProtoV30(ns1k1_k1.signatures.head1.toProtoV30.copy(signature = ByteString.empty()))
.value
val ns1k1_k1WithEmptySignature =
ns1k1_k1.copy(signatures = NonEmpty(Set, sig_k1_emptySignature))
for {
res <- validate(
validator,
ts(0),
List(ns1k1_k1WithEmptySignature, ns1k2_k1),
Map.empty,
expectFullAuthorization = true,
)
} yield {
check(
res._2,
Seq(
Some({
case TopologyTransactionRejection.SignatureCheckFailed(
InvalidSignature(`sig_k1_emptySignature`, _, _)
) =>
true
case _ => false
}),
Some(_ == NoDelegationFoundForKeys(Set(SigningKeys.key1.fingerprint))),
),
)
}
}
"fail if transaction is not properly authorized" in {
val validator = mk()
import Factory.*
for {
res <- validate(
validator,
res <- validator.validateAndUpdateHeadAuthState(
ts(0),
List(ns1k1_k1, ns6k3_k6, ns1k3_k2, ns1k2_k1, ns1k3_k2),
Map.empty,
@ -287,8 +217,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
removeTxs = Set.empty,
additions = List(ns1k1_k1).map(ValidatedTopologyTransaction(_)),
)
res <- validate(
validator,
res <- validator.validateAndUpdateHeadAuthState(
ts(1),
List(ns1k2_k1, ns1k3_k2),
Map.empty,
@ -303,8 +232,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
val validator = mk()
import Factory.*
for {
res <- validate(
validator,
res <- validator.validateAndUpdateHeadAuthState(
ts(1),
List(ns1k1_k1, ns1k3_k2, id1ak4_k2, ns1k2_k1, ns6k3_k6, id1ak4_k1),
Map.empty,
@ -333,8 +261,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
val validator = mk()
import Factory.*
for {
res <- validate(
validator,
res <- validator.validateAndUpdateHeadAuthState(
ts(0),
List(ns1k1_k1, id1ak4_k1, ns1k2_k1, id1ak4_k2),
Map.empty,
@ -348,8 +275,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
val validator = mk()
import Factory.*
for {
res <- validate(
validator,
res <- validator.validateAndUpdateHeadAuthState(
ts(0),
List(id1ak4_k1, ns1k1_k1, id1ak4_k1, id6k4_k1),
Map.empty,
@ -375,8 +301,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
val validator = mk()
import Factory.*
for {
res <- validate(
validator,
res <- validator.validateAndUpdateHeadAuthState(
ts(0),
List(ns1k1_k1, ns1k2_k1, okm1ak5k1E_k2, p1p1B_k2, id1ak4_k1, ns6k6_k6, p1p6_k2k6),
Map.empty,
@ -390,41 +315,21 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
val validator = mk()
import Factory.*
for {
resultExpectFullAuthorization <- validate(
validator,
res <- validator.validateAndUpdateHeadAuthState(
ts(0),
List(ns1k1_k1, okm1ak5k1E_k2, p1p1B_k2),
Map.empty,
expectFullAuthorization = true,
)
// also check that insufficiently authorized non-proposals get rejected with expectFullAuthorization
resultDontExpectFullAuthorization <- validate(
validator,
ts(0),
List(ns1k1_k1, okm1ak5k1E_k2, p1p1B_k2),
Map.empty,
expectFullAuthorization = false,
)
} yield {
check(
resultExpectFullAuthorization._2,
res._2,
Seq(
None,
Some(_ == NotAuthorized),
Some(_ == NoDelegationFoundForKeys(Set(SigningKeys.key2.fingerprint))),
),
)
check(
resultDontExpectFullAuthorization._2,
Seq(
None,
Some(_ == NotAuthorized),
Some(_ == NoDelegationFoundForKeys(Set(SigningKeys.key2.fingerprint))),
),
)
}
}
"succeed with loading existing identifier delegations" in {
@ -440,8 +345,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
removeTxs = Set.empty,
additions = List(ns1k1_k1, ns6k6_k6, id1ak4_k1).map(ValidatedTopologyTransaction(_)),
)
res <- validate(
validator,
res <- validator.validateAndUpdateHeadAuthState(
ts(1),
List(ns1k2_k1, p1p6_k2k6, p1p1B_k2),
Map.empty,
@ -460,8 +364,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
val Rns1k2_k1 = mkTrans(ns1k2_k1.transaction.reverse)
val Rid1ak4_k1 = mkTrans(id1ak4_k1.transaction.reverse)
for {
res <- validate(
validator,
res <- validator.validateAndUpdateHeadAuthState(
ts(0),
List(ns1k1_k1, ns1k2_k1, id1ak4_k1, Rns1k2_k1, Rid1ak4_k1),
Map.empty,
@ -478,8 +381,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
val Rns1k2_k1 = mkTrans(ns1k2_k1.transaction.reverse)
val Rid1ak4_k1 = mkTrans(id1ak4_k1.transaction.reverse)
for {
res <- validate(
validator,
res <- validator.validateAndUpdateHeadAuthState(
ts(0),
List(ns1k1_k1, ns1k2_k1, id1ak4_k1, Rns1k2_k1, Rid1ak4_k1, okm1ak5k1E_k2, p1p6_k2),
Map.empty,
@ -517,8 +419,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
removeTxs = Set.empty,
additions = List(ns6k6_k6).map(ValidatedTopologyTransaction(_)),
)
res <- validate(
validator,
res <- validator.validateAndUpdateHeadAuthState(
ts(1),
List(ns1k1_k1, okm1bk5k1E_k1, p1p6_k6),
Map.empty,
@ -543,8 +444,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
removeTxs = Set.empty,
additions = List(ns1k1_k1).map(ValidatedTopologyTransaction(_)),
)
res <- validate(
validator,
res <- validator.validateAndUpdateHeadAuthState(
ts(1),
List(Rns1k1_k1, okm1bk5k1E_k1),
Map(Rns1k1_k1.mapping.uniqueKey -> ns1k1_k1),
@ -573,15 +473,13 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
removeTxs = Set.empty,
additions = List(ns1k1_k1).map(ValidatedTopologyTransaction(_)),
)
res <- validate(
validator,
res <- validator.validateAndUpdateHeadAuthState(
ts(1),
List(id1ak4_k1),
Map.empty,
expectFullAuthorization = true,
)
res2 <- validate(
validator,
res2 <- validator.validateAndUpdateHeadAuthState(
ts(2),
List(Rid1ak4_k1),
Map.empty,
@ -613,8 +511,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
additions =
List(ns1k1_k1, ns1k2_k1, id1ak4_k2, ns6k6_k6).map(ValidatedTopologyTransaction(_)),
)
res <- validate(
validator,
res <- validator.validateAndUpdateHeadAuthState(
ts(1),
List(p1p6_k2k6, Rns1k2_k1, id6ak7_k6, p1p6_k2),
Map(
@ -646,44 +543,29 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
import Factory.*
val pid2 = ParticipantId(UniqueIdentifier.tryCreate("participant2", ns2))
val participants_1_2_6_HostParty1 = mkAddMultiKey(
PartyToParticipant.tryCreate(
val participant2HostsParty1 = mkAddMultiKey(
PartyToParticipant(
party1b, // lives in the namespace of p1, corresponding to `SigningKeys.key1`
None,
threshold = PositiveInt.two,
Seq(
HostingParticipant(participant1, ParticipantPermission.Submission),
HostingParticipant(pid2, ParticipantPermission.Submission),
HostingParticipant(participant6, ParticipantPermission.Submission),
),
groupAddressing = false,
),
// both the party's owner and the participant sign
NonEmpty(Set, SigningKeys.key1, SigningKeys.key2, SigningKeys.key6),
NonEmpty(Set, SigningKeys.key1, SigningKeys.key2),
serial = PositiveInt.one,
)
val unhostingMapping = PartyToParticipant.tryCreate(
val unhostingMapping = PartyToParticipant(
party1b,
None,
threshold = PositiveInt.two,
Seq(
HostingParticipant(participant1, ParticipantPermission.Submission),
HostingParticipant(participant6, ParticipantPermission.Submission),
),
Seq(HostingParticipant(participant1, ParticipantPermission.Submission)),
groupAddressing = false,
)
val unhostingMappingAndThresholdChange = PartyToParticipant.tryCreate(
party1b,
None,
threshold = PositiveInt.one,
Seq(
HostingParticipant(participant1, ParticipantPermission.Submission),
HostingParticipant(participant6, ParticipantPermission.Submission),
),
groupAddressing = false,
)
val participant2RemovesItselfUnilaterally = mkAdd(
unhostingMapping,
// only the unhosting participant signs
@ -698,54 +580,53 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
serial = PositiveInt.two,
)
val ptpMappingHash = participants_1_2_6_HostParty1.mapping.uniqueKey
val ptpMappingHash = participant2HostsParty1.mapping.uniqueKey
import monocle.syntax.all.*
for {
_ <- store.update(
SequencedTime(ts(0)),
EffectiveTime(ts(0)),
removeMapping = Map.empty,
removeTxs = Set.empty,
additions = List(ns1k1_k1, ns2k2_k2, ns6k6_k6).map(
additions = List(ns1k1_k1, ns2k2_k2).map(
ValidatedTopologyTransaction(_)
),
)
hostingResult <- validate(
validator,
hostingResult <- validator.validateAndUpdateHeadAuthState(
ts(1),
List(participants_1_2_6_HostParty1),
inStore = Map.empty,
List(participant2HostsParty1),
transactionsInStore = Map.empty,
expectFullAuthorization = false,
)
// unilateral unhosting by participant2 only signed by the participant
unhostingResult <- validate(
validator,
unhostingResult <- validator.validateAndUpdateHeadAuthState(
ts(2),
List(participant2RemovesItselfUnilaterally),
inStore = Map(ptpMappingHash -> participants_1_2_6_HostParty1),
transactionsInStore = Map(ptpMappingHash -> participant2HostsParty1),
expectFullAuthorization = false,
)
// it is still allowed to have a mix of signatures for unhosting
unhostingMixedResult <- validate(
validator,
unhostingMixedResult <- validator.validateAndUpdateHeadAuthState(
ts(2),
List(participant2RemovedFullyAuthorized),
inStore = Map(ptpMappingHash -> participants_1_2_6_HostParty1),
transactionsInStore = Map(ptpMappingHash -> participant2HostsParty1),
expectFullAuthorization = false,
)
// the participant being removed may not sign if anything else changes
unhostingAndThresholdChangeResult <- validate(
validator,
unhostingAndThresholdChangeResult <- validator.validateAndUpdateHeadAuthState(
ts(2),
List(
mkAddMultiKey(
unhostingMappingAndThresholdChange,
unhostingMapping
.focus(_.threshold)
.replace(PositiveInt.one),
NonEmpty(Set, SigningKeys.key2),
)
),
inStore = Map(ptpMappingHash -> participants_1_2_6_HostParty1),
transactionsInStore = Map(ptpMappingHash -> participant2HostsParty1),
expectFullAuthorization = false,
)
} yield {
@ -776,8 +657,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
ValidatedTopologyTransaction(_)
),
)
res <- validate(
validator,
res <- validator.validateAndUpdateHeadAuthState(
ts(1),
List(dns2),
decentralizedNamespaceWithMultipleOwnerThreshold
@ -815,8 +695,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
ValidatedTopologyTransaction(_)
),
)
res <- validate(
validator,
res <- validator.validateAndUpdateHeadAuthState(
ts(2),
// Analogously to how the TopologyStateProcessor merges the signatures of proposals
// with the same serial, combine the signature of the previous proposal to the current proposal.
@ -832,93 +711,9 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
check(res._2, Seq(None))
}
}
"remove from cache for TopologyChangeOp.REMOVAL" in {
val store =
new InMemoryTopologyStore(TopologyStoreId.AuthorizedStore, loggerFactory, timeouts)
val validator = mk(store)
import Factory.*
for {
// 1. validate and store the decentralized namespace owners root certificates
resultAddOwners <- validate(
validator,
ts(0),
decentralizedNamespaceOwners,
Map.empty,
expectFullAuthorization = true,
)
_ = resultAddOwners._2.foreach(_.rejectionReason shouldBe None)
_ <- store.update(
SequencedTime(ts(0)),
EffectiveTime(ts(0)),
removeMapping = Map.empty,
removeTxs = Set.empty,
additions = resultAddOwners._2,
)
// 2. validate and store the decentralized namespace definition
// this puts the DND authorization graph into the cache
resultAddDND <- validate(
validator,
ts(1),
List(dns1),
Map.empty,
expectFullAuthorization = true,
)
_ = resultAddDND._2.foreach(_.rejectionReason shouldBe None)
_ <- store.update(
SequencedTime(ts(1)),
EffectiveTime(ts(1)),
removeMapping = Map.empty,
removeTxs = Set.empty,
additions = resultAddDND._2,
)
// 3. now process the removal of the decentralized namespace definition
// this should remove the DND authorization graph from the cache
resRemoveDND <- validate(
validator,
ts(2),
List(dns1Removal),
Map(dns1.mapping.uniqueKey -> dns1),
expectFullAuthorization = true,
)
_ = resRemoveDND._2.foreach(_.rejectionReason shouldBe None)
_ <- store.update(
SequencedTime(ts(2)),
EffectiveTime(ts(2)),
removeMapping = Map(dns1Removal.mapping.uniqueKey -> dns1Removal.serial),
removeTxs = Set.empty,
additions = resRemoveDND._2,
)
// 4. Now to the actual test: try to authorize something for the decentralized namespace.
// this should be rejected because the namespace is not valid anymore, and the
// authorization cache has been properly cleaned up.
resultUnauthorizedIDD <- validate(
validator,
ts(3),
List(dns1Idd),
Map.empty,
expectFullAuthorization = true,
)
} yield {
check(
resultUnauthorizedIDD._2,
Seq(
Some(
_ == NoDelegationFoundForKeys(
Set(SigningKeys.key1, SigningKeys.key8, SigningKeys.key9).map(_.fingerprint)
)
)
),
)
}
}
}
def checkProposalFlagAfterValidation(validationIsFinal: Boolean, expectProposal: Boolean) = {
def checkProposalFlatAfterValidation(validationIsFinal: Boolean, expectProposal: Boolean) = {
val store =
new InMemoryTopologyStore(TopologyStoreId.AuthorizedStore, loggerFactory, timeouts)
val validator = mk(store, validationIsFinal)
@ -956,18 +751,18 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
),
BaseTest.testedProtocolVersion,
)
result_packageVetting <- validate(
validator,
ts(1),
toValidate = List(
// Setting isProposal=true despite having enough keys.
// This simulates processing a proposal with the signature of a node,
// that got merged with another proposal already in the store.
mkTrans(pkgTx, signingKeys = NonEmpty(Set, key1, key8), isProposal = true)
),
inStore = Map.empty,
expectFullAuthorization = false,
)
result_packageVetting <- validator
.validateAndUpdateHeadAuthState(
ts(1),
transactionsToValidate = List(
// Setting isProposal=true despite having enough keys.
// This simulates processing a proposal with the signature of a node,
// that got merged with another proposal already in the store.
mkTrans(pkgTx, signingKeys = NonEmpty(Set, key1, key8), isProposal = true)
),
transactionsInStore = Map.empty,
expectFullAuthorization = false,
)
} yield {
val validatedPkgTx = result_packageVetting._2.loneElement
@ -980,11 +775,11 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
}
"change the proposal status when the validation is final" in {
checkProposalFlagAfterValidation(validationIsFinal = true, expectProposal = false)
checkProposalFlatAfterValidation(validationIsFinal = true, expectProposal = false)
}
"not change the proposal status when the validation is not final" in {
checkProposalFlagAfterValidation(validationIsFinal = false, expectProposal = true)
checkProposalFlatAfterValidation(validationIsFinal = false, expectProposal = true)
}
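
The two test cases above check whether the proposal flag of a fully authorized transaction may be cleared, depending on whether the validation is final. A minimal sketch of that rule, with hypothetical names rather than the validator's real types, might be:

```
// Illustrative sketch: only a final validation clears the proposal flag of a
// fully authorized transaction; a non-final validation leaves it untouched.
final case class ValidatedTx(isProposal: Boolean, fullyAuthorized: Boolean)

def finalizeProposalFlag(tx: ValidatedTx, validationIsFinal: Boolean): ValidatedTx =
  if (validationIsFinal && tx.fullyAuthorized) tx.copy(isProposal = false)
  else tx
```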
"remove superfluous signatures" in {
@ -1025,27 +820,26 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
),
BaseTest.testedProtocolVersion,
)
resultPackageVetting <- validate(
validator,
ts(1),
toValidate = List(
// Signing this transaction also with key9 simulates that ns9 was part of the
// decentralized namespace before and was eligible for signing the transaction.
// After this validation, we expect the signature of key9 to be removed
mkTrans(pkgTx, signingKeys = NonEmpty(Set, key9, key1, key8), isProposal = true)
),
inStore = Map.empty,
expectFullAuthorization = false,
)
resultPackageVetting <- validator
.validateAndUpdateHeadAuthState(
ts(1),
transactionsToValidate = List(
// Signing this transaction also with key9 simulates that ns9 was part of the
// decentralized namespace before and was eligible for signing the transaction.
// After this validation, we expect the signature of key9 to be removed
mkTrans(pkgTx, signingKeys = NonEmpty(Set, key9, key1, key8), isProposal = true)
),
transactionsInStore = Map.empty,
expectFullAuthorization = false,
)
// if there are only superfluous signatures, reject the transaction
resultOnlySuperfluousSignatures <- validate(
validator,
resultOnlySuperfluousSignatures <- validator.validateAndUpdateHeadAuthState(
ts(2),
toValidate = List(
transactionsToValidate = List(
mkTrans(pkgTx, signingKeys = NonEmpty(Set, key3, key5), isProposal = true)
),
inStore = Map.empty,
transactionsInStore = Map.empty,
expectFullAuthorization = false,
)
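
The comments in this hunk describe stripping signatures made with keys that are no longer eligible, and rejecting the transaction when only such superfluous signatures remain. A hypothetical sketch of that filtering rule, not Canton's actual validator code, might be:

```
// Illustrative sketch: keep only signatures from still-authorized keys and
// reject the transaction if nothing valid remains.
final case class Signed[A](value: A, signingKeys: Set[String])

def dropSuperfluousSignatures[A](
    tx: Signed[A],
    authorizedKeys: Set[String],
): Either[String, Signed[A]] = {
  val retained = tx.signingKeys.intersect(authorizedKeys)
  if (retained.isEmpty)
    Left(s"no delegation found for keys ${tx.signingKeys.mkString(", ")}")
  else
    Right(tx.copy(signingKeys = retained))
}
```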
@ -1099,19 +893,19 @@ class IncomingTopologyTransactionAuthorizationValidatorTest
expectFullAuthorization: Boolean,
signingKeys: SigningPublicKey*
) = TraceContext.withNewTraceContext { freshTraceContext =>
validate(
validator,
ts(1),
toValidate = List(
mkTrans(
pkgTx,
isProposal = isProposal,
signingKeys = NonEmpty.from(signingKeys.toSet).value,
)
),
inStore = Map.empty,
expectFullAuthorization = expectFullAuthorization,
)(freshTraceContext)
validator
.validateAndUpdateHeadAuthState(
ts(1),
transactionsToValidate = List(
mkTrans(
pkgTx,
isProposal = isProposal,
signingKeys = NonEmpty.from(signingKeys.toSet).value,
)
),
transactionsInStore = Map.empty,
expectFullAuthorization = expectFullAuthorization,
)(freshTraceContext)
.map(_._2.loneElement)
}
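
The helper in the hunk above runs each validation under a freshly created trace context and extracts the single validated transaction from the result. A simplified stand-in for that pattern, assuming placeholder TraceContext and result types rather than Canton's own, could look like:

```
// Illustrative sketch: run a validation under a fresh trace context and
// return the lone validated element, failing loudly otherwise.
import scala.concurrent.{ExecutionContext, Future}

final case class TraceContext(id: String)
object TraceContext {
  def withNewTraceContext[A](body: TraceContext => A): A =
    body(TraceContext(java.util.UUID.randomUUID().toString))
}

def validateSingle[T](
    run: TraceContext => Future[Seq[T]]
)(implicit ec: ExecutionContext): Future[T] =
  TraceContext.withNewTraceContext { fresh =>
    run(fresh).map {
      case Seq(single) => single
      case other =>
        throw new IllegalStateException(s"expected one validated transaction, got ${other.size}")
    }
  }
```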

View File

@ -5,7 +5,7 @@ package com.digitalasset.canton.topology.processing
import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.crypto.SigningPublicKey
import com.digitalasset.canton.crypto.{Fingerprint, SigningPublicKey}
import com.digitalasset.canton.logging.NamedLoggerFactory
import com.digitalasset.canton.protocol.TestDomainParameters
import com.digitalasset.canton.time.NonNegativeFiniteDuration
@ -98,7 +98,7 @@ class TopologyTransactionTestFactory(loggerFactory: NamedLoggerFactory, initEc:
val p1p1B_k2 =
mkAdd(
PartyToParticipant.tryCreate(
PartyToParticipant(
party1b,
None,
threshold = PositiveInt.one,
@ -109,7 +109,7 @@ class TopologyTransactionTestFactory(loggerFactory: NamedLoggerFactory, initEc:
)
val p1p6_k2 =
mkAdd(
PartyToParticipant.tryCreate(
PartyToParticipant(
party1b,
None,
threshold = PositiveInt.one,
@ -120,20 +120,20 @@ class TopologyTransactionTestFactory(loggerFactory: NamedLoggerFactory, initEc:
isProposal = true,
)
val p1p6_k6 =
mkAddMultiKey(
PartyToParticipant.tryCreate(
mkAdd(
PartyToParticipant(
party1b,
None,
threshold = PositiveInt.one,
Seq(HostingParticipant(participant6, ParticipantPermission.Submission)),
groupAddressing = false,
),
NonEmpty(Set, key1, key6),
key6,
isProposal = true,
)
val p1p6_k2k6 =
mkAddMultiKey(
PartyToParticipant.tryCreate(
PartyToParticipant(
party1b,
None,
threshold = PositiveInt.one,
@ -145,7 +145,7 @@ class TopologyTransactionTestFactory(loggerFactory: NamedLoggerFactory, initEc:
val p1p6B_k3 =
mkAdd(
PartyToParticipant.tryCreate(
PartyToParticipant(
party1b,
Some(domainId1),
threshold = PositiveInt.one,
@ -192,15 +192,6 @@ class TopologyTransactionTestFactory(loggerFactory: NamedLoggerFactory, initEc:
NonEmpty(Set, key1, key8, key9),
serial = PositiveInt.one,
)
val dns1Removal = mkRemove(
dns1.mapping,
NonEmpty(Set, key1, key8, key9),
serial = PositiveInt.two,
)
val dns1Idd = mkAddMultiKey(
IdentifierDelegation(UniqueIdentifier.tryCreate("test", dns1.mapping.namespace), key4),
NonEmpty(Set, key1, key8, key9),
)
val dns2 = mkAdd(
DecentralizedNamespaceDefinition
.create(ns7, PositiveInt.one, NonEmpty(Set, ns1))
@ -223,19 +214,15 @@ class TopologyTransactionTestFactory(loggerFactory: NamedLoggerFactory, initEc:
serial = PositiveInt.two,
isProposal = true,
)
val decentralizedNamespaceOwners = List(ns1k1_k1, ns8k8_k8, ns9k9_k9)
val decentralizedNamespaceWithMultipleOwnerThreshold =
List(ns1k1_k1, ns8k8_k8, ns9k9_k9, ns7k7_k7, dns1)
private val dndOwners =
NonEmpty(Set, key1.fingerprint, key2.fingerprint, key3.fingerprint).map(Namespace(_))
private val dndNamespace = DecentralizedNamespaceDefinition.computeNamespace(dndOwners)
val dnd_proposal_k1 = mkAdd(
DecentralizedNamespaceDefinition
.create(
dndNamespace,
Namespace(Fingerprint.tryCreate("dnd-namespace")),
PositiveInt.two,
dndOwners,
NonEmpty(Set, key1.fingerprint, key2.fingerprint, key3.fingerprint).map(Namespace(_)),
)
.fold(sys.error, identity),
signingKey = key1,
@ -244,7 +231,7 @@ class TopologyTransactionTestFactory(loggerFactory: NamedLoggerFactory, initEc:
val dnd_proposal_k2 = mkAdd(
DecentralizedNamespaceDefinition
.create(
dndNamespace,
Namespace(Fingerprint.tryCreate("dnd-namespace")),
PositiveInt.two,
NonEmpty(Set, key1.fingerprint, key2.fingerprint, key3.fingerprint).map(Namespace(_)),
)
@ -255,7 +242,7 @@ class TopologyTransactionTestFactory(loggerFactory: NamedLoggerFactory, initEc:
val dnd_proposal_k3 = mkAdd(
DecentralizedNamespaceDefinition
.create(
dndNamespace,
Namespace(Fingerprint.tryCreate("dnd-namespace")),
PositiveInt.two,
NonEmpty(Set, key1.fingerprint, key2.fingerprint, key3.fingerprint).map(Namespace(_)),
)
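
One side of these hunks derives the decentralized namespace via a computeNamespace-style call over the owner fingerprints instead of hard-coding a fingerprint. Assuming the namespace is a deterministic digest over the sorted owner fingerprints (an assumption; the exact scheme is not shown in this diff), a sketch could be:

```
// Illustrative sketch: derive a namespace fingerprint deterministically from
// the owners, so the same owner set always yields the same namespace.
import java.nio.charset.StandardCharsets
import java.security.MessageDigest

def computeNamespaceFingerprint(ownerFingerprints: Set[String]): String = {
  val digest = MessageDigest.getInstance("SHA-256")
  ownerFingerprints.toSeq.sorted.foreach { fp =>
    digest.update(fp.getBytes(StandardCharsets.UTF_8))
  }
  digest.digest().map("%02x".format(_)).mkString
}
```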

View File

@ -89,7 +89,7 @@ class TopologyStoreTestData(
serial = PositiveInt.tryCreate(1),
)
val tx3_PTP_Proposal = makeSignedTx(
PartyToParticipant.tryCreate(
PartyToParticipant(
partyId = fredOfCanton,
domainId = None,
threshold = PositiveInt.one,
@ -116,7 +116,7 @@ class TopologyStoreTestData(
serial = PositiveInt.tryCreate(2),
)
val tx5_PTP = makeSignedTx(
PartyToParticipant.tryCreate(
PartyToParticipant(
partyId = fredOfCanton,
domainId = None,
threshold = PositiveInt.one,
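
These hunks switch between the bare PartyToParticipant constructor and a tryCreate variant. A tryCreate-style smart constructor typically validates its inputs before building the mapping; the sketch below, with hypothetical field names and checks rather than Canton's real ones, illustrates the general shape:

```
// Illustrative sketch: a smart constructor that rejects a confirmation
// threshold the listed hosting participants cannot possibly satisfy.
final case class HostingParticipant(participantId: String, permission: String)

final case class PartyToParticipantMapping(
    partyId: String,
    threshold: Int,
    participants: Seq[HostingParticipant],
)

object PartyToParticipantMapping {
  def tryCreate(
      partyId: String,
      threshold: Int,
      participants: Seq[HostingParticipant],
  ): PartyToParticipantMapping = {
    require(threshold >= 1, s"threshold must be positive, got $threshold")
    require(
      threshold <= participants.size,
      s"threshold $threshold exceeds the ${participants.size} hosting participant(s)",
    )
    PartyToParticipantMapping(partyId, threshold, participants)
  }
}
```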

Some files were not shown because too many files have changed in this diff